Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Jan 2020 20:11:23 +0000 (12:11 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Jan 2020 20:11:23 +0000 (12:11 -0800)
Pull x86 cleanups from Ingo Molnar:
 "Misc cleanups all around the map"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/CPU/AMD: Remove amd_get_topology_early()
  x86/tsc: Remove redundant assignment
  x86/crash: Use resource_size()
  x86/cpu: Add a missing prototype for arch_smt_update()
  x86/nospec: Remove unused RSB_FILL_LOOPS
  x86/vdso: Provide missing include file
  x86/Kconfig: Correct spelling and punctuation
  Documentation/x86/boot: Fix typo
  x86/boot: Fix a comment's incorrect file reference
  x86/process: Remove set but not used variables prev and next
  x86/Kconfig: Fix Kconfig indentation
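
Of the cleanups above, "x86/crash: Use resource_size()" illustrates a common kernel pattern: replacing open-coded size arithmetic on a struct resource with the resource_size() helper. A minimal sketch of that pattern follows (struct resource and resource_size() are real kernel APIs from <linux/ioport.h>; the region_bytes() wrapper is hypothetical, for illustration only, not the actual crash-kernel code):

	#include <linux/ioport.h>	/* struct resource, resource_size() */

	/* region_bytes() is a hypothetical caller used for illustration.
	 * resource_size() returns res->end - res->start + 1, since a
	 * struct resource describes an inclusive [start, end] range. */
	static resource_size_t region_bytes(const struct resource *res)
	{
		/* Open-coded form the cleanup replaces: */
		/* return res->end - res->start + 1; */

		/* Preferred helper; same value, no off-by-one risk: */
		return resource_size(res);
	}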

3257 files changed:
.mailmap
Documentation/ABI/obsolete/sysfs-selinux-disable [new file with mode: 0644]
Documentation/ABI/stable/sysfs-class-tpm
Documentation/ABI/stable/sysfs-driver-dma-idxd [new file with mode: 0644]
Documentation/ABI/stable/sysfs-driver-mlxreg-io
Documentation/ABI/testing/sysfs-class-devfreq
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/ABI/testing/sysfs-platform-asus-wmi
Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
Documentation/ABI/testing/sysfs-power
Documentation/RCU/NMI-RCU.rst [moved from Documentation/RCU/NMI-RCU.txt with 73% similarity]
Documentation/RCU/arrayRCU.rst [moved from Documentation/RCU/arrayRCU.txt with 85% similarity]
Documentation/RCU/index.rst
Documentation/RCU/lockdep-splat.txt
Documentation/RCU/rcu_dereference.rst [moved from Documentation/RCU/rcu_dereference.txt with 88% similarity]
Documentation/RCU/rcubarrier.rst [moved from Documentation/RCU/rcubarrier.txt with 72% similarity]
Documentation/RCU/stallwarn.txt
Documentation/RCU/whatisRCU.rst [moved from Documentation/RCU/whatisRCU.txt with 84% similarity]
Documentation/admin-guide/acpi/fan_performance_states.rst [new file with mode: 0644]
Documentation/admin-guide/acpi/index.rst
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/device-mapper/dm-integrity.rst
Documentation/admin-guide/device-mapper/index.rst
Documentation/admin-guide/devices.txt
Documentation/admin-guide/ext4.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/cpuidle.rst
Documentation/admin-guide/pm/intel_idle.rst [new file with mode: 0644]
Documentation/admin-guide/pm/working-state.rst
Documentation/admin-guide/xfs.rst
Documentation/arm64/cpu-feature-registers.rst
Documentation/arm64/elf_hwcaps.rst
Documentation/arm64/silicon-errata.rst
Documentation/core-api/xarray.rst
Documentation/dev-tools/kcov.rst
Documentation/dev-tools/kselftest.rst
Documentation/dev-tools/kunit/index.rst
Documentation/dev-tools/kunit/kunit-tool.rst [new file with mode: 0644]
Documentation/dev-tools/kunit/start.rst
Documentation/dev-tools/kunit/usage.rst
Documentation/devicetree/bindings/arm/atmel-sysregs.txt
Documentation/devicetree/bindings/arm/sunxi.yaml
Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml
Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
Documentation/devicetree/bindings/dma/allwinner,sun4i-a10-dma.yaml
Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
Documentation/devicetree/bindings/dma/allwinner,sun6i-a31-dma.yaml
Documentation/devicetree/bindings/dma/fsl-edma.txt
Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
Documentation/devicetree/bindings/dma/jz4780-dma.txt
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
Documentation/devicetree/bindings/dma/ti/k3-udma.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/sifive,gpio.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml
Documentation/devicetree/bindings/i2c/i2c-at91.txt
Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
Documentation/devicetree/bindings/iio/adc/allwinner,sun8i-a33-ths.yaml
Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-a10-ic.yaml
Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/fsl,intmux.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml
Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml
Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml
Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml
Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml
Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/mmc/renesas,sdhi.txt
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt [deleted file]
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
Documentation/devicetree/bindings/mmc/sdhci-msm.txt
Documentation/devicetree/bindings/mmc/sdhci-omap.txt
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt [deleted file]
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml
Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml
Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml
Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml
Documentation/devicetree/bindings/net/can/tcan4x5x.txt
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
Documentation/devicetree/bindings/power/avs/qcom,cpr.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pwm/allwinner,sun4i-a10-pwm.yaml
Documentation/devicetree/bindings/regulator/mp8859.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/st,stm32-booster.txt [deleted file]
Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.txt [deleted file]
Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt [deleted file]
Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
Documentation/devicetree/bindings/rtc/allwinner,sun4i-a10-rtc.yaml
Documentation/devicetree/bindings/rtc/allwinner,sun6i-a31-rtc.yaml
Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml
Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-i2s.yaml
Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
Documentation/devicetree/bindings/sound/allwinner,sun50i-a64-codec-analog.yaml
Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
Documentation/devicetree/bindings/sound/allwinner,sun8i-a33-codec.yaml
Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml
Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
Documentation/devicetree/bindings/spi/nuvoton,npcm-pspi.txt
Documentation/devicetree/bindings/spi/spi-controller.yaml
Documentation/devicetree/bindings/spi/spi-stm32.txt [deleted file]
Documentation/devicetree/bindings/spi/spi_atmel.txt
Documentation/devicetree/bindings/spi/st,stm32-spi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.yaml
Documentation/devicetree/bindings/timer/renesas,cmt.txt
Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
Documentation/driver-api/dmaengine/client.rst
Documentation/driver-api/dmaengine/provider.rst
Documentation/driver-api/driver-model/devres.rst
Documentation/features/debug/gcov-profile-all/arch-support.txt
Documentation/filesystems/erofs.txt
Documentation/filesystems/overlayfs.rst [moved from Documentation/filesystems/overlayfs.txt with 99% similarity]
Documentation/firmware-guide/acpi/enumeration.rst
Documentation/hwmon/adm1177.rst [new file with mode: 0644]
Documentation/hwmon/drivetemp.rst [new file with mode: 0644]
Documentation/hwmon/index.rst
Documentation/hwmon/max20730.rst [new file with mode: 0644]
Documentation/hwmon/max31730.rst [new file with mode: 0644]
Documentation/hwmon/pmbus.rst
Documentation/hwmon/ucd9000.rst
Documentation/hwmon/xdpe12284.rst [new file with mode: 0644]
Documentation/kbuild/kconfig-language.rst
Documentation/kbuild/makefiles.rst
Documentation/media/v4l-drivers/meye.rst
Documentation/networking/dsa/sja1105.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/j1939.rst
Documentation/networking/netdev-FAQ.rst
Documentation/process/coding-style.rst
Documentation/process/embargoed-hardware-issues.rst
Documentation/process/index.rst
Documentation/riscv/index.rst
Documentation/riscv/patch-acceptance.rst [new file with mode: 0644]
Documentation/scsi/smartpqi.txt
Documentation/sound/kernel-api/writing-an-alsa-driver.rst
Documentation/translations/it_IT/process/coding-style.rst
Documentation/translations/zh_CN/process/coding-style.rst
Documentation/x86/pat.rst
MAINTAINERS
Makefile
arch/alpha/include/asm/io.h
arch/alpha/include/asm/vmalloc.h [new file with mode: 0644]
arch/arc/Kconfig
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/hugepage.h
arch/arc/include/asm/vmalloc.h [new file with mode: 0644]
arch/arc/kernel/asm-offsets.c
arch/arc/kernel/entry.S
arch/arc/kernel/unwind.c
arch/arc/plat-eznps/Kconfig
arch/arm/Kconfig
arch/arm/boot/dts/am335x-boneblack-common.dtsi
arch/arm/boot/dts/am335x-sancloud-bbe.dts
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/am571x-idk.dts
arch/arm/boot/dts/am572x-idk-common.dtsi
arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
arch/arm/boot/dts/aspeed-g6.dtsi
arch/arm/boot/dts/bcm-cygnus.dtsi
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/bcm5301x.dtsi
arch/arm/boot/dts/e60k02.dtsi
arch/arm/boot/dts/imx6dl-icore-mipi.dts
arch/arm/boot/dts/imx6q-dhcom-pdk2.dts
arch/arm/boot/dts/imx6q-dhcom-som.dtsi
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
arch/arm/boot/dts/imx6sl-evk.dts
arch/arm/boot/dts/imx6sll-evk.dts
arch/arm/boot/dts/imx6sx-sdb-reva.dts
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
arch/arm/boot/dts/imx7s-colibri.dtsi
arch/arm/boot/dts/imx7ulp.dtsi
arch/arm/boot/dts/meson8.dtsi
arch/arm/boot/dts/mmp3.dtsi
arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
arch/arm/configs/exynos_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/shmobile_defconfig
arch/arm/crypto/curve25519-glue.c
arch/arm/include/asm/arch_gicv3.h
arch/arm/include/asm/efi.h
arch/arm/include/asm/io.h
arch/arm/include/asm/switch_to.h
arch/arm/include/asm/vdso/gettimeofday.h
arch/arm/include/asm/vdso/vsyscall.h
arch/arm/include/asm/vmalloc.h [new file with mode: 0644]
arch/arm/kernel/Makefile
arch/arm/kernel/entry-armv.S
arch/arm/kernel/ftrace.c
arch/arm/kernel/hyp-stub.S
arch/arm/kernel/process.c
arch/arm/kernel/traps.c
arch/arm/mach-bcm/bcm2711.c
arch/arm/mach-bcm/platsmp.c
arch/arm/mach-davinci/Kconfig
arch/arm/mach-davinci/devices.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-imx/cpu.c
arch/arm/mach-mmp/pxa168.h
arch/arm/mach-mmp/time.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-pxa/magician.c
arch/arm/mach-shmobile/platsmp-apmu.c
arch/arm/mach-shmobile/pm-rcar-gen2.c
arch/arm/mach-shmobile/setup-r8a7740.c
arch/arm/mach-shmobile/setup-r8a7778.c
arch/arm/mach-vexpress/spc.c
arch/arm/mm/cache-v7.S
arch/arm/mm/cache-v7m.S
arch/arm/vdso/Makefile
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/Makefile
arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino-emmc.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mm.dtsi
arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
arch/arm64/boot/dts/rockchip/rk3328-a1.dts
arch/arm64/crypto/sha256-glue.c
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/archrandom.h [new file with mode: 0644]
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/checksum.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/daifflags.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/exception.h
arch/arm64/include/asm/hwcap.h
arch/arm64/include/asm/kexec.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/linkage.h
arch/arm64/include/asm/lse.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/preempt.h
arch/arm64/include/asm/sections.h
arch/arm64/include/asm/simd.h
arch/arm64/include/asm/spinlock.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/vdso/compat_gettimeofday.h
arch/arm64/include/asm/vmalloc.h [new file with mode: 0644]
arch/arm64/include/uapi/asm/hwcap.h
arch/arm64/include/uapi/asm/unistd.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpu-reset.S
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/kaslr.c
arch/arm64/kernel/kexec_image.c
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/machine_kexec_file.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/signal32.c
arch/arm64/kernel/ssbd.c
arch/arm64/kernel/syscall.c
arch/arm64/kernel/traps.c
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/kvm/hyp/tlb.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/sys_regs.h
arch/arm64/lib/Makefile
arch/arm64/lib/clear_page.S
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_page.S
arch/arm64/lib/copy_to_user.S
arch/arm64/lib/crc32.S
arch/arm64/lib/csum.c [new file with mode: 0644]
arch/arm64/lib/memchr.S
arch/arm64/lib/memcmp.S
arch/arm64/lib/memcpy.S
arch/arm64/lib/memmove.S
arch/arm64/lib/memset.S
arch/arm64/lib/strchr.S
arch/arm64/lib/strcmp.S
arch/arm64/lib/strlen.S
arch/arm64/lib/strncmp.S
arch/arm64/lib/strnlen.S
arch/arm64/lib/strrchr.S
arch/arm64/lib/tishift.S
arch/arm64/mm/cache.S
arch/arm64/mm/context.c
arch/arm64/mm/fault.c
arch/arm64/mm/mmu.c
arch/arm64/mm/pageattr.c
arch/arm64/mm/proc.S
arch/arm64/xen/hypercall.S
arch/c6x/include/asm/vmalloc.h [new file with mode: 0644]
arch/c6x/kernel/entry.S
arch/csky/include/asm/vmalloc.h [new file with mode: 0644]
arch/csky/kernel/entry.S
arch/h8300/include/asm/vmalloc.h [new file with mode: 0644]
arch/h8300/kernel/entry.S
arch/hexagon/include/asm/atomic.h
arch/hexagon/include/asm/bitops.h
arch/hexagon/include/asm/cmpxchg.h
arch/hexagon/include/asm/futex.h
arch/hexagon/include/asm/io.h
arch/hexagon/include/asm/spinlock.h
arch/hexagon/include/asm/vmalloc.h [new file with mode: 0644]
arch/hexagon/kernel/stacktrace.c
arch/hexagon/kernel/vm_entry.S
arch/ia64/include/asm/acpi.h
arch/ia64/include/asm/vga.h
arch/ia64/include/asm/vmalloc.h [new file with mode: 0644]
arch/ia64/kernel/acpi.c
arch/ia64/kernel/cyclone.c
arch/ia64/kernel/entry.S
arch/ia64/kernel/kprobes.c
arch/ia64/mm/init.c
arch/m68k/Kconfig
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/kmap.h
arch/m68k/include/asm/unistd.h
arch/m68k/include/asm/vmalloc.h [new file with mode: 0644]
arch/m68k/kernel/entry.S
arch/m68k/kernel/process.c
arch/m68k/kernel/syscalls/syscall.tbl
arch/microblaze/Kconfig
arch/microblaze/include/asm/vmalloc.h [new file with mode: 0644]
arch/microblaze/kernel/entry.S
arch/mips/Kconfig
arch/mips/ar7/clock.c
arch/mips/ar7/gpio.c
arch/mips/ar7/platform.c
arch/mips/ath25/ar2315.c
arch/mips/ath25/ar5312.c
arch/mips/ath25/board.c
arch/mips/ath79/common.c
arch/mips/ath79/setup.c
arch/mips/boot/compressed/Makefile
arch/mips/cavium-octeon/executive/cvmx-bootmem.c
arch/mips/cavium-octeon/setup.c
arch/mips/generic/board-ocelot.c
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/cpu-type.h
arch/mips/include/asm/io.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/vdso/gettimeofday.h
arch/mips/include/asm/vmalloc.h [new file with mode: 0644]
arch/mips/kernel/cacheinfo.c
arch/mips/kernel/entry.S
arch/mips/kernel/mips-cm.c
arch/mips/kernel/mips-cpc.c
arch/mips/lantiq/falcon/sysctrl.c
arch/mips/lantiq/irq.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/loongson2ef/common/reset.c
arch/mips/loongson32/common/prom.c
arch/mips/loongson32/common/reset.c
arch/mips/loongson32/common/time.c
arch/mips/loongson64/reset.c
arch/mips/mti-malta/malta-dtshim.c
arch/mips/net/ebpf_jit.c
arch/mips/pci/pci-alchemy.c
arch/mips/pci/pci-ar2315.c
arch/mips/pci/pci-bcm63xx.c
arch/mips/pci/pci-rt2880.c
arch/mips/pic32/pic32mzda/early_console.c
arch/mips/pic32/pic32mzda/early_pin.c
arch/mips/pmcs-msp71xx/msp_serial.c
arch/mips/ralink/irq.c
arch/mips/ralink/of.c
arch/mips/rb532/devices.c
arch/mips/rb532/gpio.c
arch/mips/rb532/prom.c
arch/mips/rb532/setup.c
arch/mips/sni/rm200.c
arch/mips/vdso/Makefile
arch/mips/vdso/vgettimeofday.c
arch/nds32/Kconfig
arch/nds32/include/asm/cacheflush.h
arch/nds32/include/asm/pgtable.h
arch/nds32/include/asm/vmalloc.h [new file with mode: 0644]
arch/nds32/kernel/ex-exit.S
arch/nds32/kernel/ftrace.c
arch/nios2/include/asm/vmalloc.h [new file with mode: 0644]
arch/nios2/kernel/entry.S
arch/nios2/mm/ioremap.c
arch/openrisc/include/asm/vmalloc.h [new file with mode: 0644]
arch/parisc/Kconfig
arch/parisc/include/asm/cmpxchg.h
arch/parisc/include/asm/io.h
arch/parisc/include/asm/kexec.h
arch/parisc/include/asm/vmalloc.h [new file with mode: 0644]
arch/parisc/kernel/Makefile
arch/parisc/kernel/drivers.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/pdt.c
arch/parisc/kernel/perf.c
arch/parisc/kernel/process.c
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi
arch/powerpc/include/asm/barrier.h
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/spinlock.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/include/asm/vmalloc.h [new file with mode: 0644]
arch/powerpc/include/asm/xive-regs.h
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/lib/string_32.S
arch/powerpc/lib/string_64.S
arch/powerpc/mm/mem.c
arch/powerpc/mm/nohash/8xx.c
arch/powerpc/mm/slice.c
arch/powerpc/net/bpf_jit32.h
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/pseries/cmm.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/xive/common.c
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/boot/Makefile
arch/riscv/boot/dts/sifive/fu540-c000.dtsi
arch/riscv/include/asm/asm-prototypes.h
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/vmalloc.h [new file with mode: 0644]
arch/riscv/kernel/entry.S
arch/riscv/kernel/ftrace.c
arch/riscv/kernel/head.S
arch/riscv/kernel/irq.c
arch/riscv/kernel/process.c
arch/riscv/kernel/riscv_ksyms.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/lib/tishift.S
arch/riscv/lib/uaccess.S
arch/riscv/mm/Makefile
arch/riscv/mm/cacheflush.c
arch/riscv/mm/init.c
arch/riscv/net/bpf_jit_comp.c
arch/s390/Kconfig
arch/s390/include/asm/preempt.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/timex.h
arch/s390/include/asm/uv.h
arch/s390/include/asm/vmalloc.h [new file with mode: 0644]
arch/s390/kernel/dumpstack.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/mcount.S
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/unwind_bc.c
arch/s390/lib/spinlock.c
arch/s390/lib/test_unwind.c
arch/s390/mm/init.c
arch/s390/mm/kasan_init.c
arch/s390/purgatory/.gitignore
arch/s390/purgatory/Makefile
arch/s390/purgatory/string.c [new file with mode: 0644]
arch/sh/Kconfig
arch/sh/boards/board-sh7785lcr.c
arch/sh/boards/mach-cayman/irq.c
arch/sh/boards/mach-cayman/setup.c
arch/sh/boards/mach-sdk7786/fpga.c
arch/sh/drivers/heartbeat.c
arch/sh/drivers/pci/pci-sh5.c
arch/sh/drivers/platform_early.c
arch/sh/include/asm/io.h
arch/sh/include/asm/vmalloc.h [new file with mode: 0644]
arch/sh/kernel/cpu/irq/intc-sh5.c
arch/sh/kernel/cpu/sh2/smp-j2.c
arch/sh/kernel/cpu/sh5/clock-sh5.c
arch/sh/kernel/cpu/sh5/entry.S
arch/sh/kernel/dma-coherent.c
arch/sh/kernel/entry-common.S
arch/sh/kernel/kgdb.c
arch/sh/mm/init.c
arch/sparc/Kconfig
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/vmalloc.h [new file with mode: 0644]
arch/sparc/kernel/rtrap_64.S
arch/sparc/net/bpf_jit_comp_32.c
arch/um/Kconfig
arch/um/include/asm/ptrace-generic.h
arch/um/include/asm/vmalloc.h [new file with mode: 0644]
arch/um/kernel/process.c
arch/unicore32/include/asm/io.h
arch/unicore32/include/asm/vmalloc.h [new file with mode: 0644]
arch/x86/Kconfig
arch/x86/boot/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/boot/compressed/efi_stub_32.S [deleted file]
arch/x86/boot/compressed/efi_stub_64.S [deleted file]
arch/x86/boot/compressed/efi_thunk_64.S
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/setup.ld
arch/x86/entry/entry_64.S
arch/x86/entry/vdso/vdso-layout.lds.S
arch/x86/entry/vdso/vdso2c.c
arch/x86/entry/vdso/vma.c
arch/x86/events/amd/core.c
arch/x86/events/core.c
arch/x86/events/intel/bts.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore_snb.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/perf_event.h
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/cpu_entry_area.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/intel_pmc_ipc.h
arch/x86/include/asm/intel_scu_ipc.h
arch/x86/include/asm/intel_telemetry.h
arch/x86/include/asm/io.h
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/memtype.h [new file with mode: 0644]
arch/x86/include/asm/microcode_amd.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mtrr.h
arch/x86/include/asm/pat.h [deleted file]
arch/x86/include/asm/pci.h
arch/x86/include/asm/pgtable_32_areas.h [new file with mode: 0644]
arch/x86/include/asm/pgtable_32_types.h
arch/x86/include/asm/pgtable_areas.h [new file with mode: 0644]
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/set_memory.h
arch/x86/include/asm/text-patching.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/vdso.h
arch/x86/include/asm/vdso/gettimeofday.h
arch/x86/include/asm/vmalloc.h [new file with mode: 0644]
arch/x86/include/asm/vvar.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/acpi/sleep.h
arch/x86/kernel/alternative.c
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apb_timer.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/amd.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/mce/internal.h
arch/x86/kernel/cpu/mce/therm_throt.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/mtrr/mtrr.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/cpu/tsx.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/hpet.c
arch/x86/kernel/jump_label.c
arch/x86/kernel/kexec-bzimage64.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/kvm.c
arch/x86/kernel/ldt.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/quirks.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/sysfb_simplefb.c
arch/x86/kernel/tboot.c
arch/x86/kernel/traps.c
arch/x86/kernel/unwind_orc.c
arch/x86/kernel/vm86_32.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kernel/x86_init.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/mmu/mmu.c
arch/x86/lib/memmove_64.S
arch/x86/mm/Makefile
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/iomap_32.c
arch/x86/mm/ioremap.c
arch/x86/mm/pat/Makefile [new file with mode: 0644]
arch/x86/mm/pat/cpa-test.c [moved from arch/x86/mm/pageattr-test.c with 100% similarity]
arch/x86/mm/pat/memtype.c [moved from arch/x86/mm/pat.c with 84% similarity]
arch/x86/mm/pat/memtype.h [moved from arch/x86/mm/pat_internal.h with 81% similarity]
arch/x86/mm/pat/memtype_interval.c [new file with mode: 0644]
arch/x86/mm/pat/set_memory.c [moved from arch/x86/mm/pageattr.c with 98% similarity]
arch/x86/mm/pat_interval.c [deleted file]
arch/x86/mm/pgtable_32.c
arch/x86/mm/physaddr.c
arch/x86/mm/testmmiotrace.c
arch/x86/mm/tlb.c
arch/x86/pci/i386.c
arch/x86/pci/mmconfig_64.c
arch/x86/platform/efi/Makefile
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/efi_stub_32.S
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/efi/efi_thunk_64.S
arch/x86/platform/efi/quirks.c
arch/x86/platform/intel-quark/imr.c
arch/x86/platform/intel-quark/imr_selftest.c
arch/x86/platform/uv/bios_uv.c
arch/x86/um/tls_32.c
arch/x86/um/tls_64.c
arch/x86/xen/efi.c
arch/x86/xen/mmu_pv.c
arch/xtensa/Kconfig
arch/xtensa/include/asm/vmalloc.h [new file with mode: 0644]
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/process.c
arch/xtensa/kernel/traps.c
block/Kconfig
block/Makefile
block/bfq-iosched.c
block/bfq-wf2q.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-flush.c
block/blk-iocost.c
block/blk-map.c
block/blk-merge.c
block/blk-mq.c
block/blk-settings.c
block/blk-zoned.c
block/blk.h
block/bsg-lib.c
block/compat_ioctl.c
block/partition-generic.c
block/partitions/ldm.c
block/t10-pi.c
crypto/adiantum.c
crypto/asymmetric_keys/asym_tpm.c
crypto/asymmetric_keys/public_key.c
crypto/essiv.c
drivers/acpi/Kconfig
drivers/acpi/acpi_lpit.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpi_video.c
drivers/acpi/acpica/acapps.h
drivers/acpi/acpica/accommon.h
drivers/acpi/acpica/acconvert.h
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acdispat.h
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/achware.h
drivers/acpi/acpica/acinterp.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acopcode.h
drivers/acpi/acpica/acparser.h
drivers/acpi/acpica/acpredef.h
drivers/acpi/acpica/acresrc.h
drivers/acpi/acpica/acstruct.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/amlcode.h
drivers/acpi/acpica/amlresrc.h
drivers/acpi/acpica/dbhistry.c
drivers/acpi/acpica/dbinput.c
drivers/acpi/acpica/dsargs.c
drivers/acpi/acpica/dscontrol.c
drivers/acpi/acpica/dsdebug.c
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/dsopcode.c
drivers/acpi/acpica/dspkginit.c
drivers/acpi/acpica/dswexec.c
drivers/acpi/acpica/dswload.c
drivers/acpi/acpica/dswload2.c
drivers/acpi/acpica/dswscope.c
drivers/acpi/acpica/dswstate.c
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/evglock.c
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evhandler.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/acpica/evxfregn.c
drivers/acpi/acpica/exconcat.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/exconvrt.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/exdebug.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exfldio.c
drivers/acpi/acpica/exmisc.c
drivers/acpi/acpica/exmutex.c
drivers/acpi/acpica/exnames.c
drivers/acpi/acpica/exoparg1.c
drivers/acpi/acpica/exoparg2.c
drivers/acpi/acpica/exoparg3.c
drivers/acpi/acpica/exoparg6.c
drivers/acpi/acpica/exprep.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/exresnte.c
drivers/acpi/acpica/exresolv.c
drivers/acpi/acpica/exresop.c
drivers/acpi/acpica/exserial.c
drivers/acpi/acpica/exstore.c
drivers/acpi/acpica/exstoren.c
drivers/acpi/acpica/exstorob.c
drivers/acpi/acpica/exsystem.c
drivers/acpi/acpica/extrace.c
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/hwacpi.c
drivers/acpi/acpica/hwesleep.c
drivers/acpi/acpica/hwgpe.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/hwtimer.c
drivers/acpi/acpica/hwvalid.c
drivers/acpi/acpica/hwxface.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/acpica/nsarguments.c
drivers/acpi/acpica/nsconvert.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nsdumpdv.c
drivers/acpi/acpica/nsinit.c
drivers/acpi/acpica/nsload.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/acpica/nsprepkg.c
drivers/acpi/acpica/nsrepair.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/nswalk.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/psobject.c
drivers/acpi/acpica/psopcode.c
drivers/acpi/acpica/psopinfo.c
drivers/acpi/acpica/psparse.c
drivers/acpi/acpica/psscope.c
drivers/acpi/acpica/pstree.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/pswalk.c
drivers/acpi/acpica/psxface.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbfind.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/acpica/tbxfload.c
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/utaddress.c
drivers/acpi/acpica/utalloc.c
drivers/acpi/acpica/utascii.c
drivers/acpi/acpica/utbuffer.c
drivers/acpi/acpica/utcache.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utdecode.c
drivers/acpi/acpica/uteval.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/uthex.c
drivers/acpi/acpica/utids.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utlock.c
drivers/acpi/acpica/utobject.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utpredef.c
drivers/acpi/acpica/utprint.c
drivers/acpi/acpica/uttrack.c
drivers/acpi/acpica/utuuid.c
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxfinit.c
drivers/acpi/apei/ghes.c
drivers/acpi/arm64/iort.c
drivers/acpi/battery.c
drivers/acpi/button.c
drivers/acpi/device_pm.c
drivers/acpi/dptf/dptf_power.c
drivers/acpi/dptf/int340x_thermal.c
drivers/acpi/ec.c
drivers/acpi/fan.c
drivers/acpi/pptt.c
drivers/acpi/processor_idle.c
drivers/acpi/sleep.c
drivers/acpi/video_detect.c
drivers/android/binder.c
drivers/ata/acard-ahci.c
drivers/ata/ahci_brcm.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/pata_arasan_cf.c
drivers/ata/pata_macio.c
drivers/ata/pata_octeon_cf.c
drivers/ata/pata_rb532_cf.c
drivers/ata/sata_fsl.c
drivers/ata/sata_mv.c
drivers/ata/sata_nv.c
drivers/atm/eni.c
drivers/atm/firestream.c
drivers/base/devtmpfs.c
drivers/base/firmware_loader/builtin/Makefile
drivers/base/platform.c
drivers/base/power/runtime.c
drivers/base/power/wakeup.c
drivers/base/regmap/regmap-i2c.c
drivers/base/regmap/regmap.c
drivers/base/swnode.c
drivers/base/test/Kconfig
drivers/base/test/Makefile
drivers/base/test/property-entry-test.c [new file with mode: 0644]
drivers/bcma/driver_chipcommon_b.c
drivers/bcma/driver_pci_host.c
drivers/bcma/host_soc.c
drivers/bcma/scan.c
drivers/block/nbd.c
drivers/block/null_blk_zoned.c
drivers/block/pktcdvd.c
drivers/block/umem.c
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/bus/fsl-mc/mc-io.c
drivers/bus/ti-sysc.c
drivers/char/agp/generic.c
drivers/char/agp/intel-gtt.c
drivers/char/agp/isoch.c
drivers/char/applicom.c
drivers/char/hw_random/intel-rng.c
drivers/char/hw_random/octeon-rng.c
drivers/char/random.c
drivers/char/tpm/tpm-dev-common.c
drivers/char/tpm/tpm-dev.h
drivers/char/tpm/tpm-sysfs.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm2-cmd.c
drivers/char/tpm/tpm_ftpm_tee.c
drivers/char/tpm/tpm_tis_core.c
drivers/clk/at91/at91sam9260.c
drivers/clk/at91/at91sam9rl.c
drivers/clk/at91/at91sam9x5.c
drivers/clk/at91/pmc.c
drivers/clk/at91/sama5d2.c
drivers/clk/at91/sama5d4.c
drivers/clk/clk.c
drivers/clk/imx/clk-composite-8m.c
drivers/clk/imx/clk-imx7ulp.c
drivers/clk/imx/clk-pll14xx.c
drivers/clk/mmp/clk-of-mmp2.c
drivers/clk/qcom/gcc-sc7180.c
drivers/clk/qcom/gcc-sdm845.c
drivers/clk/qcom/gpucc-msm8998.c
drivers/clk/renesas/clk-rz.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
drivers/clk/sunxi-ng/ccu-sun8i-r.c
drivers/clk/sunxi-ng/ccu-sun8i-r40.c
drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
drivers/clk/tegra/clk.c
drivers/clk/ti/clk-dra7-atl.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/bcm2835_timer.c
drivers/clocksource/em_sti.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/hyperv_timer.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/clocksource/timer-cadence-ttc.c
drivers/clocksource/timer-microchip-pit64b.c [new file with mode: 0644]
drivers/clocksource/timer-riscv.c
drivers/clocksource/timer-ti-dm.c
drivers/cpufreq/brcmstb-avs-cpufreq.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/imx-cpufreq-dt.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpufreq/tegra186-cpufreq.c
drivers/cpufreq/vexpress-spc-cpufreq.c
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/coupled.c
drivers/cpuidle/cpuidle-clps711x.c
drivers/cpuidle/cpuidle-kirkwood.c
drivers/cpuidle/cpuidle.c
drivers/cpuidle/driver.c
drivers/cpuidle/governors/teo.c
drivers/cpuidle/sysfs.c
drivers/crypto/hifn_795x.c
drivers/crypto/hisilicon/sec2/sec.h
drivers/crypto/hisilicon/sec2/sec_crypto.c
drivers/crypto/hisilicon/sec2/sec_main.c
drivers/devfreq/Kconfig
drivers/devfreq/Makefile
drivers/devfreq/devfreq-event.c
drivers/devfreq/devfreq.c
drivers/devfreq/event/Kconfig
drivers/devfreq/event/exynos-nocp.c
drivers/devfreq/event/exynos-nocp.h
drivers/devfreq/event/exynos-ppmu.c
drivers/devfreq/event/exynos-ppmu.h
drivers/devfreq/event/rockchip-dfi.c
drivers/devfreq/exynos-bus.c
drivers/devfreq/imx8m-ddrc.c [new file with mode: 0644]
drivers/devfreq/rk3399_dmc.c
drivers/dma-buf/sync_file.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/altera-msgdma.c
drivers/dma/bcm2835-dma.c
drivers/dma/dma-axi-dmac.c
drivers/dma/dma-jz4780.c
drivers/dma/dmaengine.c
drivers/dma/dmaengine.h
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/fsl-edma-common.c
drivers/dma/fsl-edma-common.h
drivers/dma/fsl-edma.c
drivers/dma/fsl-qdma.c
drivers/dma/hisi_dma.c [new file with mode: 0644]
drivers/dma/idxd/Makefile [new file with mode: 0644]
drivers/dma/idxd/cdev.c [new file with mode: 0644]
drivers/dma/idxd/device.c [new file with mode: 0644]
drivers/dma/idxd/dma.c [new file with mode: 0644]
drivers/dma/idxd/idxd.h [new file with mode: 0644]
drivers/dma/idxd/init.c [new file with mode: 0644]
drivers/dma/idxd/irq.c [new file with mode: 0644]
drivers/dma/idxd/registers.h [new file with mode: 0644]
drivers/dma/idxd/submit.c [new file with mode: 0644]
drivers/dma/idxd/sysfs.c [new file with mode: 0644]
drivers/dma/imx-sdma.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/init.c
drivers/dma/k3dma.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/dma/of-dma.c
drivers/dma/owl-dma.c
drivers/dma/pl330.c
drivers/dma/plx_dma.c [new file with mode: 0644]
drivers/dma/s3c24xx-dma.c
drivers/dma/sf-pdma/sf-pdma.c
drivers/dma/sun4i-dma.c
drivers/dma/ti/Kconfig
drivers/dma/ti/Makefile
drivers/dma/ti/edma.c
drivers/dma/ti/k3-psil-am654.c [new file with mode: 0644]
drivers/dma/ti/k3-psil-j721e.c [new file with mode: 0644]
drivers/dma/ti/k3-psil-priv.h [new file with mode: 0644]
drivers/dma/ti/k3-psil.c [new file with mode: 0644]
drivers/dma/ti/k3-udma-glue.c [new file with mode: 0644]
drivers/dma/ti/k3-udma-private.c [new file with mode: 0644]
drivers/dma/ti/k3-udma.c [new file with mode: 0644]
drivers/dma/ti/k3-udma.h [new file with mode: 0644]
drivers/dma/virt-dma.c
drivers/dma/virt-dma.h
drivers/dma/xilinx/zynqmp_dma.c
drivers/edac/Kconfig
drivers/edac/amd64_edac.c
drivers/edac/amd64_edac.h
drivers/edac/aspeed_edac.c
drivers/edac/i3000_edac.c
drivers/edac/i3200_edac.c
drivers/edac/i5100_edac.c
drivers/edac/i82975x_edac.c
drivers/edac/ie31200_edac.c
drivers/edac/mce_amd.c
drivers/edac/sifive_edac.c
drivers/edac/skx_common.c
drivers/edac/x38_edac.c
drivers/firewire/nosy.c
drivers/firmware/broadcom/bcm47xx_nvram.c
drivers/firmware/broadcom/tee_bnxt_fw.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/arm-init.c
drivers/firmware/efi/capsule-loader.c
drivers/firmware/efi/earlycon.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/fake_mem.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/arm32-stub.c
drivers/firmware/efi/libstub/arm64-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/libstub/gop.c
drivers/firmware/efi/libstub/pci.c [new file with mode: 0644]
drivers/firmware/efi/libstub/random.c
drivers/firmware/efi/libstub/secureboot.c
drivers/firmware/efi/libstub/tpm.c
drivers/firmware/efi/memmap.c
drivers/firmware/efi/rci2-table.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-aspeed-sgpio.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-sifive.c [new file with mode: 0644]
drivers/gpio/gpio-thunderx.c
drivers/gpio/gpio-xgs-iproc.c
drivers/gpio/gpio-xtensa.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/acp/Kconfig
drivers/gpu/drm/amd/amdgpu/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdkfd/Kconfig
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dcn20/Makefile
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dsc/Makefile
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
drivers/gpu/drm/amd/display/include/i2caux_interface.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/arm/malidp_mw.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/gma500/gtt.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/display/intel_frontbuffer.c
drivers/gpu/drm/i915/display/intel_frontbuffer.h
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_hdcp.h
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/gem/i915_gem_busy.c
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/hypercall.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mpt.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_scheduler.c
drivers/gpu/drm/i915/i915_sw_fence_work.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/selftests/i915_random.h
drivers/gpu/drm/mcde/mcde_dsi.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/meson/meson_venc_cvbs.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/nouveau/dispnv50/atom.h
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/panfrost/panfrost_devfreq.c
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/gpu/drm/panfrost/panfrost_gem.h
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/panfrost/panfrost_job.h
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/panfrost/panfrost_mmu.h
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
drivers/gpu/drm/panfrost/panfrost_perfcnt.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rockchip/cdn-dp-core.h
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_tvout.c
drivers/gpu/drm/sti/sti_vtg.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tcon.h
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-ite.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-quirks.c
drivers/hid/hid-steam.c
drivers/hid/hidraw.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/uhid.c
drivers/hid/usbhid/hiddev.c
drivers/hid/wacom_wac.c
drivers/hv/hv_util.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/adm1177.c [new file with mode: 0644]
drivers/hwmon/adt7475.c
drivers/hwmon/drivetemp.c [new file with mode: 0644]
drivers/hwmon/hwmon.c
drivers/hwmon/i5k_amb.c
drivers/hwmon/k10temp.c
drivers/hwmon/max31730.c [new file with mode: 0644]
drivers/hwmon/nct7802.c
drivers/hwmon/pmbus/Kconfig
drivers/hwmon/pmbus/Makefile
drivers/hwmon/pmbus/ibm-cffps.c
drivers/hwmon/pmbus/max20730.c [new file with mode: 0644]
drivers/hwmon/pmbus/max20751.c
drivers/hwmon/pmbus/pmbus.c
drivers/hwmon/pmbus/pmbus.h
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/pmbus/pxe1610.c
drivers/hwmon/pmbus/tps53679.c
drivers/hwmon/pmbus/ucd9000.c
drivers/hwmon/pmbus/xdpe12284.c [new file with mode: 0644]
drivers/hwmon/pwm-fan.c
drivers/hwmon/w83627ehf.c
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/hwtracing/intel_th/core.c
drivers/hwtracing/intel_th/intel_th.h
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/intel_th/pci.c
drivers/i2c/busses/i2c-at91-core.c
drivers/i2c/busses/i2c-bcm2835.c
drivers/i2c/busses/i2c-highlander.c
drivers/i2c/busses/i2c-iop3xx.c
drivers/i2c/busses/i2c-pmcmsp.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core-base.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/i3c/master/i3c-master-cdns.c
drivers/idle/intel_idle.c
drivers/iio/accel/st_accel_core.c
drivers/iio/adc/ad7124.c
drivers/iio/adc/ad7606.c
drivers/iio/adc/ad7949.c
drivers/iio/adc/intel_mrfld_adc.c
drivers/iio/adc/max1027.c
drivers/iio/adc/max9611.c
drivers/iio/chemical/Kconfig
drivers/iio/humidity/hdc100x.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
drivers/iio/industrialio-buffer.c
drivers/iio/light/vcnl4000.c
drivers/iio/temperature/ltc2983.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/counters.c
drivers/infiniband/core/ib_core_uverbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/efa/efa_verbs.c
drivers/infiniband/hw/hfi1/iowait.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/trace_tid.h
drivers/infiniband/hw/hfi1/trace_tx.h
drivers/infiniband/hw/hfi1/verbs.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/cmd.c
drivers/infiniband/hw/mlx5/cmd.h
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_pcie.c
drivers/infiniband/sw/rxe/rxe_recv.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/keyboard/imx_sc_key.c
drivers/input/keyboard/pxa930_rotary.c
drivers/input/keyboard/sh_keysc.c
drivers/input/misc/keyspan_remote.c
drivers/input/misc/max77650-onkey.c
drivers/input/misc/pm8xxx-vibrator.c
drivers/input/misc/uinput.c
drivers/input/mouse/pxa930_trkball.c
drivers/input/rmi4/rmi_f54.c
drivers/input/rmi4/rmi_smbus.c
drivers/input/serio/gscps2.c
drivers/input/tablet/aiptek.c
drivers/input/tablet/gtco.c
drivers/input/tablet/pegasus_notetaker.c
drivers/input/touchscreen/sun4i-ts.c
drivers/input/touchscreen/sur40.c
drivers/interconnect/qcom/Kconfig
drivers/interconnect/qcom/msm8974.c
drivers/interconnect/qcom/qcs404.c
drivers/interconnect/qcom/sdm845.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-svm.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/ipack/carriers/tpci200.c
drivers/ipack/devices/ipoctal.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-aspeed-scu-ic.c [new file with mode: 0644]
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-imx-intmux.c [new file with mode: 0644]
drivers/irqchip/irq-ingenic.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-meson-gpio.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-nvic.c
drivers/irqchip/irq-renesas-intc-irqpin.c
drivers/irqchip/irq-sifive-plic.c
drivers/leds/leds-as3645a.c
drivers/leds/leds-gpio.c
drivers/leds/leds-lm3532.c
drivers/leds/leds-max77650.c
drivers/leds/leds-rb532.c
drivers/leds/trigger/ledtrig-pattern.c
drivers/lightnvm/pblk-trace.h
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.c
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/journal.c
drivers/md/bcache/super.c
drivers/md/dm-clone-metadata.c
drivers/md/dm-clone-metadata.h
drivers/md/dm-clone-target.c
drivers/md/dm-mpath.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c
drivers/md/md-bitmap.c
drivers/md/md.c
drivers/md/md.h
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/media/cec/cec-adap.c
drivers/media/common/videobuf2/videobuf2-vmalloc.c
drivers/media/pci/cx18/cx18-driver.c
drivers/media/pci/ivtv/ivtv-driver.c
drivers/media/pci/ivtv/ivtvfb.c
drivers/media/platform/davinci/dm355_ccdc.c
drivers/media/platform/davinci/dm644x_ccdc.c
drivers/media/platform/davinci/isif.c
drivers/media/platform/omap3isp/isppreview.c
drivers/media/platform/tegra-cec/tegra_cec.c
drivers/media/usb/pulse8-cec/pulse8-cec.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/message/fusion/mptctl.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/enclosure.c
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/context.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/lkdtm/bugs.c
drivers/misc/mic/scif/scif_nodeqp.c
drivers/misc/ocxl/context.c
drivers/misc/ocxl/file.c
drivers/misc/pti.c
drivers/misc/vmw_balloon.c
drivers/mmc/core/block.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/Kconfig
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/au1xmmc.c
drivers/mmc/host/bcm2835.c
drivers/mmc/host/cavium-thunderx.c
drivers/mmc/host/davinci_mmc.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/jz4740_mmc.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci.h
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/mvsdio.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/owl-mmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/renesas_sdhi.h
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/renesas_sdhi_internal_dmac.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-brcmstb.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-milbeaut.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-sirf.c
drivers/mmc/host/sdhci-spear.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sdhci_am654.c
drivers/mmc/host/sdhci_f_sdh30.c
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/sunxi-mmc.c
drivers/mmc/host/tmio_mmc_core.c
drivers/mmc/host/uniphier-sd.c
drivers/mmc/host/usdhi6rol0.c
drivers/mmc/host/via-sdmmc.c
drivers/mtd/devices/bcm47xxsflash.c
drivers/mtd/maps/amd76xrom.c
drivers/mtd/maps/ck804xrom.c
drivers/mtd/maps/esb2rom.c
drivers/mtd/maps/ichxrom.c
drivers/mtd/maps/intel_vr_nor.c
drivers/mtd/maps/l440gx.c
drivers/mtd/maps/netsc520.c
drivers/mtd/maps/nettel.c
drivers/mtd/maps/pci.c
drivers/mtd/maps/sc520cdp.c
drivers/mtd/maps/scb2_flash.c
drivers/mtd/maps/ts5500_flash.c
drivers/mtd/nand/onenand/omap2.c
drivers/mtd/nand/onenand/onenand_base.c
drivers/mtd/nand/onenand/samsung_mtd.c
drivers/mtd/nand/raw/au1550nd.c
drivers/mtd/nand/raw/cadence-nand-controller.c
drivers/mtd/nand/raw/denali_pci.c
drivers/mtd/nand/raw/fsl_upm.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/stm32_fmc2_nand.c
drivers/mtd/sm_ftl.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/bonding/bond_main.c
drivers/net/can/at91_can.c
drivers/net/can/cc770/cc770_isa.c
drivers/net/can/flexcan.c
drivers/net/can/m_can/tcan4x5x.c
drivers/net/can/mscan/mscan.c
drivers/net/can/sja1000/sja1000_isa.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing_main.c
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/dsa/mv88e6xxx/global1.c
drivers/net/dsa/mv88e6xxx/global1.h
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/ocelot/Kconfig
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_ptp.c
drivers/net/dsa/sja1105/sja1105_static_config.c
drivers/net/dsa/sja1105/sja1105_tas.c
drivers/net/ethernet/alacritech/slicoss.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amazon/ena/ena_com.h
drivers/net/ethernet/amazon/ena/ena_ethtool.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/atheros/ag71xx.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/sb1250-mac.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/liquidio/octeon_console.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sched.c
drivers/net/ethernet/chelsio/cxgb4/sched.h
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cortina/gemini.c
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
drivers/net/ethernet/i825xx/sni_82596.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/crdump.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en/health.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/natsemi/sonic.c
drivers/net/ethernet/natsemi/sonic.h
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/bpf/offload.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/realtek/r8169_firmware.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/socionext/sni_ave.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/ethernet/ti/cpsw_ethtool.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/fddi/defxx.c
drivers/net/fddi/defza.c
drivers/net/fjes/fjes_ethtool.c
drivers/net/fjes/fjes_hw.c
drivers/net/fjes/fjes_main.c
drivers/net/fjes/fjes_trace.h
drivers/net/geneve.c
drivers/net/gtp.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/mkiss.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/macvlan.c
drivers/net/netdevsim/dev.c
drivers/net/phy/Kconfig
drivers/net/phy/aquantia_main.c
drivers/net/phy/dp83867.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/slip/slip.c
drivers/net/tun.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/sierra_net.c
drivers/net/usb/usbnet.c
drivers/net/vxlan.c
drivers/net/wan/fsl_ucc_hdlc.c
drivers/net/wan/lapbether.c
drivers/net/wan/sdla.c
drivers/net/wan/wanxl.c
drivers/net/wireless/ath/ath10k/ahb.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/trace.h
drivers/net/wireless/ath/ath5k/ahb.c
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/intel/ipw2x00/ipw2100.c
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/marvell/libertas/cfg.c
drivers/net/wireless/marvell/libertas/debugfs.c
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
drivers/net/wireless/marvell/mwifiex/tdls.c
drivers/net/wireless/marvell/mwifiex/util.h
drivers/net/wireless/mediatek/mt76/agg-rx.c
drivers/net/wireless/mediatek/mt76/airtime.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
drivers/net/xen-netback/interface.c
drivers/nfc/nxp-nci/i2c.c
drivers/nfc/pn533/usb.c
drivers/nfc/s3fwrn5/firmware.c
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/loop.c
drivers/of/of_mdio.c
drivers/of/platform.c
drivers/opp/core.c
drivers/opp/of.c
drivers/opp/opp.h
drivers/opp/ti-opp-supply.c
drivers/parisc/ccio-dma.c
drivers/parisc/dino.c
drivers/parisc/eisa.c
drivers/parisc/iosapic.c
drivers/parisc/lba_pci.c
drivers/parisc/sba_iommu.c
drivers/pci/controller/dwc/pci-dra7xx.c
drivers/pci/controller/dwc/pcie-designware-ep.c
drivers/pci/controller/pcie-rockchip-host.c
drivers/pci/msi.c
drivers/pci/pci.c
drivers/pci/quirks.c
drivers/perf/arm_smmuv3_pmu.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/hisilicon/hisi_uncore_pmu.c
drivers/phy/motorola/phy-cpcap-usb.c
drivers/phy/motorola/phy-mapphone-mdm6600.c
drivers/phy/qualcomm/phy-qcom-qmp.c
drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
drivers/pinctrl/Kconfig
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/bcm/pinctrl-ns2-mux.c
drivers/pinctrl/bcm/pinctrl-nsp-mux.c
drivers/pinctrl/cirrus/Kconfig
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-imx1-core.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-sunrisepoint.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinmux.c
drivers/platform/chrome/wilco_ec/keyboard_leds.c
drivers/platform/mellanox/mlxbf-bootctl.c
drivers/platform/mellanox/mlxbf-tmfifo.c
drivers/platform/mellanox/mlxreg-hotplug.c
drivers/platform/mips/Kconfig
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/gpd-pocket-fan.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-uncore-frequency.c [new file with mode: 0644]
drivers/platform/x86/intel_atomisp2_pm.c
drivers/platform/x86/intel_cht_int33fe_typec.c
drivers/platform/x86/intel_ips.h
drivers/platform/x86/intel_mid_powerbtn.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/intel_pmc_core.h
drivers/platform/x86/intel_pmc_core_pltdrv.c
drivers/platform/x86/intel_pmc_ipc.c
drivers/platform/x86/intel_scu_ipc.c
drivers/platform/x86/intel_speed_select_if/isst_if_common.c
drivers/platform/x86/intel_telemetry_debugfs.c
drivers/platform/x86/intel_telemetry_pltdrv.c
drivers/platform/x86/mlx-platform.c
drivers/platform/x86/pcengines-apuv2.c
drivers/platform/x86/pmc_atom.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/touchscreen_dmi.c
drivers/pnp/isapnp/core.c
drivers/power/avs/Kconfig
drivers/power/avs/Makefile
drivers/power/avs/qcom-cpr.c [new file with mode: 0644]
drivers/powercap/intel_rapl_common.c
drivers/ptp/Kconfig
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_private.h
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/axp20x-regulator.c
drivers/regulator/bd70528-regulator.c
drivers/regulator/bd71828-regulator.c [new file with mode: 0644]
drivers/regulator/bd718x7-regulator.c
drivers/regulator/core.c
drivers/regulator/da9210-regulator.c
drivers/regulator/da9211-regulator.c
drivers/regulator/helpers.c
drivers/regulator/isl9305.c
drivers/regulator/lp3971.c
drivers/regulator/ltc3676.c
drivers/regulator/max77650-regulator.c
drivers/regulator/mp8859.c [new file with mode: 0644]
drivers/regulator/mpq7920.c [new file with mode: 0644]
drivers/regulator/mpq7920.h [new file with mode: 0644]
drivers/regulator/mt6311-regulator.c
drivers/regulator/pv88060-regulator.c
drivers/regulator/pv88090-regulator.c
drivers/regulator/rk808-regulator.c
drivers/regulator/rn5t618-regulator.c
drivers/regulator/s2mpa01.c
drivers/regulator/s2mps11.c
drivers/regulator/s5m8767.c
drivers/regulator/slg51000-regulator.c
drivers/regulator/sy8106a-regulator.c
drivers/regulator/sy8824x.c
drivers/regulator/ti-abb-regulator.c
drivers/regulator/tps65132-regulator.c
drivers/regulator/vctrl-regulator.c
drivers/regulator/vqmmc-ipq4019-regulator.c [new file with mode: 0644]
drivers/reset/core.c
drivers/reset/reset-brcmstb.c
drivers/rtc/rtc-mc146818-lib.c
drivers/rtc/rtc-mt6397.c
drivers/rtc/rtc-sh.c
drivers/rtc/rtc-sun6i.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.h
drivers/s390/block/dasd_proc.c
drivers/s390/cio/device_ops.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_ccamisc.c
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2c.c
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l2_sys.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/Kconfig
drivers/scsi/aacraid/aachba.c
drivers/scsi/aic7xxx/aic79xx_osm_pci.c
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/csiostor/csio_init.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/hpsa.c
drivers/scsi/lasi700.c
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/megaraid/megaraid_mbox.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/myrb.c
drivers/scsi/myrs.c
drivers/scsi/pcmcia/nsp_cs.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/sni_53c710.c
drivers/scsi/storvsc_drv.c
drivers/scsi/sun3x_esp.c
drivers/scsi/ufs/cdns-pltfrm.c
drivers/scsi/ufs/ufs_bsg.c
drivers/scsi/zalon.c
drivers/scsi/zorro_esp.c
drivers/sh/clk/core.c
drivers/sh/intc/core.c
drivers/sh/intc/userimask.c
drivers/soc/Kconfig
drivers/soc/Makefile
drivers/soc/amlogic/meson-ee-pwrc.c
drivers/soc/sifive/Kconfig [new file with mode: 0644]
drivers/soc/sifive/Makefile [new file with mode: 0644]
drivers/soc/sifive/sifive_l2_cache.c [moved from arch/riscv/mm/sifive_l2_cache.c with 99% similarity]
drivers/soc/tegra/flowctrl.c
drivers/soc/tegra/fuse/fuse-tegra.c
drivers/soc/tegra/fuse/tegra-apbmisc.c
drivers/soc/tegra/pmc.c
drivers/soc/ti/Kconfig
drivers/soc/ti/Makefile
drivers/soc/ti/k3-ringacc.c [new file with mode: 0644]
drivers/soc/ti/wkup_m3_ipc.c
drivers/soc/xilinx/xlnx_vcu.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-atmel.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-cadence.c
drivers/spi/spi-cavium-thunderx.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw.c
drivers/spi/spi-dw.h
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-fsl-lpspi.c
drivers/spi/spi-fsl-qspi.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-hisi-sfc-v3xx.c [new file with mode: 0644]
drivers/spi/spi-img-spfi.c
drivers/spi/spi-imx.c
drivers/spi/spi-jcore.c
drivers/spi/spi-meson-spicc.c
drivers/spi/spi-mxs.c
drivers/spi/spi-npcm-fiu.c
drivers/spi/spi-npcm-pspi.c
drivers/spi/spi-nxp-fspi.c
drivers/spi/spi-oc-tiny.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-qcom-qspi.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-sirf.c
drivers/spi/spi-sprd.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spi-stm32.c
drivers/spi/spi-tegra114.c
drivers/spi/spi-ti-qspi.c
drivers/spi/spi-topcliff-pch.c
drivers/spi/spi-uniphier.c
drivers/spi/spi.c
drivers/ssb/driver_extif.c
drivers/ssb/driver_pcicore.c
drivers/staging/axis-fifo/Kconfig
drivers/staging/comedi/drivers/adv_pci1710.c
drivers/staging/comedi/drivers/gsc_hpdi.c
drivers/staging/comedi/drivers/ni_routes.c
drivers/staging/exfat/exfat.h
drivers/staging/exfat/exfat_core.c
drivers/staging/exfat/exfat_super.c
drivers/staging/fbtft/fb_uc1611.c
drivers/staging/fbtft/fb_watterott.c
drivers/staging/fbtft/fbtft-core.c
drivers/staging/gasket/gasket_core.c
drivers/staging/hp/Kconfig
drivers/staging/isdn/gigaset/usb-gigaset.c
drivers/staging/kpc2000/kpc2000/core.c
drivers/staging/kpc2000/kpc2000_i2c.c
drivers/staging/kpc2000/kpc2000_spi.c
drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
drivers/staging/media/allegro-dvt/allegro-core.c
drivers/staging/media/ipu3/include/intel-ipu3.h
drivers/staging/octeon/Kconfig
drivers/staging/qlge/qlge_ethtool.c
drivers/staging/qlge/qlge_main.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/rtl8192e/rtl8192e/rtl_core.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rts5208/rtsx.c
drivers/staging/sm750fb/sm750_hw.c
drivers/staging/uwb/whc-rc.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/staging/vt6656/baseband.c
drivers/staging/vt6656/card.c
drivers/staging/vt6656/device.h
drivers/staging/vt6656/main_usb.c
drivers/staging/vt6656/usbpipe.c
drivers/staging/vt6656/usbpipe.h
drivers/staging/vt6656/wcmd.c
drivers/staging/wfx/data_tx.c
drivers/staging/wfx/data_tx.h
drivers/staging/wfx/hif_tx_mib.h
drivers/staging/wfx/main.c
drivers/staging/wfx/queue.c
drivers/staging/wfx/sta.c
drivers/staging/wlan-ng/Kconfig
drivers/target/iscsi/cxgbit/cxgbit_main.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_iblock.c
drivers/tc/tc.c
drivers/tee/optee/Kconfig
drivers/tee/optee/shm_pool.c
drivers/thermal/Kconfig
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/intel/int340x_thermal/int3403_thermal.c
drivers/thermal/qcom/tsens.c
drivers/tty/cyclades.c
drivers/tty/mips_ejtag_fdc.c
drivers/tty/moxa.c
drivers/tty/serdev/core.c
drivers/tty/serial/8250/8250_gsc.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/dz.c
drivers/tty/serial/lantiq.c
drivers/tty/serial/meson_uart.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/mux.c
drivers/tty/serial/owl-uart.c
drivers/tty/serial/pic32_uart.c
drivers/tty/serial/rda-uart.c
drivers/tty/serial/sb1250-duart.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sprd_serial.c
drivers/tty/serial/zs.c
drivers/tty/synclink.c
drivers/tty/synclink_gt.c
drivers/tty/synclinkmp.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/atm/usbatm.c
drivers/usb/cdns3/gadget.c
drivers/usb/chipidea/host.c
drivers/usb/common/usb-conn-gpio.c
drivers/usb/core/config.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/urb.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/host.c
drivers/usb/early/xhci-dbc.c
drivers/usb/gadget/function/f_ecm.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/gadget/udc/amd5536udc_pci.c
drivers/usb/gadget/udc/goku_udc.c
drivers/usb/gadget/udc/net2272.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/host/ehci-pmcmsp.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ohci-da8xx.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/isp1760/isp1760-if.c
drivers/usb/misc/adutux.c
drivers/usb/misc/idmouse.c
drivers/usb/mon/mon_bin.c
drivers/usb/musb/jz4740.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musbhsdma.c
drivers/usb/roles/class.c
drivers/usb/roles/intel-xhci-usb-role-switch.c
drivers/usb/serial/ch341.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/opticon.c
drivers/usb/serial/option.c
drivers/usb/serial/quatech2.c
drivers/usb/serial/usb-serial-simple.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/usb-wwan.h
drivers/usb/serial/usb_wwan.c
drivers/usb/storage/scsiglue.c
drivers/usb/typec/class.c
drivers/usb/typec/tcpm/Kconfig
drivers/usb/typec/tcpm/tcpci.c
drivers/usb/typec/ucsi/ucsi.h
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/usb/usbip/usbip_common.c
drivers/usb/usbip/vhci_rx.c
drivers/vfio/pci/vfio_pci_rdwr.c
drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
drivers/vfio/platform/vfio_platform_common.c
drivers/video/fbdev/carminefb.c
drivers/video/fbdev/i810/i810_main.c
drivers/video/fbdev/intelfb/intelfbdrv.c
drivers/video/fbdev/kyro/fbdev.c
drivers/video/fbdev/matrox/matroxfb_base.c
drivers/video/fbdev/mbx/mbxfb.c
drivers/video/fbdev/mmp/hw/mmp_ctrl.c
drivers/video/fbdev/pm2fb.c
drivers/video/fbdev/pm3fb.c
drivers/video/fbdev/pmag-aa-fb.c
drivers/video/fbdev/pmag-ba-fb.c
drivers/video/fbdev/pmagb-b-fb.c
drivers/video/fbdev/pvr2fb.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/s1d13xxxfb.c
drivers/video/fbdev/sh7760fb.c
drivers/video/fbdev/sh_mobile_lcdcfb.c
drivers/video/fbdev/sstfb.c
drivers/video/fbdev/stifb.c
drivers/video/fbdev/tdfxfb.c
drivers/video/fbdev/tgafb.c
drivers/video/fbdev/tridentfb.c
drivers/video/fbdev/valkyriefb.c
drivers/video/fbdev/vermilion/cr_pll.c
drivers/video/fbdev/vermilion/vermilion.c
drivers/video/fbdev/via/via-core.c
drivers/video/fbdev/w100fb.c
drivers/virt/vboxguest/vboxguest_core.c
drivers/virt/vboxguest/vboxguest_utils.c
drivers/virtio/virtio_balloon.c
drivers/vme/boards/vme_vmivme7805.c
drivers/vme/bridges/vme_ca91cx42.c
drivers/vme/bridges/vme_tsi148.c
drivers/w1/masters/matrox_w1.c
drivers/watchdog/Kconfig
drivers/watchdog/bcm63xx_wdt.c
drivers/watchdog/imx7ulp_wdt.c
drivers/watchdog/intel_scu_watchdog.c
drivers/watchdog/orion_wdt.c
drivers/watchdog/rc32434_wdt.c
drivers/watchdog/rn5t618_wdt.c
drivers/watchdog/w83627hf_wdt.c
drivers/xen/balloon.c
drivers/xen/grant-table.c
drivers/xen/preempt.c
drivers/xen/xenbus/xenbus.h
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenbus/xenbus_probe_backend.c
drivers/xen/xenbus/xenbus_probe_frontend.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/dynroot.c
fs/afs/mntpt.c
fs/afs/proc.c
fs/afs/server.c
fs/afs/super.c
fs/btrfs/Kconfig
fs/btrfs/compression.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/tests/free-space-tree-tests.c
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/tree-checker.c
fs/btrfs/tree-log.c
fs/btrfs/uuid-tree.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/buffer.c
fs/ceph/caps.c
fs/ceph/debugfs.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/mdsmap.c
fs/ceph/super.c
fs/ceph/super.h
fs/char_dev.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/readdir.c
fs/cifs/smb2file.c
fs/cifs/smb2inode.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/crypto/keyring.c
fs/direct-io.c
fs/drop_caches.c
fs/erofs/xattr.c
fs/ext4/block_validity.c
fs/ext4/dir.c
fs/ext4/ialloc.c
fs/ext4/inode-test.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/fuse/file.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/internal.h
fs/io-wq.c
fs/io-wq.h
fs/io_uring.c
fs/locks.c
fs/mpage.c
fs/namei.c
fs/namespace.c
fs/nfs/nfstrace.h
fs/notify/fsnotify.c
fs/nsfs.c
fs/ocfs2/dlmglue.c
fs/ocfs2/journal.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/export.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/super.c
fs/pipe.c
fs/posix_acl.c
fs/proc/Kconfig
fs/proc/base.c
fs/proc/namespaces.c
fs/proc/stat.c
fs/proc/uptime.c
fs/pstore/ram.c
fs/pstore/ram_core.c
fs/quota/dquot.c
fs/readdir.c
fs/reiserfs/xattr.c
fs/stack.c
fs/super.c
fs/timerfd.c
fs/verity/enable.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_dir2.c
fs/xfs/libxfs/xfs_dir2_priv.h
fs/xfs/libxfs/xfs_dir2_sf.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_ialloc.h
fs/xfs/libxfs/xfs_trans_resv.c
fs/xfs/scrub/trace.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_trace.h
include/acpi/acbuffer.h
include/acpi/acconfig.h
include/acpi/acexcep.h
include/acpi/acnames.h
include/acpi/acoutput.h
include/acpi/acpi.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/acrestyp.h
include/acpi/actbl.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actbl3.h
include/acpi/actypes.h
include/acpi/acuuid.h
include/acpi/platform/acenv.h
include/acpi/platform/acenvex.h
include/acpi/platform/acgcc.h
include/acpi/platform/acgccex.h
include/acpi/platform/acintel.h
include/acpi/platform/aclinux.h
include/acpi/platform/aclinuxex.h
include/asm-generic/cacheflush.h
include/asm-generic/io.h
include/asm-generic/iomap.h
include/asm-generic/vdso/vsyscall.h
include/clocksource/hyperv_timer.h
include/drm/drm_dp_mst_helper.h
include/dt-bindings/dma/x1830-dma.h [new file with mode: 0644]
include/dt-bindings/interrupt-controller/aspeed-scu-ic.h [new file with mode: 0644]
include/dt-bindings/reset/amlogic,meson8b-reset.h
include/linux/acpi.h
include/linux/ahci_platform.h
include/linux/alarmtimer.h
include/linux/bio.h
include/linux/blk-cgroup.h
include/linux/blkdev.h
include/linux/bpf-cgroup.h
include/linux/bpf.h
include/linux/bvec.h
include/linux/can/dev.h
include/linux/cpufreq.h
include/linux/cpuidle.h
include/linux/devfreq.h
include/linux/device.h
include/linux/dma/k3-psil.h [new file with mode: 0644]
include/linux/dma/k3-udma-glue.h [new file with mode: 0644]
include/linux/dma/ti-cppi5.h [new file with mode: 0644]
include/linux/dmaengine.h
include/linux/efi.h
include/linux/filter.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/genhd.h
include/linux/gpio/consumer.h
include/linux/hrtimer.h
include/linux/hugetlb.h
include/linux/hwmon.h
include/linux/i2c.h
include/linux/if_ether.h
include/linux/initrd.h
include/linux/io.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqchip/arm-gic-v4.h
include/linux/irqdomain.h
include/linux/jbd2.h
include/linux/kasan.h
include/linux/kernel.h
include/linux/kvm_host.h
include/linux/libata.h
include/linux/list.h
include/linux/list_nulls.h
include/linux/lsm_audit.h
include/linux/memory_hotplug.h
include/linux/mfd/mt6397/rtc.h
include/linux/mfd/tmio.h
include/linux/mm.h
include/linux/mmc/slot-gpio.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/module.h
include/linux/mtd/flashchip.h
include/linux/namei.h
include/linux/netdevice.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/nfnetlink.h
include/linux/nsproxy.h
include/linux/nvme-fc-driver.h
include/linux/of_mdio.h
include/linux/pci_ids.h
include/linux/phy.h
include/linux/phy_led_triggers.h
include/linux/pinctrl/consumer.h
include/linux/platform_data/mlxreg.h
include/linux/platform_data/ti-sysc.h
include/linux/platform_data/x86/asus-wmi.h
include/linux/pmbus.h
include/linux/posix-clock.h
include/linux/printk.h
include/linux/proc_ns.h
include/linux/property.h
include/linux/raid/pq.h
include/linux/rcu_segcblist.h
include/linux/rculist.h
include/linux/rculist_nulls.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/regmap.h
include/linux/regulator/consumer.h
include/linux/resctrl.h [new file with mode: 0644]
include/linux/sched.h
include/linux/sched/cpufreq.h
include/linux/sched/isolation.h
include/linux/security.h
include/linux/skmsg.h
include/linux/smp.h
include/linux/soc/ti/k3-ringacc.h [new file with mode: 0644]
include/linux/spi/spi.h
include/linux/spi/spi_oc_tiny.h
include/linux/stop_machine.h
include/linux/suspend.h
include/linux/sxgbe_platform.h
include/linux/syscalls.h
include/linux/tick.h
include/linux/time.h
include/linux/time_namespace.h [new file with mode: 0644]
include/linux/tnum.h
include/linux/tpm.h
include/linux/trace_events.h
include/linux/user_namespace.h
include/linux/vmalloc.h
include/linux/xarray.h
include/net/cfg80211.h
include/net/devlink.h
include/net/dst.h
include/net/dst_ops.h
include/net/garp.h
include/net/inet_hashtables.h
include/net/ip_tunnels.h
include/net/mrp.h
include/net/neighbour.h
include/net/netfilter/nf_conntrack_helper.h
include/net/netfilter/nf_flow_table.h
include/net/netfilter/nf_tables_core.h
include/net/netns/nftables.h
include/net/sch_generic.h
include/net/sock.h
include/net/tcp.h
include/net/x25.h
include/rdma/ib_verbs.h
include/soc/sifive/sifive_l2_cache.h [moved from arch/riscv/include/asm/sifive_l2_cache.h with 72% similarity]
include/sound/soc.h
include/trace/events/afs.h
include/trace/events/bcache.h
include/trace/events/filemap.h
include/trace/events/huge_memory.h
include/trace/events/preemptirq.h
include/trace/events/rcu.h
include/trace/events/rpm.h
include/trace/events/workqueue.h
include/trace/events/xen.h
include/trace/trace_events.h
include/uapi/asm-generic/mman-common.h
include/uapi/linux/bcache.h
include/uapi/linux/hidraw.h
include/uapi/linux/idxd.h [new file with mode: 0644]
include/uapi/linux/input.h
include/uapi/linux/io_uring.h
include/uapi/linux/kcov.h
include/uapi/linux/netfilter/xt_sctp.h
include/uapi/linux/nl80211.h
include/uapi/linux/sched.h
include/vdso/datapage.h
include/vdso/helpers.h
include/xen/interface/io/ring.h
include/xen/xen-ops.h
include/xen/xenbus.h
init/Kconfig
init/do_mounts.c
init/do_mounts_initrd.c
init/main.c
ipc/util.c
kernel/Kconfig.locks
kernel/audit.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/local_storage.c
kernel/bpf/tnum.c
kernel/bpf/trampoline.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/cgroup/rstat.c
kernel/cpu.c
kernel/cred.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/irq/cpuhotplug.c
kernel/irq/irqdesc.c
kernel/irq/irqdomain.c
kernel/irq/manage.c
kernel/irq/spurious.c
kernel/kexec.c
kernel/kexec_core.c
kernel/kexec_file.c
kernel/kexec_internal.h
kernel/kprobes.c
kernel/locking/lockdep.c
kernel/locking/lockdep_proc.c
kernel/locking/mutex.c
kernel/locking/osq_lock.c
kernel/locking/qspinlock.c
kernel/locking/rwsem.c
kernel/locking/spinlock_debug.c
kernel/module.c
kernel/nsproxy.c
kernel/power/Kconfig
kernel/power/hibernate.c
kernel/power/main.c
kernel/power/snapshot.c
kernel/power/suspend.c
kernel/power/suspend_test.c
kernel/ptrace.c
kernel/rcu/Kconfig
kernel/rcu/Makefile
kernel/rcu/rcu.h
kernel/rcu/rcu_segcblist.c
kernel/rcu/rcu_segcblist.h
kernel/rcu/rcuperf.c
kernel/rcu/rcutorture.c
kernel/rcu/srcutiny.c
kernel/rcu/srcutree.c
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h
kernel/rcu/update.c
kernel/rseq.c
kernel/sched/clock.c
kernel/sched/core.c
kernel/sched/cpufreq.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/cpupri.c
kernel/sched/cpupri.h
kernel/sched/cputime.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/isolation.c
kernel/sched/pelt.c
kernel/sched/psi.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/topology.c
kernel/sched/wait_bit.c
kernel/seccomp.c
kernel/smp.c
kernel/stop_machine.c
kernel/sysctl.c
kernel/taskstats.c
kernel/time/Makefile
kernel/time/alarmtimer.c
kernel/time/hrtimer.c
kernel/time/namespace.c [new file with mode: 0644]
kernel/time/posix-clock.c
kernel/time/posix-cpu-timers.c
kernel/time/posix-stubs.c
kernel/time/posix-timers.c
kernel/time/posix-timers.h
kernel/time/sched_clock.c
kernel/time/tick-common.c
kernel/time/tick-sched.c
kernel/time/vsyscall.c
kernel/trace/fgraph.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_entries.h
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_inject.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_export.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_seq.c
kernel/trace/trace_stack.c
kernel/trace/trace_syscalls.c
kernel/trace/trace_uprobe.c
kernel/trace/tracing_map.c
kernel/up.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/crc64.c
lib/debugobjects.c
lib/devres.c
lib/fdt_addresses.c [new file with mode: 0644]
lib/iov_iter.c
lib/livepatch/test_klp_shadow_vars.c
lib/raid6/algos.c
lib/raid6/mktables.c
lib/raid6/unroll.awk
lib/sbitmap.c
lib/strncpy_from_user.c
lib/strnlen_user.c
lib/test_xarray.c
lib/vdso/Kconfig
lib/vdso/gettimeofday.c
lib/xarray.c
mm/gup_benchmark.c
mm/highmem.c
mm/huge_memory.c
mm/hugetlb.c
mm/hugetlb_cgroup.c
mm/kasan/common.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/memremap.c
mm/migrate.c
mm/mmap.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/shmem.c
mm/slab.c
mm/slab_common.c
mm/slub.c
mm/sparse.c
mm/vmalloc.c
mm/vmscan.c
mm/zsmalloc.c
net/802/mrp.c
net/8021q/vlan.h
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c
net/atm/proc.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/main.c
net/bpf/test_run.c
net/bridge/br.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_nf_core.c
net/bridge/netfilter/ebtables.c
net/caif/caif_usb.c
net/can/j1939/socket.c
net/core/dev.c
net/core/devlink.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/core/sock.c
net/core/sock_map.c
net/core/sysctl_net_core.c
net/core/utils.c
net/core/xdp.c
net/dccp/proto.c
net/decnet/dn_route.c
net/dsa/dsa2.c
net/dsa/tag_gswip.c
net/dsa/tag_ksz.c
net/dsa/tag_qca.c
net/hsr/hsr_debugfs.c
net/hsr/hsr_device.c
net/hsr/hsr_framereg.c
net/hsr/hsr_framereg.h
net/hsr/hsr_main.c
net/hsr/hsr_main.h
net/hsr/hsr_netlink.c
net/ipv4/esp4_offload.c
net/ipv4/fib_trie.c
net/ipv4/fou.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_ulp.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/esp6_offload.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/route.c
net/ipv6/seg6_local.c
net/ipv6/sit.c
net/ipv6/xfrm6_policy.c
net/iucv/af_iucv.c
net/llc/llc_station.c
net/mac80211/airtime.c
net/mac80211/cfg.c
net/mac80211/debugfs_sta.c
net/mac80211/main.c
net/mac80211/mesh_hwmp.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tkip.c
net/mac80211/trace.h
net/mac80211/tx.c
net/netfilter/ipset/ip_set_bitmap_gen.h
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_nat_proto.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_bitwise.c
net/netfilter/nft_cmp.c
net/netfilter/nft_ct.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_osf.c
net/netfilter/nft_range.c
net/netfilter/nft_redir.c
net/netfilter/nft_set_rbtree.c
net/netfilter/nft_tproxy.c
net/netfilter/nft_tunnel.c
net/netfilter/xt_RATEEST.c
net/netlink/af_netlink.c
net/nfc/nci/uart.c
net/openvswitch/datapath.c
net/openvswitch/flow.h
net/packet/af_packet.c
net/qrtr/qrtr.c
net/rfkill/core.c
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/conn_event.c
net/rxrpc/conn_service.c
net/rxrpc/input.c
net/rxrpc/rxkad.c
net/rxrpc/security.c
net/sched/act_ct.c
net/sched/act_ctinfo.c
net/sched/act_ife.c
net/sched/act_mirred.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/ematch.c
net/sched/sch_cake.c
net/sched/sch_fq.c
net/sched/sch_prio.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/stream.c
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc_core.c
net/socket.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/tipc/Makefile
net/tipc/bcast.c
net/tipc/crypto.c
net/tipc/discover.c
net/tipc/name_table.c
net/tipc/netlink_compat.c
net/tipc/socket.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/vmw_vsock/hyperv_transport.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-core.c
net/x25/af_x25.c
net/x25/x25_in.c
net/xdp/xsk.c
net/xfrm/xfrm_interface.c
samples/bpf/syscall_tp_kern.c
samples/bpf/trace_event_user.c
samples/livepatch/livepatch-shadow-fix1.c
samples/livepatch/livepatch-shadow-fix2.c
samples/livepatch/livepatch-shadow-mod.c
samples/seccomp/user-trap.c
samples/trace_printk/trace-printk.c
scripts/.gitignore
scripts/Kconfig.include
scripts/Makefile
scripts/checkpatch.pl
scripts/coccinelle/free/devm_free.cocci
scripts/coccinelle/free/iounmap.cocci
scripts/gcc-plugins/Kconfig
scripts/kallsyms.c
scripts/kconfig/expr.c
scripts/link-vmlinux.sh
scripts/mkcompile_h
scripts/package/mkdebian
scripts/recordmcount.c
scripts/sortextable.h [deleted file]
scripts/sorttable.c [moved from scripts/sortextable.c with 67% similarity]
scripts/sorttable.h [new file with mode: 0644]
security/Makefile
security/apparmor/apparmorfs.c
security/apparmor/domain.c
security/apparmor/file.c
security/apparmor/mount.c
security/apparmor/policy.c
security/integrity/ima/ima_policy.c
security/keys/Kconfig
security/keys/Makefile
security/keys/compat.c
security/keys/internal.h
security/keys/trusted-keys/trusted_tpm2.c
security/lockdown/lockdown.c
security/lsm_audit.c
security/security.c
security/selinux/Kconfig
security/selinux/Makefile
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/ibpkey.c
security/selinux/include/avc.h
security/selinux/include/classmap.h
security/selinux/include/ibpkey.h
security/selinux/include/objsec.h
security/selinux/include/security.h
security/selinux/netif.c
security/selinux/netnode.c
security/selinux/netport.c
security/selinux/selinuxfs.c
security/selinux/ss/context.h
security/selinux/ss/policydb.c
security/selinux/ss/policydb.h
security/selinux/ss/services.c
security/selinux/ss/services.h
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
security/tomoyo/common.c
security/tomoyo/domain.c
security/tomoyo/group.c
security/tomoyo/realpath.c
security/tomoyo/util.c
sound/core/pcm_native.c
sound/core/seq/seq_timer.c
sound/drivers/ml403-ac97cr.c
sound/firewire/dice/dice-extension.c
sound/firewire/fireface/ff-pcm.c
sound/firewire/motu/motu-pcm.c
sound/firewire/oxfw/oxfw-pcm.c
sound/firewire/tascam/amdtp-tascam.c
sound/hda/hdac_regmap.c
sound/hda/hdac_stream.c
sound/isa/msnd/msnd_pinnacle.c
sound/parisc/harmony.c
sound/pci/aw2/aw2-alsa.c
sound/pci/cs46xx/cs46xx_lib.c
sound/pci/echoaudio/echoaudio.c
sound/pci/echoaudio/echoaudio_dsp.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ice1724.c
sound/pci/nm256/nm256.c
sound/pci/rme32.c
sound/pci/rme96.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/pci/rme9652/rme9652.c
sound/pci/sis7019.c
sound/pci/ymfpci/ymfpci_main.c
sound/soc/amd/acp-da7219-max98357a.c
sound/soc/au1x/ac97c.c
sound/soc/au1x/i2sc.c
sound/soc/codecs/cros_ec_codec.c
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/max98090.c
sound/soc/codecs/max98090.h
sound/soc/codecs/msm8916-wcd-analog.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/rt5677-spi.h
sound/soc/codecs/rt5682.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8962.c
sound/soc/fsl/fsl_audmix.c
sound/soc/generic/simple-card.c
sound/soc/intel/atom/sst/sst.c
sound/soc/intel/atom/sst/sst_acpi.c
sound/soc/intel/boards/bytcht_es8316.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/boards/cml_rt1011_rt5682.c
sound/soc/intel/common/soc-acpi-intel-cml-match.c
sound/soc/intel/skylake/skl-sst-cldma.c
sound/soc/sh/fsi.c
sound/soc/soc-component.c
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/imx/imx8.c
sound/soc/sof/intel/byt.c
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda-dai.c
sound/soc/sof/intel/hda-loader.c
sound/soc/sof/ipc.c
sound/soc/sof/loader.c
sound/soc/sof/topology.c
sound/soc/sti/uniperif_player.c
sound/soc/stm/stm32_adfsdm.c
sound/soc/stm/stm32_sai_sub.c
sound/soc/stm/stm32_spdifrx.c
sound/usb/card.h
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/usbaudio.h
sound/x86/intel_hdmi_audio.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/lib/memcpy_64.S
tools/arch/x86/lib/memset_64.S
tools/bpf/bpftool/btf_dumper.c
tools/bpf/bpftool/prog.c
tools/bpf/bpftool/xlated_dumper.c
tools/build/feature/Makefile
tools/build/feature/test-clang.cpp
tools/cgroup/iocost_monitor.py
tools/include/linux/bitmap.h
tools/include/linux/string.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/fscrypt.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/sched.h
tools/include/uapi/linux/stat.h
tools/lib/api/fs/fs.c
tools/lib/bitmap.c
tools/lib/bpf/Makefile
tools/lib/perf/Build [moved from tools/perf/lib/Build with 100% similarity]
tools/lib/perf/Documentation/Makefile [new file with mode: 0644]
tools/lib/perf/Documentation/asciidoc.conf [new file with mode: 0644]
tools/lib/perf/Documentation/examples/sampling.c [new file with mode: 0644]
tools/lib/perf/Documentation/libperf-counting.txt [new file with mode: 0644]
tools/lib/perf/Documentation/libperf-sampling.txt [new file with mode: 0644]
tools/lib/perf/Documentation/libperf.txt [new file with mode: 0644]
tools/lib/perf/Documentation/manpage-1.72.xsl [new file with mode: 0644]
tools/lib/perf/Documentation/manpage-base.xsl [new file with mode: 0644]
tools/lib/perf/Documentation/manpage-bold-literal.xsl [new file with mode: 0644]
tools/lib/perf/Documentation/manpage-normal.xsl [new file with mode: 0644]
tools/lib/perf/Documentation/manpage-suppress-sp.xsl [new file with mode: 0644]
tools/lib/perf/Makefile [moved from tools/perf/lib/Makefile with 96% similarity]
tools/lib/perf/core.c [moved from tools/perf/lib/core.c with 100% similarity]
tools/lib/perf/cpumap.c [moved from tools/perf/lib/cpumap.c with 77% similarity]
tools/lib/perf/evlist.c [moved from tools/perf/lib/evlist.c with 99% similarity]
tools/lib/perf/evsel.c [moved from tools/perf/lib/evsel.c with 77% similarity]
tools/lib/perf/include/internal/cpumap.h [moved from tools/perf/lib/include/internal/cpumap.h with 100% similarity]
tools/lib/perf/include/internal/evlist.h [moved from tools/perf/lib/include/internal/evlist.h with 99% similarity]
tools/lib/perf/include/internal/evsel.h [moved from tools/perf/lib/include/internal/evsel.h with 100% similarity]
tools/lib/perf/include/internal/lib.h [moved from tools/perf/lib/include/internal/lib.h with 100% similarity]
tools/lib/perf/include/internal/mmap.h [moved from tools/perf/lib/include/internal/mmap.h with 100% similarity]
tools/lib/perf/include/internal/tests.h [moved from tools/perf/lib/include/internal/tests.h with 100% similarity]
tools/lib/perf/include/internal/threadmap.h [moved from tools/perf/lib/include/internal/threadmap.h with 100% similarity]
tools/lib/perf/include/internal/xyarray.h [moved from tools/perf/lib/include/internal/xyarray.h with 100% similarity]
tools/lib/perf/include/perf/core.h [moved from tools/perf/lib/include/perf/core.h with 100% similarity]
tools/lib/perf/include/perf/cpumap.h [moved from tools/perf/lib/include/perf/cpumap.h with 89% similarity]
tools/lib/perf/include/perf/event.h [moved from tools/perf/lib/include/perf/event.h with 100% similarity]
tools/lib/perf/include/perf/evlist.h [moved from tools/perf/lib/include/perf/evlist.h with 100% similarity]
tools/lib/perf/include/perf/evsel.h [moved from tools/perf/lib/include/perf/evsel.h with 84% similarity]
tools/lib/perf/include/perf/mmap.h [moved from tools/perf/lib/include/perf/mmap.h with 100% similarity]
tools/lib/perf/include/perf/threadmap.h [moved from tools/perf/lib/include/perf/threadmap.h with 100% similarity]
tools/lib/perf/internal.h [moved from tools/perf/lib/internal.h with 100% similarity]
tools/lib/perf/lib.c [moved from tools/perf/lib/lib.c with 100% similarity]
tools/lib/perf/libperf.map [moved from tools/perf/lib/libperf.map with 100% similarity]
tools/lib/perf/libperf.pc.template [moved from tools/perf/lib/libperf.pc.template with 100% similarity]
tools/lib/perf/mmap.c [moved from tools/perf/lib/mmap.c with 100% similarity]
tools/lib/perf/tests/Makefile [moved from tools/perf/lib/tests/Makefile with 93% similarity]
tools/lib/perf/tests/test-cpumap.c [moved from tools/perf/lib/tests/test-cpumap.c with 100% similarity]
tools/lib/perf/tests/test-evlist.c [moved from tools/perf/lib/tests/test-evlist.c with 100% similarity]
tools/lib/perf/tests/test-evsel.c [moved from tools/perf/lib/tests/test-evsel.c with 100% similarity]
tools/lib/perf/tests/test-threadmap.c [moved from tools/perf/lib/tests/test-threadmap.c with 100% similarity]
tools/lib/perf/threadmap.c [moved from tools/perf/lib/threadmap.c with 100% similarity]
tools/lib/perf/xyarray.c [moved from tools/perf/lib/xyarray.c with 100% similarity]
tools/lib/string.c
tools/lib/traceevent/Makefile
tools/lib/traceevent/parse-filter.c
tools/lib/traceevent/plugins/Makefile
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-kvm.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-sched.txt
tools/perf/Documentation/perf-top.txt
tools/perf/MANIFEST
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/tests/regs_load.S
tools/perf/arch/arm64/tests/regs_load.S
tools/perf/arch/x86/tests/regs_load.S
tools/perf/builtin-annotate.c
tools/perf/builtin-c2c.c
tools/perf/builtin-inject.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/check-headers.sh
tools/perf/examples/bpf/5sec.c
tools/perf/lib/Documentation/Makefile [deleted file]
tools/perf/lib/Documentation/man/libperf.rst [deleted file]
tools/perf/lib/Documentation/tutorial/tutorial.rst [deleted file]
tools/perf/pmu-events/arch/s390/cf_z13/extended.json
tools/perf/pmu-events/arch/s390/cf_z14/extended.json
tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
tools/perf/tests/Build
tools/perf/tests/bp_signal.c
tools/perf/tests/builtin-test.c
tools/perf/tests/cpumap.c
tools/perf/tests/event-times.c
tools/perf/tests/genelf.c [new file with mode: 0644]
tools/perf/tests/tests.h
tools/perf/trace/beauty/clone.c
tools/perf/trace/beauty/sockaddr.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/browsers/hists.h
tools/perf/ui/browsers/res_sample.c
tools/perf/ui/browsers/scripts.c
tools/perf/ui/gtk/Build
tools/perf/ui/tui/util.c
tools/perf/ui/util.h
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/c++/clang.cpp
tools/perf/util/cpumap.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/expr.y
tools/perf/util/genelf.c
tools/perf/util/header.c
tools/perf/util/hist.h
tools/perf/util/include/linux/linkage.h
tools/perf/util/machine.c
tools/perf/util/metricgroup.c
tools/perf/util/mmap.c
tools/perf/util/mmap.h
tools/perf/util/parse-events.y
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/symbol-elf.c
tools/power/acpi/Makefile.config
tools/power/acpi/common/cmfsize.c
tools/power/acpi/common/getopt.c
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
tools/power/acpi/os_specific/service_layers/osunixdir.c
tools/power/acpi/os_specific/service_layers/osunixmap.c
tools/power/acpi/os_specific/service_layers/osunixxf.c
tools/power/acpi/tools/acpidump/acpidump.h
tools/power/acpi/tools/acpidump/apdump.c
tools/power/acpi/tools/acpidump/apfiles.c
tools/power/acpi/tools/acpidump/apmain.c
tools/power/x86/intel-speed-select/isst-config.c
tools/power/x86/intel-speed-select/isst-core.c
tools/power/x86/intel-speed-select/isst-display.c
tools/power/x86/intel-speed-select/isst.h
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_kernel.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/nvdimm/Kbuild
tools/testing/nvdimm/test/iomap.c
tools/testing/nvdimm/test/nfit_test.h
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/.gitignore
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_ftrace.sh [new file with mode: 0755]
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/ref_tracking.c
tools/testing/selftests/bpf/verifier/runtime_jit.c
tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
tools/testing/selftests/filesystems/epoll/Makefile
tools/testing/selftests/firmware/fw_lib.sh
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc
tools/testing/selftests/ftrace/test.d/functions
tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
tools/testing/selftests/kselftest/module.sh
tools/testing/selftests/kselftest/prefix.pl
tools/testing/selftests/kselftest/runner.sh
tools/testing/selftests/livepatch/functions.sh
tools/testing/selftests/livepatch/test-state.sh
tools/testing/selftests/net/forwarding/loopback.sh
tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/netfilter/nft_flowtable.sh
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/rcutorture/bin/cpus2use.sh
tools/testing/selftests/rcutorture/bin/jitter.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/mkinitrd.sh
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/rseq/rseq.h
tools/testing/selftests/rseq/settings [new file with mode: 0644]
tools/testing/selftests/safesetid/Makefile
tools/testing/selftests/safesetid/safesetid-test.c
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
tools/testing/selftests/tc-testing/tc-tests/filters/u32.json [new file with mode: 0644]
tools/testing/selftests/timens/.gitignore [new file with mode: 0644]
tools/testing/selftests/timens/Makefile [new file with mode: 0644]
tools/testing/selftests/timens/clock_nanosleep.c [new file with mode: 0644]
tools/testing/selftests/timens/config [new file with mode: 0644]
tools/testing/selftests/timens/exec.c [new file with mode: 0644]
tools/testing/selftests/timens/gettime_perf.c [new file with mode: 0644]
tools/testing/selftests/timens/log.h [new file with mode: 0644]
tools/testing/selftests/timens/procfs.c [new file with mode: 0644]
tools/testing/selftests/timens/timens.c [new file with mode: 0644]
tools/testing/selftests/timens/timens.h [new file with mode: 0644]
tools/testing/selftests/timens/timer.c [new file with mode: 0644]
tools/testing/selftests/timens/timerfd.c [new file with mode: 0644]
tools/testing/selftests/tpm2/test_smoke.sh
tools/testing/selftests/tpm2/tpm2.py
tools/testing/selftests/tpm2/tpm2_tests.py
usr/gen_initramfs_list.sh
usr/include/Makefile
virt/kvm/arm/arm.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-init.c

diff --git a/.mailmap b/.mailmap
index c24773db04a7ab3ee9ddd5e8472796363f720be1..0c36f3317457d28d00c132125ff900f7e280ce0e 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -74,6 +74,7 @@ Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
+Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 Felipe W Damasio <felipewd@terra.com.br>
 Felix Kuhling <fxkuehl@gmx.de>
@@ -99,6 +100,7 @@ Jacob Shin <Jacob.Shin@amd.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com>
+Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
 James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
@@ -152,6 +154,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
+Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
@@ -207,6 +210,10 @@ Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
 Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
+Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.ibm.com>
+Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.vnet.ibm.com>
+Paul E. McKenney <paulmck@kernel.org> <paul.mckenney@linaro.org>
+Paul E. McKenney <paulmck@kernel.org> <paulmck@us.ibm.com>
 Peter A Jonsson <pj@ludd.ltu.se>
 Peter Oruba <peter@oruba.de>
 Peter Oruba <peter.oruba@amd.com>
@@ -215,6 +222,7 @@ Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
 Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
+Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajesh Shah <rajesh.shah@intel.com>
 Ralf Baechle <ralf@linux-mips.org>
 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
@@ -265,6 +273,7 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
@@ -276,3 +285,5 @@ Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
diff --git a/Documentation/ABI/obsolete/sysfs-selinux-disable b/Documentation/ABI/obsolete/sysfs-selinux-disable
new file mode 100644 (file)
index 0000000..c340278
--- /dev/null
@@ -0,0 +1,26 @@
+What:          /sys/fs/selinux/disable
+Date:          April 2005 (predates git)
+KernelVersion: 2.6.12-rc2 (predates git)
+Contact:       selinux@vger.kernel.org
+Description:
+
+       The selinuxfs "disable" node allows SELinux to be disabled at runtime
+       prior to a policy being loaded into the kernel.  If disabled via this
+       mechanism, SELinux will remain disabled until the system is rebooted.
+
+       The preferred method of disabling SELinux is via the "selinux=0" boot
+       parameter, but the selinuxfs "disable" node was created to make it
+       easier for systems with primitive bootloaders that did not allow for
+       easy modification of the kernel command line.  Unfortunately,
+       allowing SELinux to be disabled at runtime makes it difficult to
+       secure the kernel's LSM hooks using the "__ro_after_init" feature.
+
+       Thankfully, the need for the SELinux runtime disable appears to be
+       gone: the default Kconfig configuration disables this selinuxfs node,
+       and only one of the major distributions, Fedora, still supports
+       disabling SELinux at runtime.  Fedora is in the process of removing
+       the selinuxfs "disable" node, and once that is complete we will start
+       the slow process of removing this code from the kernel.
+
+       More information on /sys/fs/selinux/disable can be found under the
+       CONFIG_SECURITY_SELINUX_DISABLE Kconfig option.
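For illustration, a minimal user-space sketch of the write this node accepts
(assuming CONFIG_SECURITY_SELINUX_DISABLE is enabled and no policy has been
loaded yet; libselinux callers would normally use its security_disable()
wrapper instead)::

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Writing "1" before a policy is loaded disables SELinux
		 * until the next reboot; after a policy load it fails. */
		int fd = open("/sys/fs/selinux/disable", O_WRONLY);

		if (fd < 0) {
			perror("open /sys/fs/selinux/disable");
			return 1;
		}
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}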
index c0e23830f56a453ceb15610b67527220882dd072..58e94e7d55be5f8ec949545c677b148f0ec46868 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/class/tpm/tpmX/device/
 Date:          April 2005
 KernelVersion: 2.6.12
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The device/ directory under a specific TPM instance exposes
                the properties of that TPM chip
 
@@ -9,7 +9,7 @@ Description:    The device/ directory under a specific TPM instance exposes
 What:          /sys/class/tpm/tpmX/device/active
 Date:          April 2006
 KernelVersion: 2.6.17
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "active" property prints a '1' if the TPM chip is accepting
                commands. An inactive TPM chip still contains all the state of
                an active chip (Storage Root Key, NVRAM, etc), and can be
@@ -21,7 +21,7 @@ Description:  The "active" property prints a '1' if the TPM chip is accepting
 What:          /sys/class/tpm/tpmX/device/cancel
 Date:          June 2005
 KernelVersion: 2.6.13
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "cancel" property allows you to cancel the currently
                pending TPM command. Writing any value to cancel will call the
                TPM vendor specific cancel operation.
@@ -29,7 +29,7 @@ Description:  The "cancel" property allows you to cancel the currently
 What:          /sys/class/tpm/tpmX/device/caps
 Date:          April 2005
 KernelVersion: 2.6.12
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "caps" property contains TPM manufacturer and version info.
 
                Example output:
@@ -46,7 +46,7 @@ Description:  The "caps" property contains TPM manufacturer and version info.
 What:          /sys/class/tpm/tpmX/device/durations
 Date:          March 2011
 KernelVersion: 3.1
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "durations" property shows the 3 vendor-specific values
                used to wait for a short, medium and long TPM command. All
                TPM commands are categorized as short, medium or long in
@@ -69,7 +69,7 @@ Description:  The "durations" property shows the 3 vendor-specific values
 What:          /sys/class/tpm/tpmX/device/enabled
 Date:          April 2006
 KernelVersion: 2.6.17
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "enabled" property prints a '1' if the TPM chip is enabled,
                meaning that it should be visible to the OS. This property
                may be visible but produce a '0' after some operation that
@@ -78,7 +78,7 @@ Description:  The "enabled" property prints a '1' if the TPM chip is enabled,
 What:          /sys/class/tpm/tpmX/device/owned
 Date:          April 2006
 KernelVersion: 2.6.17
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "owned" property produces a '1' if the TPM_TakeOwnership
                ordinal has been executed successfully in the chip. A '0'
                indicates that ownership hasn't been taken.
@@ -86,7 +86,7 @@ Description:  The "owned" property produces a '1' if the TPM_TakeOwnership
 What:          /sys/class/tpm/tpmX/device/pcrs
 Date:          April 2005
 KernelVersion: 2.6.12
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "pcrs" property will dump the current value of all Platform
                Configuration Registers in the TPM. Note that since these
                values may be constantly changing, the output is only valid
@@ -109,7 +109,7 @@ Description:        The "pcrs" property will dump the current value of all Platform
 What:          /sys/class/tpm/tpmX/device/pubek
 Date:          April 2005
 KernelVersion: 2.6.12
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "pubek" property will return the TPM's public endorsement
                key if possible. If the TPM has had ownership established and
                is version 1.2, the pubek will not be available without the
@@ -161,7 +161,7 @@ Description:        The "pubek" property will return the TPM's public endorsement
 What:          /sys/class/tpm/tpmX/device/temp_deactivated
 Date:          April 2006
 KernelVersion: 2.6.17
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "temp_deactivated" property returns a '1' if the chip has
                been temporarily deactivated, usually until the next power
                cycle. Whether a warm boot (reboot) will clear a TPM chip
@@ -170,7 +170,7 @@ Description:        The "temp_deactivated" property returns a '1' if the chip has
 What:          /sys/class/tpm/tpmX/device/timeouts
 Date:          March 2011
 KernelVersion: 3.1
-Contact:       tpmdd-devel@lists.sf.net
+Contact:       linux-integrity@vger.kernel.org
 Description:   The "timeouts" property shows the 4 vendor-specific values
                for the TPM's interface spec timeouts. The use of these
                timeouts is defined by the TPM interface spec that the chip
@@ -183,3 +183,14 @@ Description:       The "timeouts" property shows the 4 vendor-specific values
                The four timeout values are shown in usecs, with a trailing
                "[original]" or "[adjusted]" depending on whether the values
                were scaled by the driver to be reported in usec from msecs.
+
+What:          /sys/class/tpm/tpmX/tpm_version_major
+Date:          October 2019
+KernelVersion: 5.5
+Contact:       linux-integrity@vger.kernel.org
+Description:   The "tpm_version_major" property shows the TCG spec major version
+               implemented by the TPM device.
+
+               Example output:
+
+               2
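As a usage sketch (the tpm0 instance name and the error handling are
assumptions, not part of the ABI), the new attribute reads like any other
sysfs file::

	#include <stdio.h>

	int main(void)
	{
		char buf[8];
		FILE *f = fopen("/sys/class/tpm/tpm0/tpm_version_major", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("TCG spec major version: %s", buf); /* e.g. "2" */
		fclose(f);
		return 0;
	}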
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
new file mode 100644 (file)
index 0000000..f4be46c
--- /dev/null
@@ -0,0 +1,171 @@
+What:          /sys/bus/dsa/devices/dsa<m>/cdev_major
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:   The major number that the character device driver assigned to
+               this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/errors
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The error information for this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/max_batch_size
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The largest number of work descriptors in a batch.
+
+What:          /sys/bus/dsa/devices/dsa<m>/max_work_queues_size
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum work queue size supported by this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/max_engines
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of engines supported by this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/max_groups
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of groups that can be created under this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/max_tokens
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The total number of bandwidth tokens supported by this device.
+               The bandwidth tokens represent resources within the DSA
+               implementation, and these resources are allocated by engines to
+               support operations.
+
+What:          /sys/bus/dsa/devices/dsa<m>/max_transfer_size
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of bytes that can be read from the source
+               address to perform an operation. The maximum transfer size is
+               dependent on the workqueue the descriptor was submitted to.
+
+What:          /sys/bus/dsa/devices/dsa<m>/max_work_queues
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of work queues that this device supports.
+
+What:          /sys/bus/dsa/devices/dsa<m>/numa_node
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The NUMA node number for this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/op_cap
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The operation capability bit mask specifies the operation
+               types supported by this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/state
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The state information of this device. It can be either enabled
+               or disabled.
+
+What:          /sys/bus/dsa/devices/dsa<m>/group<m>.<n>
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The assigned group under this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/engine<m>.<n>
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The assigned engine under this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/wq<m>.<n>
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The assigned work queue under this device.
+
+What:          /sys/bus/dsa/devices/dsa<m>/configurable
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    Indicates whether this device is configurable.
+
+What:          /sys/bus/dsa/devices/dsa<m>/token_limit
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The maximum number of bandwidth tokens that may be in use at
+               one time by operations that access low bandwidth memory in the
+               device.
+
+What:          /sys/bus/dsa/devices/wq<m>.<n>/group_id
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The group id that this work queue belongs to.
+
+What:          /sys/bus/dsa/devices/wq<m>.<n>/size
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The work queue size for this work queue.
+
+What:          /sys/bus/dsa/devices/wq<m>.<n>/type
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The type of this work queue: "kernel" for work queues used
+               in kernel space, or "user" for work queues used by
+               applications in user space.
+
+What:          /sys/bus/dsa/devices/wq<m>.<n>/cdev_minor
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The minor number assigned to this work queue by the character
+               device driver.
+
+What:          /sys/bus/dsa/devices/wq<m>.<n>/mode
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The mode of this work queue.
+
+What:          /sys/bus/dsa/devices/wq<m>.<n>/priority
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The priority value of this work queue. It is a value relative
+               to the other work queues in the same group and controls quality
+               of service when dispatching work from multiple work queues in
+               the same group.
+
+What:          /sys/bus/dsa/devices/wq<m>.<n>/state
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The current state of the work queue.
+
+What:          /sys/bus/dsa/devices/wq<m>.<n>/threshold
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The number of entries in this work queue that may be filled
+               via a limited portal.
+
+What:          /sys/bus/dsa/devices/engine<m>.<n>/group_id
+Date:           Oct 25, 2019
+KernelVersion:  5.6.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The group that this engine belongs to.
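A sketch of probing these attributes from user space (the dsa0 device name
is an assumption, and which attributes exist depends on the hardware)::

	#include <stdio.h>

	/* Read one sysfs attribute of a DSA device; returns 0 on success. */
	static int read_dsa_attr(const char *dev, const char *attr,
				 char *buf, int len)
	{
		char path[128];
		FILE *f;
		int ok;

		snprintf(path, sizeof(path),
			 "/sys/bus/dsa/devices/%s/%s", dev, attr);
		f = fopen(path, "r");
		if (!f)
			return -1;
		ok = fgets(buf, len, f) != NULL;
		fclose(f);
		return ok ? 0 : -1;
	}

	int main(void)
	{
		char val[64];

		/* "state" is either "enabled" or "disabled", per the ABI above. */
		if (read_dsa_attr("dsa0", "state", val, sizeof(val)) == 0)
			printf("dsa0 state: %s", val);
		if (read_dsa_attr("dsa0", "max_work_queues", val, sizeof(val)) == 0)
			printf("dsa0 max work queues: %s", val);
		return 0;
	}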
index 8ca498447aeb9f4bfbc8b6e71cdf2086893ea5f3..b0d90cc696a83dd66732fa9ceb687b8f73892726 100644 (file)
@@ -1,5 +1,4 @@
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/asic_health
-
 Date:          June 2018
 KernelVersion: 4.19
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -19,7 +18,6 @@ Description:  These files show with which CPLD versions have been burned
                The files are read only.
 
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/fan_dir
-
 Date:          December 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -29,18 +27,16 @@ Description:        This file shows the system fans direction:
 
                The files are read only.
 
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable
-
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version
 Date:          November 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
 Description:   These files show which CPLD versions have been burned
-               on LED board.
+               on LED or Gearbox board.
 
                The files are read only.
 
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable
-
 Date:          November 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -108,7 +104,6 @@ What:               /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_pwr_fail
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_comex
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_system
 What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_voltmon_upgrade_fail
-
 Date:          November 2018
 KernelVersion: 5.0
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -121,6 +116,21 @@ Description:       These files show the system reset cause, as following: ComEx
 
                The files are read only.
 
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version
+Date:          November 2018
+KernelVersion: 5.0
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   These files show which CPLD versions have been burned
+               on the LED board.
+
+               The files are read only.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_thermal
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_wd
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_asic
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_reload_bios
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sff_wd
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_swb_wd
 Date:          June 2019
 KernelVersion: 5.3
 Contact:       Vadim Pasternak <vadimp@mellanox.com>
@@ -134,9 +144,65 @@ Description:       These files show the system reset cause, as following:
 
                The files are read only.
 
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_thermal
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_wd
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_asic
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_reload_bios
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sff_wd
-What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_swb_wd
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config1
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config2
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   These files show static system topology identification,
+               such as the system's static I2C topology and the number
+               and type of FPGA devices within the system.
+
+               The files are read only.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_ac_pwr_fail
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_platform
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_soc
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sw_pwr_off
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   These files show the system reset causes, as follows:
+               reset due to AC power failure, reset invoked from software
+               by asserting the reset signal through the CPLD, reset
+               caused by a signal asserted by the SOC through an ACPI
+               register, and reset invoked from software by asserting the
+               power-off signal through the CPLD.
+
+               The files are read only.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/pcie_asic_reset_dis
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   This file allows the ASIC to be kept up during a PCIe
+               root complex reset, when the attribute is set to 1.
+
+               The file is read/write.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/vpd_wp
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   This file allows the system VPD hardware write protection
+               to be overridden when the attribute is set to 1.
+
+               The file is read/write.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/voltreg_update_status
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   This file exposes the configuration update status of
+               burnable voltage regulator devices. The status values are
+               as follows: 0 - OK; 1 - CRC failure; 2 - I2C failure;
+               3 - in progress.
+
+               The file is read only.
+
+What:          /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ufm_version
+Date:          January 2020
+KernelVersion: 5.6
+Contact:       Vadim Pasternak <vadimp@mellanox.com>
+Description:   This file exposes the firmware version of burnable voltage
+               regulator devices.
+
+               The file is read only.
index 01196e19afca3b0050f9c056b1db2841e89d0334..9758eb85ade3ea461787f2707c14eb400df5a660 100644 (file)
@@ -7,6 +7,13 @@ Description:
                The name of the devfreq object denoted as ... is the same as
                the name of the device using devfreq.
 
+What:          /sys/class/devfreq/.../name
+Date:          November 2019
+Contact:       Chanwoo Choi <cw00.choi@samsung.com>
+Description:
+               The /sys/class/devfreq/.../name shows the name of the device
+               of the corresponding devfreq object.
+
 What:          /sys/class/devfreq/.../governor
 Date:          September 2011
 Contact:       MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -48,12 +55,15 @@ What:               /sys/class/devfreq/.../trans_stat
 Date:          October 2012
 Contact:       MyungJoo Ham <myungjoo.ham@samsung.com>
 Description:
-               This ABI shows the statistics of devfreq behavior on a
-               specific device. It shows the time spent in each state and
-               the number of transitions between states.
+               This ABI shows or clears the statistics of devfreq behavior
+               on a specific device. It shows the time spent in each state
+               and the number of transitions between states.
                In order to activate this ABI, the devfreq target device
                driver should provide the list of available frequencies
-               with its profile.
+               with its profile. To reset the statistics of devfreq
+               behavior on a specific device, write 0 (zero) to
+               'trans_stat' as follows:
+                       echo 0 > /sys/class/devfreq/.../trans_stat
 
 What:          /sys/class/devfreq/.../userspace/set_freq
 Date:          September 2011
index fc20cde63d1eac1fd563385542c7659c965c5e18..2e0e3b45d02a7f7bd428d45de2f37265d9055aed 100644 (file)
@@ -196,6 +196,12 @@ Description:
                does not reflect it. Likewise, if one enables a deep state but a
                lighter state still is disabled, then this has no effect.
 
+What:          /sys/devices/system/cpu/cpuX/cpuidle/stateN/default_status
+Date:          December 2019
+KernelVersion: v5.6
+Contact:       Linux power management list <linux-pm@vger.kernel.org>
+Description:
+               (RO) The default status of this state, "enabled" or "disabled".
 
 What:          /sys/devices/system/cpu/cpuX/cpuidle/stateN/residency
 Date:          March 2014
index 9e99f29096127227919d16afb5aa7f14317dfc1d..1efac0ddb417e0b04ffeeb11573eca9488147f06 100644 (file)
@@ -46,3 +46,13 @@ Description:
                        * 0 - normal,
                        * 1 - overboost,
                        * 2 - silent
+
+What:          /sys/devices/platform/<platform>/throttle_thermal_policy
+Date:          Dec 2019
+KernelVersion: 5.6
+Contact:       "Leonid Maksymchuk" <leonmaxx@gmail.com>
+Description:
+               Throttle thermal policy mode:
+                       * 0 - default,
+                       * 1 - overboost,
+                       * 2 - silent
index c65a8057486921ae53d505c7a03f0a76333c5bed..401d202f478b230014ce7a6ee247cbea76dc45da 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/bus/platform/devices/MLNXBF04:00/driver/lifecycle_state
+What:          /sys/bus/platform/devices/MLNXBF04:00/lifecycle_state
 Date:          Oct 2019
 KernelVersion: 5.5
 Contact:       "Liming Sun <lsun@mellanox.com>"
@@ -10,7 +10,7 @@ Description:
                  GA Non-Secured - Non-Secure chip and not able to change state
                  RMA - Return Merchandise Authorization
 
-What:          /sys/bus/platform/devices/MLNXBF04:00/driver/post_reset_wdog
+What:          /sys/bus/platform/devices/MLNXBF04:00/post_reset_wdog
 Date:          Oct 2019
 KernelVersion: 5.5
 Contact:       "Liming Sun <lsun@mellanox.com>"
@@ -19,7 +19,7 @@ Description:
                to reboot the chip and recover it to the old state if the new
                boot partition fails.
 
-What:          /sys/bus/platform/devices/MLNXBF04:00/driver/reset_action
+What:          /sys/bus/platform/devices/MLNXBF04:00/reset_action
 Date:          Oct 2019
 KernelVersion: 5.5
 Contact:       "Liming Sun <lsun@mellanox.com>"
@@ -30,7 +30,7 @@ Description:
                  emmc - boot from the onchip eMMC
                  emmc_legacy - boot from the onchip eMMC in legacy (slow) mode
 
-What:          /sys/bus/platform/devices/MLNXBF04:00/driver/second_reset_action
+What:          /sys/bus/platform/devices/MLNXBF04:00/second_reset_action
 Date:          Oct 2019
 KernelVersion: 5.5
 Contact:       "Liming Sun <lsun@mellanox.com>"
@@ -44,7 +44,7 @@ Description:
                  swap_emmc - swap the primary / secondary boot partition
                  none - cancel the action
 
-What:          /sys/bus/platform/devices/MLNXBF04:00/driver/secure_boot_fuse_state
+What:          /sys/bus/platform/devices/MLNXBF04:00/secure_boot_fuse_state
 Date:          Oct 2019
 KernelVersion: 5.5
 Contact:       "Liming Sun <lsun@mellanox.com>"
index 6f87b9dd384b8b7faf72be043625df5dba5d7afc..5e6ead29124ccefb781645ba1b00af77f242367e 100644 (file)
@@ -407,3 +407,16 @@ Contact:   Kalesh Singh <kaleshsingh96@gmail.com>
 Description:
                The /sys/power/suspend_stats/last_failed_step file contains
                the last failed step in the suspend/resume path.
+
+What:          /sys/power/sync_on_suspend
+Date:          October 2019
+Contact:       Jonas Meurer <jonas@freesources.org>
+Description:
+               This file controls whether or not the kernel will sync()
+               filesystems during system suspend (after freezing user space
+               and before suspending devices).
+
+               Writing a "1" to this file enables the sync() and writing a "0"
+               disables it.  Reads from the file return the current value.
+               The default is "1" if the build-time "SUSPEND_SKIP_SYNC" config
+               flag is unset, or "0" otherwise.
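A sketch of flipping this knob from C (root is assumed; from a shell one
would simply echo 0 or 1 into the file)::

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char cur;
		int fd = open("/sys/power/sync_on_suspend", O_RDWR);

		if (fd < 0) {
			perror("open /sys/power/sync_on_suspend");
			return 1;
		}
		if (pread(fd, &cur, 1, 0) == 1)
			printf("sync_on_suspend is currently %c\n", cur);
		/* Skip the pre-suspend sync() from now on. */
		if (pwrite(fd, "0", 1, 0) != 1)
			perror("write");
		close(fd);
		return 0;
	}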
similarity index 73%
rename from Documentation/RCU/NMI-RCU.txt
rename to Documentation/RCU/NMI-RCU.rst
index 881353fd5bff1cbc1f3dead8009f5b86cb65832a..180958388ff94afb557b20922b720956900bab4b 100644 (file)
@@ -1,4 +1,7 @@
+.. _NMI_rcu_doc:
+
 Using RCU to Protect Dynamic NMI Handlers
+=========================================
 
 
 Although RCU is usually used to protect read-mostly data structures,
@@ -9,7 +12,7 @@ work in "arch/x86/oprofile/nmi_timer_int.c" and in
 "arch/x86/kernel/traps.c".
 
 The relevant pieces of code are listed below, each followed by a
-brief explanation.
+brief explanation::
 
        static int dummy_nmi_callback(struct pt_regs *regs, int cpu)
        {
@@ -18,12 +21,12 @@ brief explanation.
 
 The dummy_nmi_callback() function is a "dummy" NMI handler that does
 nothing, but returns zero, thus saying that it did nothing, allowing
-the NMI handler to take the default machine-specific action.
+the NMI handler to take the default machine-specific action::
 
        static nmi_callback_t nmi_callback = dummy_nmi_callback;
 
 This nmi_callback variable is a global function pointer to the current
-NMI handler.
+NMI handler::
 
        void do_nmi(struct pt_regs * regs, long error_code)
        {
@@ -53,11 +56,12 @@ anyway.  However, in practice it is a good documentation aid, particularly
 for anyone attempting to do something similar on Alpha or on systems
 with aggressive optimizing compilers.
 
-Quick Quiz:  Why might the rcu_dereference_sched() be necessary on Alpha,
-            given that the code referenced by the pointer is read-only?
+Quick Quiz:
+               Why might the rcu_dereference_sched() be necessary on Alpha,
+               given that the code referenced by the pointer is read-only?
 
+:ref:`Answer to Quick Quiz <answer_quick_quiz_NMI>`
 
-Back to the discussion of NMI and RCU...
+Back to the discussion of NMI and RCU::
 
        void set_nmi_callback(nmi_callback_t callback)
        {
@@ -68,7 +72,7 @@ The set_nmi_callback() function registers an NMI handler.  Note that any
 data that is to be used by the callback must be initialized up -before-
 the call to set_nmi_callback().  On architectures that do not order
 writes, the rcu_assign_pointer() ensures that the NMI handler sees the
-initialized values.
+initialized values::
 
        void unset_nmi_callback(void)
        {
@@ -82,7 +86,7 @@ up any data structures used by the old NMI handler until execution
 of it completes on all other CPUs.
 
 One way to accomplish this is via synchronize_rcu(), perhaps as
-follows:
+follows::
 
        unset_nmi_callback();
        synchronize_rcu();
@@ -98,24 +102,23 @@ to free up the handler's data as soon as synchronize_rcu() returns.
 Important note: for this to work, the architecture in question must
 invoke nmi_enter() and nmi_exit() on NMI entry and exit, respectively.
 
+.. _answer_quick_quiz_NMI:
 
-Answer to Quick Quiz
-
-       Why might the rcu_dereference_sched() be necessary on Alpha, given
-       that the code referenced by the pointer is read-only?
+Answer to Quick Quiz:
+       Why might the rcu_dereference_sched() be necessary on Alpha,
+       given that the code referenced by the pointer is read-only?
 
-       Answer: The caller to set_nmi_callback() might well have
-               initialized some data that is to be used by the new NMI
-               handler.  In this case, the rcu_dereference_sched() would
-               be needed, because otherwise a CPU that received an NMI
-               just after the new handler was set might see the pointer
-               to the new NMI handler, but the old pre-initialized
-               version of the handler's data.
+       The caller to set_nmi_callback() might well have
+       initialized some data that is to be used by the new NMI
+       handler.  In this case, the rcu_dereference_sched() would
+       be needed, because otherwise a CPU that received an NMI
+       just after the new handler was set might see the pointer
+       to the new NMI handler, but the old pre-initialized
+       version of the handler's data.
 
-               This same sad story can happen on other CPUs when using
-               a compiler with aggressive pointer-value speculation
-               optimizations.
+       This same sad story can happen on other CPUs when using
+       a compiler with aggressive pointer-value speculation
+       optimizations.
 
-               More important, the rcu_dereference_sched() makes it
-               clear to someone reading the code that the pointer is
-               being protected by RCU-sched.
+       More important, the rcu_dereference_sched() makes it
+       clear to someone reading the code that the pointer is
+       being protected by RCU-sched.
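To make the quick-quiz scenario concrete, here is a sketch (hypothetical
names, error handling omitted) of a caller that initializes handler data
before publishing it; the rcu_dereference_sched() in the handler pairs
with the rcu_assign_pointer() in the installer::

	struct my_nmi_data {
		int threshold;
	};

	static struct my_nmi_data *my_data;	/* read from NMI context */

	static int my_nmi_handler(struct pt_regs *regs, int cpu)
	{
		/* Pairs with the rcu_assign_pointer() in my_nmi_install(). */
		struct my_nmi_data *d = rcu_dereference_sched(my_data);

		return d != NULL && d->threshold > 0;
	}

	static void my_nmi_install(int threshold)
	{
		struct my_nmi_data *d = kmalloc(sizeof(*d), GFP_KERNEL);

		d->threshold = threshold;	/* initialize -before- publication */
		rcu_assign_pointer(my_data, d);
		set_nmi_callback(my_nmi_handler);
	}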
similarity index 85%
rename from Documentation/RCU/arrayRCU.txt
rename to Documentation/RCU/arrayRCU.rst
index f05a9afb2c39b61efb841886e6f272adfa9da797..4051ea3871eff0075843d9eee1725b178e4090fd 100644 (file)
@@ -1,19 +1,21 @@
-Using RCU to Protect Read-Mostly Arrays
+.. _array_rcu_doc:
 
+Using RCU to Protect Read-Mostly Arrays
+=======================================
 
 Although RCU is more commonly used to protect linked lists, it can
 also be used to protect arrays.  Three situations are as follows:
 
-1.  Hash Tables
+1.  :ref:`Hash Tables <hash_tables>`
 
-2.  Static Arrays
+2.  :ref:`Static Arrays <static_arrays>`
 
-3.  Resizeable Arrays
+3.  :ref:`Resizable Arrays <resizable_arrays>`
 
 Each of these three situations involves an RCU-protected pointer to an
 array that is separately indexed.  It might be tempting to consider use
 of RCU to instead protect the index into an array, however, this use
-case is -not- supported.  The problem with RCU-protected indexes into
+case is **not** supported.  The problem with RCU-protected indexes into
 arrays is that compilers can play way too many optimization games with
 integers, which means that the rules governing handling of these indexes
 are far more trouble than they are worth.  If RCU-protected indexes into
@@ -24,16 +26,20 @@ to be safely used.
 That aside, each of the three RCU-protected pointer situations are
 described in the following sections.
 
+.. _hash_tables:
 
 Situation 1: Hash Tables
+------------------------
 
 Hash tables are often implemented as an array, where each array entry
 has a linked-list hash chain.  Each hash chain can be protected by RCU
 as described in the listRCU.txt document.  This approach also applies
 to other array-of-list situations, such as radix trees.
 
+.. _static_arrays:
 
 Situation 2: Static Arrays
+--------------------------
 
 Static arrays, where the data (rather than a pointer to the data) is
 located in each array element, and where the array is never resized,
@@ -41,13 +47,17 @@ have not been used with RCU.  Rik van Riel recommends using seqlock in
 this situation, which would also have minimal read-side overhead as long
 as updates are rare.
 
-Quick Quiz:  Why is it so important that updates be rare when
-            using seqlock?
+Quick Quiz:
+               Why is it so important that updates be rare when using seqlock?
+
+:ref:`Answer to Quick Quiz <answer_quick_quiz_seqlock>`
 
+.. _resizable_arrays:
 
-Situation 3: Resizeable Arrays
+Situation 3: Resizable Arrays
+------------------------------
 
-Use of RCU for resizeable arrays is demonstrated by the grow_ary()
+Use of RCU for resizable arrays is demonstrated by the grow_ary()
 function formerly used by the System V IPC code.  The array is used
 to map from semaphore, message-queue, and shared-memory IDs to the data
 structure that represents the corresponding IPC construct.  The grow_ary()
@@ -60,7 +70,7 @@ the remainder of the new, updates the ids->entries pointer to point to
 the new array, and invokes ipc_rcu_putref() to free up the old array.
 Note that rcu_assign_pointer() is used to update the ids->entries pointer,
 which includes any memory barriers required on whatever architecture
-you are running on.
+you are running on::
 
        static int grow_ary(struct ipc_ids* ids, int newsize)
        {
@@ -112,7 +122,7 @@ a simple check suffices.  The pointer to the structure corresponding
 to the desired IPC object is placed in "out", with NULL indicating
 a non-existent entry.  After acquiring "out->lock", the "out->deleted"
 flag indicates whether the IPC object is in the process of being
-deleted, and, if not, the pointer is returned.
+deleted, and, if not, the pointer is returned::
 
        struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
        {
@@ -144,8 +154,10 @@ deleted, and, if not, the pointer is returned.
                return out;
        }
 
+.. _answer_quick_quiz_seqlock:
 
 Answer to Quick Quiz:
+       Why is it so important that updates be rare when using seqlock?
 
        The reason that it is important that updates be rare when
        using seqlock is that frequent updates can livelock readers.
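To see why, consider this minimal seqlock sketch (hypothetical names):
every update forces concurrent readers back through the retry loop, so
frequent updates can starve readers indefinitely::

	static DEFINE_SEQLOCK(my_array_lock);
	static int my_array[16];

	int my_array_read(int idx)
	{
		unsigned int seq;
		int val;

		do {
			seq = read_seqbegin(&my_array_lock);
			val = my_array[idx];
		} while (read_seqretry(&my_array_lock, seq)); /* raced with update */

		return val;
	}

	void my_array_update(int idx, int val)
	{
		write_seqlock(&my_array_lock);
		my_array[idx] = val;
		write_sequnlock(&my_array_lock);
	}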
index 5c99185710fa4b083e144c988090aedd4fb96a7a..81a0a1e5f767e870c3daaeffbd67ca2bbc958071 100644 (file)
@@ -7,8 +7,13 @@ RCU concepts
 .. toctree::
    :maxdepth: 3
 
+   arrayRCU
+   rcubarrier
+   rcu_dereference
+   whatisRCU
    rcu
    listRCU
+   NMI-RCU
    UP
 
    Design/Memory-Ordering/Tree-RCU-Memory-Ordering
index 9c015976b174123f52b2fd1a046bcb616df423b3..b8096316fd116228b5323357b5eb879bb3dc1a45 100644 (file)
@@ -99,7 +99,7 @@ With this change, the rcu_dereference() is always within an RCU
 read-side critical section, which again would have suppressed the
 above lockdep-RCU splat.
 
-But in this particular case, we don't actually deference the pointer
+But in this particular case, we don't actually dereference the pointer
 returned from rcu_dereference().  Instead, that pointer is just compared
 to the cic pointer, which means that the rcu_dereference() can be replaced
 by rcu_access_pointer() as follows:
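A sketch of that replacement, with the ioc and cic names carried over from
the cfq example this splat concerns (illustrative, not the verbatim kernel
code)::

	rcu_read_lock();
	/* The pointer is only compared, never dereferenced. */
	if (rcu_access_pointer(ioc->ioc_data) == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);
	rcu_read_unlock();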
similarity index 88%
rename from Documentation/RCU/rcu_dereference.txt
rename to Documentation/RCU/rcu_dereference.rst
index bf699e8cfc75ca18fe2ee94472ee46e6d2a6a9c8..c9667eb0d44469308c401f8270bbf7ea261dab6d 100644 (file)
@@ -1,4 +1,7 @@
+.. _rcu_dereference_doc:
+
 PROPER CARE AND FEEDING OF RETURN VALUES FROM rcu_dereference()
+===============================================================
 
 Most of the time, you can use values from rcu_dereference() or one of
 the similar primitives without worries.  Dereferencing (prefix "*"),
@@ -8,7 +11,7 @@ subtraction of constants, and casts all work quite naturally and safely.
 It is nevertheless possible to get into trouble with other operations.
 Follow these rules to keep your RCU code working properly:
 
-o      You must use one of the rcu_dereference() family of primitives
+-      You must use one of the rcu_dereference() family of primitives
        to load an RCU-protected pointer, otherwise CONFIG_PROVE_RCU
        will complain.  Worse yet, your code can see random memory-corruption
        bugs due to games that compilers and DEC Alpha can play.
@@ -25,24 +28,24 @@ o   You must use one of the rcu_dereference() family of primitives
        for an example where the compiler can in fact deduce the exact
        value of the pointer, and thus cause misordering.
 
-o      You are only permitted to use rcu_dereference on pointer values.
+-      You are only permitted to use rcu_dereference on pointer values.
        The compiler simply knows too much about integral values to
        trust it to carry dependencies through integer operations.
        There are a very few exceptions, namely that you can temporarily
        cast the pointer to uintptr_t in order to:
 
-       o       Set bits and clear bits down in the must-be-zero low-order
+       -       Set bits and clear bits down in the must-be-zero low-order
                bits of that pointer.  This clearly means that the pointer
                must have alignment constraints, for example, this does
                -not- work in general for char* pointers.
 
-       o       XOR bits to translate pointers, as is done in some
+       -       XOR bits to translate pointers, as is done in some
                classic buddy-allocator algorithms.
 
        It is important to cast the value back to a pointer before
        doing much of anything else with it (see the sketch at the
        end of this document).
 
-o      Avoid cancellation when using the "+" and "-" infix arithmetic
+-      Avoid cancellation when using the "+" and "-" infix arithmetic
        operators.  For example, for a given variable "x", avoid
        "(x-(uintptr_t)x)" for char* pointers.  The compiler is within its
        rights to substitute zero for this sort of expression, so that
@@ -54,16 +57,16 @@ o   Avoid cancellation when using the "+" and "-" infix arithmetic
        "p+a-b" is safe because its value still necessarily depends on
        the rcu_dereference(), thus maintaining proper ordering.
 
-o      If you are using RCU to protect JITed functions, so that the
+-      If you are using RCU to protect JITed functions, so that the
        "()" function-invocation operator is applied to a value obtained
        (directly or indirectly) from rcu_dereference(), you may need to
        interact directly with the hardware to flush instruction caches.
        This issue arises on some systems when a newly JITed function is
        using the same memory that was used by an earlier JITed function.
 
-o      Do not use the results from relational operators ("==", "!=",
+-      Do not use the results from relational operators ("==", "!=",
        ">", ">=", "<", or "<=") when dereferencing.  For example,
-       the following (quite strange) code is buggy:
+       the following (quite strange) code is buggy::
 
                int *p;
                int *q;
@@ -81,11 +84,11 @@ o   Do not use the results from relational operators ("==", "!=",
        after such branches, but can speculate loads, which can again
        result in misordering bugs.
 
-o      Be very careful about comparing pointers obtained from
+-      Be very careful about comparing pointers obtained from
        rcu_dereference() against non-NULL values.  As Linus Torvalds
        explained, if the two pointers are equal, the compiler could
        substitute the pointer you are comparing against for the pointer
-       obtained from rcu_dereference().  For example:
+       obtained from rcu_dereference().  For example::
 
                p = rcu_dereference(gp);
                if (p == &default_struct)
@@ -93,7 +96,7 @@ o     Be very careful about comparing pointers obtained from
 
        Because the compiler now knows that the value of "p" is exactly
        the address of the variable "default_struct", it is free to
-       transform this code into the following:
+       transform this code into the following::
 
                p = rcu_dereference(gp);
                if (p == &default_struct)
@@ -105,14 +108,14 @@ o Be very careful about comparing pointers obtained from
 
        However, comparisons are OK in the following cases:
 
-       o       The comparison was against the NULL pointer.  If the
+       -       The comparison was against the NULL pointer.  If the
                compiler knows that the pointer is NULL, you had better
                not be dereferencing it anyway.  If the comparison is
                non-equal, the compiler is none the wiser.  Therefore,
                it is safe to compare pointers from rcu_dereference()
                against NULL pointers.
 
-       o       The pointer is never dereferenced after being compared.
+       -       The pointer is never dereferenced after being compared.
                Since there are no subsequent dereferences, the compiler
                cannot use anything it learned from the comparison
                to reorder the non-existent subsequent dereferences.
@@ -124,31 +127,31 @@ o Be very careful about comparing pointers obtained from
                dereferenced, rcu_access_pointer() should be used in place
                of rcu_dereference().
 
-       o       The comparison is against a pointer that references memory
+       -       The comparison is against a pointer that references memory
                that was initialized "a long time ago."  The reason
                this is safe is that even if misordering occurs, the
                misordering will not affect the accesses that follow
                the comparison.  So exactly how long ago is "a long
                time ago"?  Here are some possibilities:
 
-               o       Compile time.
+               -       Compile time.
 
-               o       Boot time.
+               -       Boot time.
 
-               o       Module-init time for module code.
+               -       Module-init time for module code.
 
-               o       Prior to kthread creation for kthread code.
+               -       Prior to kthread creation for kthread code.
 
-               o       During some prior acquisition of the lock that
+               -       During some prior acquisition of the lock that
                        we now hold.
 
-               o       Before mod_timer() time for a timer handler.
+               -       Before mod_timer() time for a timer handler.
 
                There are many other possibilities involving the Linux
                kernel's wide array of primitives that cause code to
                be invoked at a later time.
 
-       o       The pointer being compared against also came from
+       -       The pointer being compared against also came from
                rcu_dereference().  In this case, both pointers depend
                on one rcu_dereference() or another, so you get proper
                ordering either way.
@@ -159,13 +162,13 @@ o Be very careful about comparing pointers obtained from
                of such an RCU usage bug is shown in the section titled
                "EXAMPLE OF AMPLIFIED RCU-USAGE BUG".
 
-       o       All of the accesses following the comparison are stores,
+       -       All of the accesses following the comparison are stores,
                so that a control dependency preserves the needed ordering.
                That said, it is easy to get control dependencies wrong.
                Please see the "CONTROL DEPENDENCIES" section of
                Documentation/memory-barriers.txt for more details.
 
-       o       The pointers are not equal -and- the compiler does
+       -       The pointers are not equal -and- the compiler does
                not have enough information to deduce the value of the
                pointer.  Note that the volatile cast in rcu_dereference()
                will normally prevent the compiler from knowing too much.
@@ -175,7 +178,7 @@ o   Be very careful about comparing pointers obtained from
                comparison will provide exactly the information that the
                compiler needs to deduce the value of the pointer.
 
-o      Disable any value-speculation optimizations that your compiler
+-      Disable any value-speculation optimizations that your compiler
        might provide, especially if you are making use of feedback-based
        optimizations that take data collected from prior runs.  Such
        value-speculation optimizations reorder operations by design.
@@ -188,11 +191,12 @@ o Disable any value-speculation optimizations that your compiler
 
 
 EXAMPLE OF AMPLIFIED RCU-USAGE BUG
+----------------------------------
 
 Because updaters can run concurrently with RCU readers, RCU readers can
 see stale and/or inconsistent values.  If RCU readers need fresh or
 consistent values, which they sometimes do, they need to take proper
-precautions.  To see this, consider the following code fragment:
+precautions.  To see this, consider the following code fragment::
 
        struct foo {
                int a;
@@ -244,7 +248,7 @@ to some reordering from the compiler and CPUs is beside the point.
 
 But suppose that the reader needs a consistent view?
 
-Then one approach is to use locking, for example, as follows:
+Then one approach is to use locking, for example, as follows::
 
        struct foo {
                int a;
@@ -299,6 +303,7 @@ As always, use the right tool for the job!
 
 
 EXAMPLE WHERE THE COMPILER KNOWS TOO MUCH
+-----------------------------------------
 
 If a pointer obtained from rcu_dereference() compares not-equal to some
 other pointer, the compiler normally has no clue what the value of the
@@ -308,7 +313,7 @@ guarantees that RCU depends on.  And the volatile cast in rcu_dereference()
 should prevent the compiler from guessing the value.
 
 But without rcu_dereference(), the compiler knows more than you might
-expect.  Consider the following code fragment:
+expect.  Consider the following code fragment::
 
        struct foo {
                int a;
@@ -354,6 +359,7 @@ dereference the resulting pointer.
 
 
 WHICH MEMBER OF THE rcu_dereference() FAMILY SHOULD YOU USE?
+------------------------------------------------------------
 
 First, please avoid using rcu_dereference_raw() and also please avoid
 using rcu_dereference_check() and rcu_dereference_protected() with a
@@ -370,7 +376,7 @@ member of the rcu_dereference() to use in various situations:
 
 2.     If the access might be within an RCU read-side critical section
        on the one hand, or protected by (say) my_lock on the other,
-       use rcu_dereference_check(), for example:
+       use rcu_dereference_check(), for example::
 
                p1 = rcu_dereference_check(p->rcu_protected_pointer,
                                           lockdep_is_held(&my_lock));
@@ -378,14 +384,14 @@ member of the rcu_dereference() to use in various situations:
 
 3.     If the access might be within an RCU read-side critical section
        on the one hand, or protected by either my_lock or your_lock on
-       the other, again use rcu_dereference_check(), for example:
+       the other, again use rcu_dereference_check(), for example::
 
                p1 = rcu_dereference_check(p->rcu_protected_pointer,
                                           lockdep_is_held(&my_lock) ||
                                           lockdep_is_held(&your_lock));
 
 4.     If the access is on the update side, so that it is always protected
-       by my_lock, use rcu_dereference_protected():
+       by my_lock, use rcu_dereference_protected()::
 
                p1 = rcu_dereference_protected(p->rcu_protected_pointer,
                                               lockdep_is_held(&my_lock));
@@ -410,18 +416,19 @@ member of the rcu_dereference() to use in various situations:
 
 
 SPARSE CHECKING OF RCU-PROTECTED POINTERS
+-----------------------------------------
 
 The sparse static-analysis tool checks for direct access to RCU-protected
 pointers, which can result in "interesting" bugs due to compiler
 optimizations involving invented loads and perhaps also load tearing.
-For example, suppose someone mistakenly does something like this:
+For example, suppose someone mistakenly does something like this::
 
        p = q->rcu_protected_pointer;
        do_something_with(p->a);
        do_something_else_with(p->b);
 
 If register pressure is high, the compiler might optimize "p" out
-of existence, transforming the code to something like this:
+of existence, transforming the code to something like this::
 
        do_something_with(q->rcu_protected_pointer->a);
        do_something_else_with(q->rcu_protected_pointer->b);
@@ -435,7 +442,7 @@ Load tearing could of course result in dereferencing a mashup of a pair
 of pointers, which also might fatally disappoint your code.
 
 These problems could have been avoided simply by making the code instead
-read as follows:
+read as follows::
 
        p = rcu_dereference(q->rcu_protected_pointer);
        do_something_with(p->a);
@@ -448,7 +455,7 @@ or as a formal parameter, with "__rcu", which tells sparse to complain if
 this pointer is accessed directly.  It will also cause sparse to complain
 if a pointer not marked with "__rcu" is accessed using rcu_dereference()
 and friends.  For example, ->rcu_protected_pointer might be declared as
-follows:
+follows::
 
        struct foo __rcu *rcu_protected_pointer;
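As a sketch of the uintptr_t exception listed in the rules near the top of
this document (hypothetical names; struct foo must be aligned so that the
low-order bit of its pointers is otherwise zero)::

	#define MY_TAG	0x1UL

	struct foo __rcu *gp;

	/* Caller must be within an RCU read-side critical section. */
	static struct foo *get_foo_and_tag(bool *tagged)
	{
		uintptr_t v = (uintptr_t)rcu_dereference(gp);

		*tagged = (v & MY_TAG) != 0;
		/* Cast back to a pointer before doing anything else with it. */
		return (struct foo *)(v & ~MY_TAG);
	}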
 
similarity index 72%
rename from Documentation/RCU/rcubarrier.txt
rename to Documentation/RCU/rcubarrier.rst
index a2782df697328e3293769b429b5321c82e0e0b16..f64f4413a47c4583d0de3ea94dc0f305cabbfcb0 100644 (file)
@@ -1,4 +1,7 @@
+.. _rcu_barrier:
+
 RCU and Unloadable Modules
+==========================
 
 [Originally published in LWN Jan. 14, 2007: http://lwn.net/Articles/217484/]
 
@@ -21,7 +24,7 @@ given that readers might well leave absolutely no trace of their
 presence? There is a synchronize_rcu() primitive that blocks until all
 pre-existing readers have completed. An updater wishing to delete an
 element p from a linked list might do the following, while holding an
-appropriate lock, of course:
+appropriate lock, of course::
 
        list_del_rcu(p);
        synchronize_rcu();
@@ -32,13 +35,13 @@ primitive must be used instead. This primitive takes a pointer to an
 rcu_head struct placed within the RCU-protected data structure and
 another pointer to a function that may be invoked later to free that
 structure. Code to delete an element p from the linked list from IRQ
-context might then be as follows:
+context might then be as follows::
 
        list_del_rcu(p);
        call_rcu(&p->rcu, p_callback);
 
 Since call_rcu() never blocks, this code can safely be used from within
-IRQ context. The function p_callback() might be defined as follows:
+IRQ context. The function p_callback() might be defined as follows::
 
        static void p_callback(struct rcu_head *rp)
        {
@@ -49,6 +52,7 @@ IRQ context. The function p_callback() might be defined as follows:
 
 
 Unloading Modules That Use call_rcu()
+-------------------------------------
 
 But what if p_callback is defined in an unloadable module?
 
@@ -69,10 +73,11 @@ in realtime kernels in order to avoid excessive scheduling latencies.
 
 
 rcu_barrier()
+-------------
 
 We instead need the rcu_barrier() primitive.  Rather than waiting for
 a grace period to elapse, rcu_barrier() waits for all outstanding RCU
-callbacks to complete.  Please note that rcu_barrier() does -not- imply
+callbacks to complete.  Please note that rcu_barrier() does **not** imply
 synchronize_rcu(); in particular, if there are no RCU callbacks queued
 anywhere, rcu_barrier() is within its rights to return immediately,
 without waiting for a grace period to elapse.
@@ -88,79 +93,79 @@ must match the flavor of rcu_barrier() with that of call_rcu().  If your
 module uses multiple flavors of call_rcu(), then it must also use multiple
 flavors of rcu_barrier() when unloading that module.  For example, if
 it uses call_rcu(), call_srcu() on srcu_struct_1, and call_srcu() on
-srcu_struct_2(), then the following three lines of code will be required
-when unloading:
+srcu_struct_2, then the following three lines of code will be required
+when unloading::
 
  1 rcu_barrier();
  2 srcu_barrier(&srcu_struct_1);
  3 srcu_barrier(&srcu_struct_2);
 
 The rcutorture module makes use of rcu_barrier() in its exit function
-as follows:
+as follows::
 
- 1 static void
- 2 rcu_torture_cleanup(void)
- 3 {
- 4   int i;
+ 1  static void
+ 2  rcu_torture_cleanup(void)
+ 3  {
+ 4    int i;
  5
- 6   fullstop = 1;
- 7   if (shuffler_task != NULL) {
+ 6    fullstop = 1;
+ 7    if (shuffler_task != NULL) {
  8     VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
  9     kthread_stop(shuffler_task);
-10   }
-11   shuffler_task = NULL;
-12
-13   if (writer_task != NULL) {
-14     VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
-15     kthread_stop(writer_task);
-16   }
-17   writer_task = NULL;
-18
-19   if (reader_tasks != NULL) {
-20     for (i = 0; i < nrealreaders; i++) {
-21       if (reader_tasks[i] != NULL) {
-22         VERBOSE_PRINTK_STRING(
-23           "Stopping rcu_torture_reader task");
-24         kthread_stop(reader_tasks[i]);
-25       }
-26       reader_tasks[i] = NULL;
-27     }
-28     kfree(reader_tasks);
-29     reader_tasks = NULL;
-30   }
-31   rcu_torture_current = NULL;
-32
-33   if (fakewriter_tasks != NULL) {
-34     for (i = 0; i < nfakewriters; i++) {
-35       if (fakewriter_tasks[i] != NULL) {
-36         VERBOSE_PRINTK_STRING(
-37           "Stopping rcu_torture_fakewriter task");
-38         kthread_stop(fakewriter_tasks[i]);
-39       }
-40       fakewriter_tasks[i] = NULL;
-41     }
-42     kfree(fakewriter_tasks);
-43     fakewriter_tasks = NULL;
-44   }
-45
-46   if (stats_task != NULL) {
-47     VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
-48     kthread_stop(stats_task);
-49   }
-50   stats_task = NULL;
-51
-52   /* Wait for all RCU callbacks to fire. */
-53   rcu_barrier();
-54
-55   rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
-56
-57   if (cur_ops->cleanup != NULL)
-58     cur_ops->cleanup();
-59   if (atomic_read(&n_rcu_torture_error))
-60     rcu_torture_print_module_parms("End of test: FAILURE");
-61   else
-62     rcu_torture_print_module_parms("End of test: SUCCESS");
-63 }
+ 10   }
+ 11   shuffler_task = NULL;
+ 12
+ 13   if (writer_task != NULL) {
+ 14     VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
+ 15     kthread_stop(writer_task);
+ 16   }
+ 17   writer_task = NULL;
+ 18
+ 19   if (reader_tasks != NULL) {
+ 20     for (i = 0; i < nrealreaders; i++) {
+ 21       if (reader_tasks[i] != NULL) {
+ 22         VERBOSE_PRINTK_STRING(
+ 23           "Stopping rcu_torture_reader task");
+ 24         kthread_stop(reader_tasks[i]);
+ 25       }
+ 26       reader_tasks[i] = NULL;
+ 27     }
+ 28     kfree(reader_tasks);
+ 29     reader_tasks = NULL;
+ 30   }
+ 31   rcu_torture_current = NULL;
+ 32
+ 33   if (fakewriter_tasks != NULL) {
+ 34     for (i = 0; i < nfakewriters; i++) {
+ 35       if (fakewriter_tasks[i] != NULL) {
+ 36         VERBOSE_PRINTK_STRING(
+ 37           "Stopping rcu_torture_fakewriter task");
+ 38         kthread_stop(fakewriter_tasks[i]);
+ 39       }
+ 40       fakewriter_tasks[i] = NULL;
+ 41     }
+ 42     kfree(fakewriter_tasks);
+ 43     fakewriter_tasks = NULL;
+ 44   }
+ 45
+ 46   if (stats_task != NULL) {
+ 47     VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
+ 48     kthread_stop(stats_task);
+ 49   }
+ 50   stats_task = NULL;
+ 51
+ 52   /* Wait for all RCU callbacks to fire. */
+ 53   rcu_barrier();
+ 54
+ 55   rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
+ 56
+ 57   if (cur_ops->cleanup != NULL)
+ 58     cur_ops->cleanup();
+ 59   if (atomic_read(&n_rcu_torture_error))
+ 60     rcu_torture_print_module_parms("End of test: FAILURE");
+ 61   else
+ 62     rcu_torture_print_module_parms("End of test: SUCCESS");
+ 63 }
 
 Line 6 sets a global variable that prevents any RCU callbacks from
 re-posting themselves. This will not be necessary in most cases, since
@@ -176,9 +181,14 @@ for any pre-existing callbacks to complete.
 Then lines 55-62 print status and do operation-specific cleanup, and
 then return, permitting the module-unload operation to be completed.
 
-Quick Quiz #1: Is there any other situation where rcu_barrier() might
+.. _rcubarrier_quiz_1:
+
+Quick Quiz #1:
+       Is there any other situation where rcu_barrier() might
        be required?
 
+:ref:`Answer to Quick Quiz #1 <answer_rcubarrier_quiz_1>`
+
 Your module might have additional complications. For example, if your
 module invokes call_rcu() from timers, you will need to first cancel all
 the timers, and only then invoke rcu_barrier() to wait for any remaining
@@ -188,11 +198,12 @@ Of course, if your module uses call_rcu(),
 rcu_barrier() before unloading.  Similarly, if your module uses
 call_srcu(), you will need to invoke srcu_barrier() before unloading,
 and on the same srcu_struct structure.  If your module uses call_rcu()
--and- call_srcu(), then you will need to invoke rcu_barrier() -and-
+**and** call_srcu(), then you will need to invoke rcu_barrier() **and**
 srcu_barrier().
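
As a concrete sketch of the timer case mentioned above (the timer name is
hypothetical): stop the timer first, so that it can post no further callbacks,
and only then drain the callbacks that were already posted::

	static void __exit my_exit(void)
	{
		del_timer_sync(&my_timer);	/* no more call_rcu() from the timer */
		rcu_barrier();			/* wait for already-posted callbacks */
	}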
 
 
 Implementing rcu_barrier()
+--------------------------
 
 Dipankar Sarma's implementation of rcu_barrier() makes use of the fact
 that RCU callbacks are never reordered once queued on one of the per-CPU
@@ -200,19 +211,19 @@ queues. His implementation queues an RCU callback on each of the per-CPU
 callback queues, and then waits until they have all started executing, at
 which point, all earlier RCU callbacks are guaranteed to have completed.
 
-The original code for rcu_barrier() was as follows:
+The original code for rcu_barrier() was as follows::
 
- 1 void rcu_barrier(void)
- 2 {
- 3   BUG_ON(in_interrupt());
- 4   /* Take cpucontrol mutex to protect against CPU hotplug */
- 5   mutex_lock(&rcu_barrier_mutex);
- 6   init_completion(&rcu_barrier_completion);
- 7   atomic_set(&rcu_barrier_cpu_count, 0);
- 8   on_each_cpu(rcu_barrier_func, NULL, 0, 1);
- 9   wait_for_completion(&rcu_barrier_completion);
-10   mutex_unlock(&rcu_barrier_mutex);
-11 }
+ 1  void rcu_barrier(void)
+ 2  {
+ 3    BUG_ON(in_interrupt());
+ 4    /* Take cpucontrol mutex to protect against CPU hotplug */
+ 5    mutex_lock(&rcu_barrier_mutex);
+ 6    init_completion(&rcu_barrier_completion);
+ 7    atomic_set(&rcu_barrier_cpu_count, 0);
+ 8    on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+ 9    wait_for_completion(&rcu_barrier_completion);
+ 10   mutex_unlock(&rcu_barrier_mutex);
+ 11 }
 
 Line 3 verifies that the caller is in process context, and lines 5 and 10
 use rcu_barrier_mutex to ensure that only one rcu_barrier() is using the
@@ -226,18 +237,18 @@ This code was rewritten in 2008 and several times thereafter, but this
 still gives the general idea.
 
 The rcu_barrier_func() runs on each CPU, where it invokes call_rcu()
-to post an RCU callback, as follows:
+to post an RCU callback, as follows::
 
- 1 static void rcu_barrier_func(void *notused)
- 2 {
- 3 int cpu = smp_processor_id();
- 4 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- 5 struct rcu_head *head;
+ 1  static void rcu_barrier_func(void *notused)
+ 2  {
+ 3    int cpu = smp_processor_id();
+ 4    struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ 5    struct rcu_head *head;
  6
- 7 head = &rdp->barrier;
- 8 atomic_inc(&rcu_barrier_cpu_count);
- 9 call_rcu(head, rcu_barrier_callback);
-10 }
+ 7    head = &rdp->barrier;
+ 8    atomic_inc(&rcu_barrier_cpu_count);
+ 9    call_rcu(head, rcu_barrier_callback);
+ 10 }
 
 Lines 3 and 4 locate RCU's internal per-CPU rcu_data structure,
 which contains the struct rcu_head that is needed for the later call to
@@ -248,20 +259,25 @@ the current CPU's queue.
 
 The rcu_barrier_callback() function simply atomically decrements the
 rcu_barrier_cpu_count variable and finalizes the completion when it
-reaches zero, as follows:
+reaches zero, as follows::
 
  1 static void rcu_barrier_callback(struct rcu_head *notused)
  2 {
- 3 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
- 4 complete(&rcu_barrier_completion);
+ 3   if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+ 4     complete(&rcu_barrier_completion);
  5 }
 
-Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
+.. _rcubarrier_quiz_2:
+
+Quick Quiz #2:
+       What happens if CPU 0's rcu_barrier_func() executes
        immediately (thus incrementing rcu_barrier_cpu_count to the
	value one), but the other CPUs' rcu_barrier_func() invocations
        are delayed for a full grace period? Couldn't this result in
        rcu_barrier() returning prematurely?
 
+:ref:`Answer to Quick Quiz #2 <answer_rcubarrier_quiz_2>`
+
 The current rcu_barrier() implementation is more complex, due to the need
 to avoid disturbing idle CPUs (especially on battery-powered systems)
 and the need to minimally disturb non-idle CPUs in real-time systems.
@@ -269,6 +285,7 @@ However, the code above illustrates the concepts.
 
 
 rcu_barrier() Summary
+---------------------
 
 The rcu_barrier() primitive has seen relatively little use, since most
 code using RCU is in the core kernel rather than in modules. However, if
@@ -277,8 +294,12 @@ so that your module may be safely unloaded.
 
 
 Answers to Quick Quizzes
+------------------------
+
+.. _answer_rcubarrier_quiz_1:
 
-Quick Quiz #1: Is there any other situation where rcu_barrier() might
+Quick Quiz #1:
+       Is there any other situation where rcu_barrier() might
        be required?
 
 Answer: Interestingly enough, rcu_barrier() was not originally
@@ -292,7 +313,12 @@ Answer: Interestingly enough, rcu_barrier() was not originally
        implementing rcutorture, and found that rcu_barrier() solves
        this problem as well.
 
-Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
+:ref:`Back to Quick Quiz #1 <rcubarrier_quiz_1>`
+
+.. _answer_rcubarrier_quiz_2:
+
+Quick Quiz #2:
+       What happens if CPU 0's rcu_barrier_func() executes
        immediately (thus incrementing rcu_barrier_cpu_count to the
	value one), but the other CPUs' rcu_barrier_func() invocations
        are delayed for a full grace period? Couldn't this result in
@@ -323,3 +349,5 @@ Answer: This cannot happen. The reason is that on_each_cpu() has its last
        is to add an rcu_read_lock() before line 8 of rcu_barrier()
        and an rcu_read_unlock() after line 8 of this same function. If
        you can think of a better change, please let me know!
+
+:ref:`Back to Quick Quiz #2 <rcubarrier_quiz_2>`
index f48f4621ccbc2b261e6e9e924ca19b9f508c02a5..a360a8796710a2c7e7d5c7571d7562a83c4d86ef 100644 (file)
@@ -225,18 +225,13 @@ an estimate of the total number of RCU callbacks queued across all CPUs
 In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
 for each CPU:
 
-       0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 Nonlazy posted: ..D
+       0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 dyntick_enabled: 1
 
 The "last_accelerate:" prints the low-order 16 bits (in hex) of the
 jiffies counter when this CPU last invoked rcu_try_advance_all_cbs()
 from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from
-rcu_prepare_for_idle().  The "Nonlazy posted:" indicates lazy-callback
-status, so that an "l" indicates that all callbacks were lazy at the start
-of the last idle period and an "L" indicates that there are currently
-no non-lazy callbacks (in both cases, "." is printed otherwise, as
-shown above) and "D" indicates that dyntick-idle processing is enabled
-("." is printed otherwise, for example, if disabled via the "nohz="
-kernel boot parameter).
+rcu_prepare_for_idle(). "dyntick_enabled: 1" indicates that dyntick-idle
+processing is enabled.
 
 If the grace period ends just as the stall warning starts printing,
 there will be a spurious stall-warning message, which will include
similarity index 84%
rename from Documentation/RCU/whatisRCU.txt
rename to Documentation/RCU/whatisRCU.rst
index 58ba05c4d97f91904234417275fae88092685dcd..c7f147b8034f0223dac324c0d044bdc30e97761f 100644 (file)
@@ -1,15 +1,18 @@
+.. _whatisrcu_doc:
+
 What is RCU?  --  "Read, Copy, Update"
+======================================
 
 Please note that the "What is RCU?" LWN series is an excellent place
 to start learning about RCU:
 
-1.     What is RCU, Fundamentally?  http://lwn.net/Articles/262464/
-2.     What is RCU? Part 2: Usage   http://lwn.net/Articles/263130/
-3.     RCU part 3: the RCU API      http://lwn.net/Articles/264090/
-4.     The RCU API, 2010 Edition    http://lwn.net/Articles/418853/
-       2010 Big API Table           http://lwn.net/Articles/419086/
-5.     The RCU API, 2014 Edition    http://lwn.net/Articles/609904/
-       2014 Big API Table           http://lwn.net/Articles/609973/
+| 1.   What is RCU, Fundamentally?  http://lwn.net/Articles/262464/
+| 2.   What is RCU? Part 2: Usage   http://lwn.net/Articles/263130/
+| 3.   RCU part 3: the RCU API      http://lwn.net/Articles/264090/
+| 4.   The RCU API, 2010 Edition    http://lwn.net/Articles/418853/
+|      2010 Big API Table           http://lwn.net/Articles/419086/
+| 5.   The RCU API, 2014 Edition    http://lwn.net/Articles/609904/
+|      2014 Big API Table           http://lwn.net/Articles/609973/
 
 
 What is RCU?
@@ -24,14 +27,21 @@ the experience has been that different people must take different paths
 to arrive at an understanding of RCU.  This document provides several
 different paths, as follows:
 
-1.     RCU OVERVIEW
-2.     WHAT IS RCU'S CORE API?
-3.     WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
-4.     WHAT IF MY UPDATING THREAD CANNOT BLOCK?
-5.     WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
-6.     ANALOGY WITH READER-WRITER LOCKING
-7.     FULL LIST OF RCU APIs
-8.     ANSWERS TO QUICK QUIZZES
+:ref:`1.       RCU OVERVIEW <1_whatisRCU>`
+
+:ref:`2.       WHAT IS RCU'S CORE API? <2_whatisRCU>`
+
+:ref:`3.       WHAT ARE SOME EXAMPLE USES OF CORE RCU API? <3_whatisRCU>`
+
+:ref:`4.       WHAT IF MY UPDATING THREAD CANNOT BLOCK? <4_whatisRCU>`
+
+:ref:`5.       WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU? <5_whatisRCU>`
+
+:ref:`6.       ANALOGY WITH READER-WRITER LOCKING <6_whatisRCU>`
+
+:ref:`7.       FULL LIST OF RCU APIs <7_whatisRCU>`
+
+:ref:`8.       ANSWERS TO QUICK QUIZZES <8_whatisRCU>`
 
 People who prefer starting with a conceptual overview should focus on
 Section 1, though most readers will profit by reading this section at
@@ -49,8 +59,10 @@ everything, feel free to read the whole thing -- but if you are really
 that type of person, you have perused the source code and will therefore
 never need this document anyway.  ;-)
 
+.. _1_whatisRCU:
 
 1.  RCU OVERVIEW
+----------------
 
 The basic idea behind RCU is to split updates into "removal" and
 "reclamation" phases.  The removal phase removes references to data items
@@ -116,8 +128,10 @@ So how the heck can a reclaimer tell when a reader is done, given
 that readers are not doing any sort of synchronization operations???
 Read on to learn about how RCU's API makes this easy.
 
+.. _2_whatisRCU:
 
 2.  WHAT IS RCU'S CORE API?
+---------------------------
 
 The core RCU API is quite small:
 
@@ -136,7 +150,7 @@ later.  See the kernel docbook documentation for more info, or look directly
 at the function header comments.
 
 rcu_read_lock()
-
+^^^^^^^^^^^^^^^
        void rcu_read_lock(void);
 
        Used by a reader to inform the reclaimer that the reader is
@@ -150,7 +164,7 @@ rcu_read_lock()
        longer-term references to data structures.
 
 rcu_read_unlock()
-
+^^^^^^^^^^^^^^^^^
        void rcu_read_unlock(void);
 
        Used by a reader to inform the reclaimer that the reader is
@@ -158,15 +172,15 @@ rcu_read_unlock()
        read-side critical sections may be nested and/or overlapping.
 
 synchronize_rcu()
-
+^^^^^^^^^^^^^^^^^
        void synchronize_rcu(void);
 
        Marks the end of updater code and the beginning of reclaimer
        code.  It does this by blocking until all pre-existing RCU
        read-side critical sections on all CPUs have completed.
-       Note that synchronize_rcu() will -not- necessarily wait for
+       Note that synchronize_rcu() will **not** necessarily wait for
        any subsequent RCU read-side critical sections to complete.
-       For example, consider the following sequence of events:
+       For example, consider the following sequence of events::
 
                 CPU 0                  CPU 1                 CPU 2
             ----------------- ------------------------- ---------------
@@ -182,7 +196,7 @@ synchronize_rcu()
        any that begin after synchronize_rcu() is invoked.
 
        Of course, synchronize_rcu() does not necessarily return
-       -immediately- after the last pre-existing RCU read-side critical
+       **immediately** after the last pre-existing RCU read-side critical
        section completes.  For one thing, there might well be scheduling
        delays.  For another thing, many RCU implementations process
        requests in batches in order to improve efficiencies, which can
@@ -211,10 +225,10 @@ synchronize_rcu()
        checklist.txt for some approaches to limiting the update rate.
 
 rcu_assign_pointer()
-
+^^^^^^^^^^^^^^^^^^^^
        void rcu_assign_pointer(p, typeof(p) v);
 
-       Yes, rcu_assign_pointer() -is- implemented as a macro, though it
+       Yes, rcu_assign_pointer() **is** implemented as a macro, though it
        would be cool to be able to declare a function in this manner.
        (Compiler experts will no doubt disagree.)
 
@@ -231,7 +245,7 @@ rcu_assign_pointer()
        the _rcu list-manipulation primitives such as list_add_rcu().
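
A minimal publication sketch (gp is an assumed global struct foo __rcu
pointer; note that the fields are fully initialized before the pointer is
published)::

	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		p->a = 1;
		rcu_assign_pointer(gp, p);  /* readers now see an initialized *p */
	}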
 
 rcu_dereference()
-
+^^^^^^^^^^^^^^^^^
        typeof(p) rcu_dereference(p);
 
        Like rcu_assign_pointer(), rcu_dereference() must be implemented
@@ -248,13 +262,13 @@ rcu_dereference()
 
        Common coding practice uses rcu_dereference() to copy an
        RCU-protected pointer to a local variable, then dereferences
-       this local variable, for example as follows:
+       this local variable, for example as follows::
 
                p = rcu_dereference(head.next);
                return p->data;
 
        However, in this case, one could just as easily combine these
-       into one statement:
+       into one statement::
 
                return rcu_dereference(head.next)->data;
 
@@ -266,8 +280,8 @@ rcu_dereference()
        unnecessary overhead on Alpha CPUs.
 
        Note that the value returned by rcu_dereference() is valid
-       only within the enclosing RCU read-side critical section [1].
-       For example, the following is -not- legal:
+       only within the enclosing RCU read-side critical section [1]_.
+       For example, the following is **not** legal::
 
                rcu_read_lock();
                p = rcu_dereference(head.next);
@@ -290,9 +304,9 @@ rcu_dereference()
        at any time, including immediately after the rcu_dereference().
        And, again like rcu_assign_pointer(), rcu_dereference() is
        typically used indirectly, via the _rcu list-manipulation
-       primitives, such as list_for_each_entry_rcu() [2].
+       primitives, such as list_for_each_entry_rcu() [2]_.
 
-       [1] The variant rcu_dereference_protected() can be used outside
+..     [1] The variant rcu_dereference_protected() can be used outside
        of an RCU read-side critical section as long as the usage is
        protected by locks acquired by the update-side code.  This variant
        avoids the lockdep warning that would happen when using (for
@@ -305,7 +319,7 @@ rcu_dereference()
        a lockdep splat is emitted.  See Documentation/RCU/Design/Requirements/Requirements.rst
        and the API's code comments for more details and example usage.
 
-       [2] If the list_for_each_entry_rcu() instance might be used by
+..     [2] If the list_for_each_entry_rcu() instance might be used by
        update-side code as well as by RCU readers, then an additional
        lockdep expression can be added to its list of arguments.
        For example, given an additional "lock_is_held(&mylock)" argument,
@@ -315,6 +329,7 @@ rcu_dereference()
 
 The following diagram shows how each API communicates among the
 reader, updater, and reclaimer.
+::
 
 
            rcu_assign_pointer()
@@ -375,12 +390,16 @@ c.        RCU applied to scheduler and interrupt/NMI-handler tasks.
 Again, most uses will be of (a).  The (b) and (c) cases are important
 for specialized uses, but are relatively uncommon.
 
+.. _3_whatisRCU:
 
 3.  WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+-----------------------------------------------
 
 This section shows a simple use of the core RCU API to protect a
 global pointer to a dynamically allocated structure.  More-typical
-uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
+uses of RCU may be found in :ref:`listRCU.rst <list_rcu_doc>`,
+:ref:`arrayRCU.rst <array_rcu_doc>`, and :ref:`NMI-RCU.rst <NMI_rcu_doc>`.
+::
 
        struct foo {
                int a;
@@ -440,40 +459,43 @@ uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
 
 So, to sum up:
 
-o      Use rcu_read_lock() and rcu_read_unlock() to guard RCU
+-      Use rcu_read_lock() and rcu_read_unlock() to guard RCU
        read-side critical sections.
 
-o      Within an RCU read-side critical section, use rcu_dereference()
+-      Within an RCU read-side critical section, use rcu_dereference()
        to dereference RCU-protected pointers.
 
-o      Use some solid scheme (such as locks or semaphores) to
+-      Use some solid scheme (such as locks or semaphores) to
        keep concurrent updates from interfering with each other.
 
-o      Use rcu_assign_pointer() to update an RCU-protected pointer.
+-      Use rcu_assign_pointer() to update an RCU-protected pointer.
        This primitive protects concurrent readers from the updater,
-       -not- concurrent updates from each other!  You therefore still
+       **not** concurrent updates from each other!  You therefore still
        need to use locking (or something similar) to keep concurrent
        rcu_assign_pointer() primitives from interfering with each other.
 
-o      Use synchronize_rcu() -after- removing a data element from an
-       RCU-protected data structure, but -before- reclaiming/freeing
+-      Use synchronize_rcu() **after** removing a data element from an
+       RCU-protected data structure, but **before** reclaiming/freeing
        the data element, in order to wait for the completion of all
        RCU read-side critical sections that might be referencing that
        data item.
 
 See checklist.txt for additional rules to follow when using RCU.
-And again, more-typical uses of RCU may be found in listRCU.txt,
-arrayRCU.txt, and NMI-RCU.txt.
+And again, more-typical uses of RCU may be found in :ref:`listRCU.rst
+<list_rcu_doc>`, :ref:`arrayRCU.rst <array_rcu_doc>`, and :ref:`NMI-RCU.rst
+<NMI_rcu_doc>`.
 
+.. _4_whatisRCU:
 
 4.  WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+--------------------------------------------
 
 In the example above, foo_update_a() blocks until a grace period elapses.
 This is quite simple, but in some cases one cannot afford to wait so
 long -- there might be other high-priority work to be done.
 
 In such cases, one uses call_rcu() rather than synchronize_rcu().
-The call_rcu() API is as follows:
+The call_rcu() API is as follows::
 
        void call_rcu(struct rcu_head * head,
                      void (*func)(struct rcu_head *head));
@@ -481,7 +503,7 @@ The call_rcu() API is as follows:
 This function invokes func(head) after a grace period has elapsed.
 This invocation might happen from either softirq or process context,
 so the function is not permitted to block.  The foo struct needs to
-have an rcu_head structure added, perhaps as follows:
+have an rcu_head structure added, perhaps as follows::
 
        struct foo {
                int a;
@@ -490,7 +512,7 @@ have an rcu_head structure added, perhaps as follows:
                struct rcu_head rcu;
        };
 
-The foo_update_a() function might then be written as follows:
+The foo_update_a() function might then be written as follows::
 
        /*
         * Create a new struct foo that is the same as the one currently
@@ -520,7 +542,7 @@ The foo_update_a() function might then be written as follows:
                call_rcu(&old_fp->rcu, foo_reclaim);
        }
 
-The foo_reclaim() function might appear as follows:
+The foo_reclaim() function might appear as follows::
 
        void foo_reclaim(struct rcu_head *rp)
        {
@@ -544,7 +566,7 @@ namely foo_reclaim().
 The summary of advice is the same as for the previous section, except
 that we are now using call_rcu() rather than synchronize_rcu():
 
-o      Use call_rcu() -after- removing a data element from an
+-      Use call_rcu() **after** removing a data element from an
        RCU-protected data structure in order to register a callback
        function that will be invoked after the completion of all RCU
        read-side critical sections that might be referencing that
@@ -552,14 +574,16 @@ o Use call_rcu() -after- removing a data element from an
 
 If the callback for call_rcu() is not doing anything more than calling
 kfree() on the structure, you can use kfree_rcu() instead of call_rcu()
-to avoid having to write your own callback:
+to avoid having to write your own callback::
 
        kfree_rcu(old_fp, rcu);
 
 Again, see checklist.txt for additional rules governing the use of RCU.
 
+.. _5_whatisRCU:
 
 5.  WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+------------------------------------------------
 
 One of the nice things about RCU is that it has extremely simple "toy"
 implementations that are a good first step towards understanding the
@@ -579,7 +603,7 @@ more details on the current implementation as of early 2004.
 
 
 5A.  "TOY" IMPLEMENTATION #1: LOCKING
-
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 This section presents a "toy" RCU implementation that is based on
 familiar locking primitives.  Its overhead makes it a non-starter for
 real-life use, as does its lack of scalability.  It is also unsuitable
@@ -591,7 +615,7 @@ you allow nested rcu_read_lock() calls, you can deadlock.
 However, it is probably the easiest implementation to relate to, so is
 a good starting point.
 
-It is extremely simple:
+It is extremely simple::
 
        static DEFINE_RWLOCK(rcu_gp_mutex);
 
@@ -614,7 +638,7 @@ It is extremely simple:
 
 [You can ignore rcu_assign_pointer() and rcu_dereference() without missing
 much.  But here are simplified versions anyway.  And whatever you do,
-don't forget about them when submitting patches making use of RCU!]
+don't forget about them when submitting patches making use of RCU!]::
 
        #define rcu_assign_pointer(p, v) \
        ({ \
@@ -647,18 +671,23 @@ that the only thing that can block rcu_read_lock() is a synchronize_rcu().
 But synchronize_rcu() does not acquire any locks while holding rcu_gp_mutex,
 so there can be no deadlock cycle.
 
-Quick Quiz #1: Why is this argument naive?  How could a deadlock
+.. _quiz_1:
+
+Quick Quiz #1:
+               Why is this argument naive?  How could a deadlock
                occur when using this algorithm in a real-world Linux
                kernel?  How could this deadlock be avoided?
 
+:ref:`Answers to Quick Quiz <8_whatisRCU>`
 
 5B.  "TOY" EXAMPLE #2: CLASSIC RCU
-
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 This section presents a "toy" RCU implementation that is based on
 "classic RCU".  It is also short on performance (but only for updates) and
 on features such as hotplug CPU and the ability to run in CONFIG_PREEMPT
 kernels.  The definitions of rcu_dereference() and rcu_assign_pointer()
 are the same as those shown in the preceding section, so they are omitted.
+::
 
        void rcu_read_lock(void) { }
 
@@ -683,14 +712,14 @@ CPU in turn.  The run_on() primitive can be implemented straightforwardly
 in terms of the sched_setaffinity() primitive.  Of course, a somewhat less
 "toy" implementation would restore the affinity upon completion rather
 than just leaving all tasks running on the last CPU, but when I said
-"toy", I meant -toy-!
+"toy", I meant **toy**!
 
 So how the heck is this supposed to work???
 
 Remember that it is illegal to block while in an RCU read-side critical
 section.  Therefore, if a given CPU executes a context switch, we know
 that it must have completed all preceding RCU read-side critical sections.
-Once -all- CPUs have executed a context switch, then -all- preceding
+Once **all** CPUs have executed a context switch, then **all** preceding
 RCU read-side critical sections will have completed.
 
 So, suppose that we remove a data item from its structure and then invoke
@@ -698,19 +727,32 @@ synchronize_rcu().  Once synchronize_rcu() returns, we are guaranteed
 that there are no RCU read-side critical sections holding a reference
 to that data item, so we can safely reclaim it.
 
-Quick Quiz #2: Give an example where Classic RCU's read-side
-               overhead is -negative-.
+.. _quiz_2:
+
+Quick Quiz #2:
+               Give an example where Classic RCU's read-side
+               overhead is **negative**.
+
+:ref:`Answers to Quick Quiz <8_whatisRCU>`
 
-Quick Quiz #3:  If it is illegal to block in an RCU read-side
+.. _quiz_3:
+
+Quick Quiz #3:
+               If it is illegal to block in an RCU read-side
                critical section, what the heck do you do in
                PREEMPT_RT, where normal spinlocks can block???
 
+:ref:`Answers to Quick Quiz <8_whatisRCU>`
+
+.. _6_whatisRCU:
 
 6.  ANALOGY WITH READER-WRITER LOCKING
+--------------------------------------
 
 Although RCU can be used in many different ways, a very common use of
 RCU is analogous to reader-writer locking.  The following unified
 diff shows how closely related RCU and reader-writer locking can be.
+::
 
        @@ -5,5 +5,5 @@ struct el {
                int data;
@@ -762,7 +804,7 @@ diff shows how closely related RCU and reader-writer locking can be.
                return 0;
         }
 
-Or, for those who prefer a side-by-side listing:
+Or, for those who prefer a side-by-side listing::
 
  1 struct el {                          1 struct el {
  2   struct list_head list;             2   struct list_head list;
@@ -774,40 +816,44 @@ Or, for those who prefer a side-by-side listing:
  8 rwlock_t listmutex;                  8 spinlock_t listmutex;
  9 struct el head;                      9 struct el head;
 
- 1 int search(long key, int *result)    1 int search(long key, int *result)
- 2 {                                    2 {
- 3   struct list_head *lp;              3   struct list_head *lp;
- 4   struct el *p;                      4   struct el *p;
- 5                                      5
- 6   read_lock(&listmutex);             6   rcu_read_lock();
- 7   list_for_each_entry(p, head, lp) { 7   list_for_each_entry_rcu(p, head, lp) {
- 8     if (p->key == key) {             8     if (p->key == key) {
- 9       *result = p->data;             9       *result = p->data;
-10       read_unlock(&listmutex);      10       rcu_read_unlock();
-11       return 1;                     11       return 1;
-12     }                               12     }
-13   }                                 13   }
-14   read_unlock(&listmutex);          14   rcu_read_unlock();
-15   return 0;                         15   return 0;
-16 }                                   16 }
-
- 1 int delete(long key)                 1 int delete(long key)
- 2 {                                    2 {
- 3   struct el *p;                      3   struct el *p;
- 4                                      4
- 5   write_lock(&listmutex);            5   spin_lock(&listmutex);
- 6   list_for_each_entry(p, head, lp) { 6   list_for_each_entry(p, head, lp) {
- 7     if (p->key == key) {             7     if (p->key == key) {
- 8       list_del(&p->list);            8       list_del_rcu(&p->list);
- 9       write_unlock(&listmutex);      9       spin_unlock(&listmutex);
-                                       10       synchronize_rcu();
-10       kfree(p);                     11       kfree(p);
-11       return 1;                     12       return 1;
-12     }                               13     }
-13   }                                 14   }
-14   write_unlock(&listmutex);         15   spin_unlock(&listmutex);
-15   return 0;                         16   return 0;
-16 }                                   17 }
+::
+
+  1 int search(long key, int *result)    1 int search(long key, int *result)
+  2 {                                    2 {
+  3   struct list_head *lp;              3   struct list_head *lp;
+  4   struct el *p;                      4   struct el *p;
+  5                                      5
+  6   read_lock(&listmutex);             6   rcu_read_lock();
+  7   list_for_each_entry(p, head, lp) { 7   list_for_each_entry_rcu(p, head, lp) {
+  8     if (p->key == key) {             8     if (p->key == key) {
+  9       *result = p->data;             9       *result = p->data;
+ 10       read_unlock(&listmutex);      10       rcu_read_unlock();
+ 11       return 1;                     11       return 1;
+ 12     }                               12     }
+ 13   }                                 13   }
+ 14   read_unlock(&listmutex);          14   rcu_read_unlock();
+ 15   return 0;                         15   return 0;
+ 16 }                                   16 }
+
+::
+
+  1 int delete(long key)                 1 int delete(long key)
+  2 {                                    2 {
+  3   struct el *p;                      3   struct el *p;
+  4                                      4
+  5   write_lock(&listmutex);            5   spin_lock(&listmutex);
+  6   list_for_each_entry(p, head, lp) { 6   list_for_each_entry(p, head, lp) {
+  7     if (p->key == key) {             7     if (p->key == key) {
+  8       list_del(&p->list);            8       list_del_rcu(&p->list);
+  9       write_unlock(&listmutex);      9       spin_unlock(&listmutex);
+                                        10       synchronize_rcu();
+ 10       kfree(p);                     11       kfree(p);
+ 11       return 1;                     12       return 1;
+ 12     }                               13     }
+ 13   }                                 14   }
+ 14   write_unlock(&listmutex);         15   spin_unlock(&listmutex);
+ 15   return 0;                         16   return 0;
+ 16 }                                   17 }
 
 Either way, the differences are quite small.  Read-side locking moves
to rcu_read_lock() and rcu_read_unlock(), update-side locking moves from
@@ -825,22 +871,27 @@ delete() can now block.  If this is a problem, there is a callback-based
 mechanism that never blocks, namely call_rcu() or kfree_rcu(), that can
 be used in place of synchronize_rcu().
 
+.. _7_whatisRCU:
 
 7.  FULL LIST OF RCU APIs
+-------------------------
 
 The RCU APIs are documented in docbook-format header comments in the
 Linux-kernel source code, but it helps to have a full list of the
 APIs, since there does not appear to be a way to categorize them
 in docbook.  Here is the list, by category.
 
-RCU list traversal:
+RCU list traversal::
 
        list_entry_rcu
+       list_entry_lockless
        list_first_entry_rcu
        list_next_rcu
        list_for_each_entry_rcu
        list_for_each_entry_continue_rcu
        list_for_each_entry_from_rcu
+       list_first_or_null_rcu
+       list_next_or_null_rcu
        hlist_first_rcu
        hlist_next_rcu
        hlist_pprev_rcu
@@ -854,7 +905,7 @@ RCU list traversal:
        hlist_bl_first_rcu
        hlist_bl_for_each_entry_rcu
 
-RCU pointer/list update:
+RCU pointer/list update::
 
        rcu_assign_pointer
        list_add_rcu
@@ -864,10 +915,12 @@ RCU pointer/list update:
        hlist_add_behind_rcu
        hlist_add_before_rcu
        hlist_add_head_rcu
+       hlist_add_tail_rcu
        hlist_del_rcu
        hlist_del_init_rcu
        hlist_replace_rcu
-       list_splice_init_rcu()
+       list_splice_init_rcu
+       list_splice_tail_init_rcu
        hlist_nulls_del_init_rcu
        hlist_nulls_del_rcu
        hlist_nulls_add_head_rcu
@@ -876,7 +929,9 @@ RCU pointer/list update:
        hlist_bl_del_rcu
        hlist_bl_set_first_rcu
 
-RCU:   Critical sections       Grace period            Barrier
+RCU::
+
+       Critical sections       Grace period            Barrier
 
        rcu_read_lock           synchronize_net         rcu_barrier
        rcu_read_unlock         synchronize_rcu
@@ -885,7 +940,9 @@ RCU:        Critical sections       Grace period            Barrier
        rcu_dereference_check   kfree_rcu
        rcu_dereference_protected
 
-bh:    Critical sections       Grace period            Barrier
+bh::
+
+       Critical sections       Grace period            Barrier
 
        rcu_read_lock_bh        call_rcu                rcu_barrier
        rcu_read_unlock_bh      synchronize_rcu
@@ -896,7 +953,9 @@ bh: Critical sections       Grace period            Barrier
        rcu_dereference_bh_protected
        rcu_read_lock_bh_held
 
-sched: Critical sections       Grace period            Barrier
+sched::
+
+       Critical sections       Grace period            Barrier
 
        rcu_read_lock_sched     call_rcu                rcu_barrier
        rcu_read_unlock_sched   synchronize_rcu
@@ -910,7 +969,9 @@ sched:      Critical sections       Grace period            Barrier
        rcu_read_lock_sched_held
 
 
-SRCU:  Critical sections       Grace period            Barrier
+SRCU::
+
+       Critical sections       Grace period            Barrier
 
        srcu_read_lock          call_srcu               srcu_barrier
        srcu_read_unlock        synchronize_srcu
@@ -918,13 +979,14 @@ SRCU:     Critical sections       Grace period            Barrier
        srcu_dereference_check
        srcu_read_lock_held
 
-SRCU:  Initialization/cleanup
+SRCU: Initialization/cleanup::
+
        DEFINE_SRCU
        DEFINE_STATIC_SRCU
        init_srcu_struct
        cleanup_srcu_struct
 
-All:  lockdep-checked RCU-protected pointer access
+All: lockdep-checked RCU-protected pointer access::
 
        rcu_access_pointer
        rcu_dereference_raw
@@ -974,15 +1036,19 @@ g.       Otherwise, use RCU.
 Of course, this all assumes that you have determined that RCU is in fact
 the right tool for your job.
 
+.. _8_whatisRCU:
 
 8.  ANSWERS TO QUICK QUIZZES
+----------------------------
 
-Quick Quiz #1: Why is this argument naive?  How could a deadlock
+Quick Quiz #1:
+               Why is this argument naive?  How could a deadlock
                occur when using this algorithm in a real-world Linux
                kernel?  [Referring to the lock-based "toy" RCU
                algorithm.]
 
-Answer:                Consider the following sequence of events:
+Answer:
+               Consider the following sequence of events:
 
                1.      CPU 0 acquires some unrelated lock, call it
                        "problematic_lock", disabling irq via
@@ -1021,10 +1087,14 @@ Answer:         Consider the following sequence of events:
                approach where tasks in RCU read-side critical sections
                cannot be blocked by tasks executing synchronize_rcu().
 
-Quick Quiz #2: Give an example where Classic RCU's read-side
-               overhead is -negative-.
+:ref:`Back to Quick Quiz #1 <quiz_1>`
+
+Quick Quiz #2:
+               Give an example where Classic RCU's read-side
+               overhead is **negative**.
 
-Answer:                Imagine a single-CPU system with a non-CONFIG_PREEMPT
+Answer:
+               Imagine a single-CPU system with a non-CONFIG_PREEMPT
                kernel where a routing table is used by process-context
                code, but can be updated by irq-context code (for example,
                by an "ICMP REDIRECT" packet).  The usual way of handling
@@ -1046,11 +1116,15 @@ Answer:         Imagine a single-CPU system with a non-CONFIG_PREEMPT
                even the theoretical possibility of negative overhead for
                a synchronization primitive is a bit unexpected.  ;-)
 
-Quick Quiz #3:  If it is illegal to block in an RCU read-side
+:ref:`Back to Quick Quiz #2 <quiz_2>`
+
+Quick Quiz #3:
+               If it is illegal to block in an RCU read-side
                critical section, what the heck do you do in
                PREEMPT_RT, where normal spinlocks can block???
 
-Answer:                Just as PREEMPT_RT permits preemption of spinlock
+Answer:
+               Just as PREEMPT_RT permits preemption of spinlock
                critical sections, it permits preemption of RCU
                read-side critical sections.  It also permits
                spinlocks blocking while in RCU read-side critical
@@ -1069,6 +1143,7 @@ Answer:           Just as PREEMPT_RT permits preemption of spinlock
                Besides, how does the computer know what pizza parlor
                the human being went to???
 
+:ref:`Back to Quick Quiz #3 <quiz_3>`
 
 ACKNOWLEDGEMENTS
 
diff --git a/Documentation/admin-guide/acpi/fan_performance_states.rst b/Documentation/admin-guide/acpi/fan_performance_states.rst
new file mode 100644 (file)
index 0000000..21d233c
--- /dev/null
@@ -0,0 +1,62 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+ACPI Fan Performance States
+===========================
+
+When the optional _FPS object is present under an ACPI device representing a
+fan (for example, PNP0C0B or INT3404), the ACPI fan driver creates additional
+"state*" attributes in the sysfs directory of the ACPI device in question.
+These attributes list properties of fan performance states.
+
+For more information on _FPS refer to the ACPI specification at:
+
+http://uefi.org/specifications
+
+For instance, the contents of the INT3404 ACPI device sysfs directory
+may look as follows::
+
+ $ ls -l /sys/bus/acpi/devices/INT3404:00/
+ total 0
+...
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state0
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state1
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state10
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state11
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state2
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state3
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state4
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state5
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state6
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state7
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state8
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state9
+ -r--r--r-- 1 root root 4096 Dec 13 01:00 status
+ ...
+
+where each of the "state*" files represents one performance state of the fan
+and contains a colon-separated list of 5 integer numbers (fields) with the
+following interpretation::
+
+ control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw
+
+* ``control_percent``: The percent value to be used to set the fan speed to a
+  specific level using the _FSL object (0-100).
+
+* ``trip_point_index``: The active cooling trip point number that corresponds
+  to this performance state (0-9).
+
+* ``speed_rpm``: Speed of the fan in rotations per minute.
+
+* ``noise_level_mdb``: Audible noise emitted by the fan in this state in
+  millidecibels.
+
+* ``power_mw``: Power draw of the fan in this state in milliwatts.
+
+For example::
+
+ $ cat /sys/bus/acpi/devices/INT3404:00/state1
+ 25:0:3200:12500:1250
+
+When a given field is not populated or its value provided by the platform
+firmware is invalid, the "not-defined" string is shown instead of the value.
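
A user-space consumer might parse one of the "state*" files along the
following lines (a hedged sketch; a field that reads "not-defined" simply
makes the conversion match fewer than five values)::

	#include <stdio.h>

	int main(void)
	{
		int control, trip, rpm, noise_mdb, power_mw;
		FILE *f = fopen("/sys/bus/acpi/devices/INT3404:00/state1", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%d:%d:%d:%d:%d", &control, &trip,
			   &rpm, &noise_mdb, &power_mw) == 5)
			printf("%d RPM at %d%% fan control\n", rpm, control);
		fclose(f);
		return 0;
	}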
index 4d13eeea1ecac3ca70d8a9214237e0dd2a05d331..71277689ad97f452fb91ff8738601f6e163d1d65 100644 (file)
@@ -12,3 +12,4 @@ the Linux ACPI support.
    dsdt-override
    ssdt-overlays
    cppc_sysfs
+   fan_performance_states
index 0636bcb60b5a3a644d8411ad68d1171b9fbc534c..3f801461f0f3d2b6a3904bcd2d0441770395410b 100644 (file)
@@ -61,6 +61,8 @@ v1 is available under Documentation/admin-guide/cgroup-v1/.
      5-6. Device
      5-7. RDMA
        5-7-1. RDMA Interface Files
+     5-8. HugeTLB
+       5-8-1. HugeTLB Interface Files
     5-9. Misc
       5-9-1. perf_event
      5-N. Non-normative information
@@ -2056,6 +2058,33 @@ RDMA Interface Files
          mlx4_0 hca_handle=1 hca_object=20
          ocrdma1 hca_handle=1 hca_object=23
 
+HugeTLB
+-------
+
+The HugeTLB controller allows HugeTLB usage to be limited per control group,
+and it enforces that limit during page faults.
+
+HugeTLB Interface Files
+~~~~~~~~~~~~~~~~~~~~~~~
+
+  hugetlb.<hugepagesize>.current
+	Show current usage for "hugepagesize" hugetlb.  It exists for all
+	cgroups except the root cgroup.
+
+  hugetlb.<hugepagesize>.max
+       Set/show the hard limit of "hugepagesize" hugetlb usage.
+	The default value is "max".  It exists for all cgroups except the root cgroup.
+
+  hugetlb.<hugepagesize>.events
+       A read-only flat-keyed file which exists on non-root cgroups.
+
+         max
+		The number of allocation failures due to the HugeTLB limit
+
+  hugetlb.<hugepagesize>.events.local
+	Similar to hugetlb.<hugepagesize>.events but the fields in the file
+	are local to the cgroup, i.e., not hierarchical.  The file-modified event
+	generated on this file reflects only the local events.
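
As a usage sketch, a (hypothetical) cgroup named "demo" under a cgroup v2
mount at /sys/fs/cgroup could be capped at 64 huge pages of 2 MB from C as
follows; the path, group name, and limit are illustrative assumptions::

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/fs/cgroup/demo/hugetlb.2MB.max", "w");

		if (!f)
			return 1;
		/* 64 pages * 2 MiB = 128 MiB, expressed in bytes. */
		fprintf(f, "%llu\n", 64ULL * 2 * 1024 * 1024);
		fclose(f);
		return 0;
	}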
 
 Misc
 ----
index 594095b54b296567d09d1650d281b5a7e4f44c1f..c00f9f11e3f3f6133d4b741244e034ae2a3c430e 100644 (file)
@@ -144,7 +144,7 @@ journal_crypt:algorithm(:key)       (the key is optional)
        Encrypt the journal using given algorithm to make sure that the
        attacker can't read the journal. You can use a block cipher here
        (such as "cbc(aes)") or a stream cipher (for example "chacha20",
-       "salsa20", "ctr(aes)" or "ecb(arc4)").
+       "salsa20" or "ctr(aes)").
 
	The journal contains a history of the last writes to the block device;
	an attacker reading the journal could see the last sector numbers
index 4872fb6d29524593849fbe68491e372d0901dfc3..ec62fcc8eeceed83a1cfd3020ff87595affc6ad0 100644 (file)
@@ -8,6 +8,7 @@ Device Mapper
     cache-policies
     cache
     delay
+    dm-clone
     dm-crypt
     dm-dust
     dm-flakey
index 1c5d2281efc9744df8427f34b99cb6b3883877c4..2a97aaec8b122cd7322744a43413d742a246afa2 100644 (file)
                182 = /dev/perfctr      Performance-monitoring counters
                183 = /dev/hwrng        Generic random number generator
                184 = /dev/cpu/microcode CPU microcode update interface
-               186 = /dev/atomicps     Atomic shapshot of process state data
+               186 = /dev/atomicps     Atomic snapshot of process state data
                187 = /dev/irnet        IrNET device
                188 = /dev/smbusbios    SMBus BIOS
                189 = /dev/ussp_ctl     User space serial port control
index 059ddcbe769d94bfc9a53762939512b02b0d7396..9bc93f0ce0c907fa2cb2b54a86bbb2c7ee417570 100644 (file)
@@ -181,14 +181,17 @@ When mounting an ext4 filesystem, the following options are accepted:
         system after its metadata has been committed to the journal.
 
   commit=nrsec (*)
-        Ext4 can be told to sync all its data and metadata every 'nrsec'
-        seconds. The default value is 5 seconds.  This means that if you lose
-        your power, you will lose as much as the latest 5 seconds of work (your
-        filesystem will not be damaged though, thanks to the journaling).  This
-        default value (or any low value) will hurt performance, but it's good
-        for data-safety.  Setting it to 0 will have the same effect as leaving
-        it at the default (5 seconds).  Setting it to very large values will
-        improve performance.
+        This setting limits the maximum age of the running transaction to
+        'nrsec' seconds.  The default value is 5 seconds.  This means that if
+        you lose your power, you will lose as much as the latest 5 seconds of
+        metadata changes (your filesystem will not be damaged though, thanks
+        to the journaling). This default value (or any low value) will hurt
+        performance, but it's good for data-safety.  Setting it to 0 will have
+        the same effect as leaving it at the default (5 seconds).  Setting it
+        to very large values will improve performance.  Note that, due to
+        delayed allocation, even older data can be lost on power failure,
+        since writeback of that data begins only after the time set in
+        /proc/sys/vm/dirty_expire_centisecs.
 
   barrier=<0|1(*)>, barrier(*), nobarrier
         This enables/disables the use of write barriers in the jbd code.
index ade4e6ec23e03acdaa4277cebfbbf059ff23df45..ec92120a795266abd27a115a399ce2c31eb5638b 100644 (file)
                        1 -- check protection requested by application.
                        Default value is set via a kernel config option.
                        Value can be changed at runtime via
-                               /selinux/checkreqprot.
+                               /sys/fs/selinux/checkreqprot.
 
        cio_ignore=     [S390]
                        See Documentation/s390/common_io.rst for details.
 
        efi=            [EFI]
                        Format: { "old_map", "nochunk", "noruntime", "debug",
-                                 "nosoftreserve" }
+                                 "nosoftreserve", "disable_early_pci_dma",
+                                 "no_disable_early_pci_dma" }
                        old_map [X86-64]: switch to the old ioremap-based EFI
-                       runtime services mapping. 32-bit still uses this one by
-                       default.
+                       runtime services mapping. [Needs CONFIG_X86_UV=y]
                        nochunk: disable reading files in "chunks" in the EFI
                        boot stub, as chunking can cause problems with some
                        firmware implementations.
                        claim. Specify efi=nosoftreserve to disable this
                        reservation and treat the memory by its base type
                        (i.e. EFI_CONVENTIONAL_MEMORY / "System RAM").
+                       disable_early_pci_dma: Disable the busmaster bit on all
+                       PCI bridges while in the EFI boot stub
+                       no_disable_early_pci_dma: Leave the busmaster bit set
+                       on all PCI bridges while in the EFI boot stub
 
        efi_no_storage_paranoia [EFI; X86]
                        Using this parameter you can use more than 50% of
                        0 -- permissive (log only, no denials).
                        1 -- enforcing (deny and log).
                        Default value is 0.
-                       Value can be changed at runtime via /selinux/enforce.
+                       Value can be changed at runtime via
+                       /sys/fs/selinux/enforce.
 
        erst_disable    [ACPI]
                        Disable Error Record Serialization Table (ERST)
                          <cpu number> begins at 0 and the maximum value is
                          "number of CPUs in system - 1".
 
-                       The format of <cpu-list> is described above.
-
+                       managed_irq
+
+                         Isolate the specified CPUs from being targeted by
+                         managed interrupts whose interrupt mask contains
+                         isolated CPUs. The affinity of managed interrupts
+                         is handled by the kernel and cannot be changed via
+                         the /proc/irq/* interfaces.
+
+                         This isolation is best effort and only effective
+                         if the automatically assigned interrupt mask of a
+                         device queue contains isolated and housekeeping
+                         CPUs. If housekeeping CPUs are online then such
+                         interrupts are directed to the housekeeping CPU
+                         so that IO submitted on the housekeeping CPU
+                         cannot disturb the isolated CPU.
+
+                         If a queue's affinity mask contains only isolated
+                         CPUs then this parameter has no effect on the
+                         interrupt routing decision, though interrupts are
+                         only delivered when tasks running on those
+                         isolated CPUs submit IO. IO submitted on
+                         housekeeping CPUs has no influence on those
+                         queues.
 
+                       The format of <cpu-list> is described above.
 
        iucv=           [HW,NET]
 
                        test until boot completes in order to avoid
                        interference.
 
+       rcuperf.kfree_rcu_test= [KNL]
+                       Set to measure performance of kfree_rcu() flooding.
+
+       rcuperf.kfree_nthreads= [KNL]
+                       The number of threads running loops of kfree_rcu().
+
+       rcuperf.kfree_alloc_num= [KNL]
+                       Number of allocations and frees done in an iteration.
+
+       rcuperf.kfree_loops= [KNL]
+                       Number of loops, each performing rcuperf.kfree_alloc_num
+                       allocations and frees.
+
        rcuperf.nreaders= [KNL]
                        Set number of RCU readers.  The value -1 selects
                        N, where N is the number of CPUs.  A value
                        See security/selinux/Kconfig help text.
                        0 -- disable.
                        1 -- enable.
-                       Default value is set via kernel config option.
-                       If enabled at boot time, /selinux/disable can be used
-                       later to disable prior to initial policy load.
+                       Default value is 1.
 
        apparmor=       [APPARMOR] Disable or enable AppArmor at boot time
                        Format: { "0" | "1" }
index e70b365dbc6030e758c20f6aa6165498393dd42d..311cd7cc2b75854f337e54e7a6db2b1bc98cd31d 100644 (file)
@@ -506,6 +506,9 @@ object corresponding to it, as follows:
 ``disable``
        Whether or not this idle state is disabled.
 
+``default_status``
+       The default status of this state, "enabled" or "disabled".
+
 ``latency``
        Exit latency of the idle state in microseconds.
 
diff --git a/Documentation/admin-guide/pm/intel_idle.rst b/Documentation/admin-guide/pm/intel_idle.rst
new file mode 100644 (file)
index 0000000..afbf778
--- /dev/null
@@ -0,0 +1,246 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+==============================================
+``intel_idle`` CPU Idle Time Management Driver
+==============================================
+
+:Copyright: |copy| 2020 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+General Information
+===================
+
+``intel_idle`` is a part of the
+:doc:`CPU idle time management subsystem <cpuidle>` in the Linux kernel
+(``CPUIdle``).  It is the default CPU idle time management driver for the
+Nehalem and later generations of Intel processors, but its level of support
+for a particular processor model depends on whether or not it recognizes that
+processor model and may also depend on information coming from the platform
+firmware.  [To understand ``intel_idle`` it is necessary to know how ``CPUIdle``
+works in general, so this is the time to get familiar with :doc:`cpuidle` if you
+have not done that yet.]
+
+``intel_idle`` uses the ``MWAIT`` instruction to inform the processor that the
+logical CPU executing it is idle and so it may be possible to put some of the
+processor's functional blocks into low-power states.  That instruction takes two
+arguments (passed in the ``EAX`` and ``ECX`` registers of the target CPU), the
+first of which, referred to as a *hint*, can be used by the processor to
+determine what can be done (for details refer to Intel Software Developer’s
+Manual [1]_).  Accordingly, ``intel_idle`` refuses to work with processors in
+which the support for the ``MWAIT`` instruction has been disabled (for example,
+via the platform firmware configuration menu) or which do not support that
+instruction at all.
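
Schematically, idle entry with a given hint looks like the following
simplified sketch (not the driver's actual code; the kernel's real helpers
live in arch/x86/include/asm/mwait.h)::

	static inline void idle_with_hint(unsigned long eax_hint)
	{
		/* Arm the monitor on a per-thread location, then wait. */
		asm volatile("monitor" ::
			     "a" (&current_thread_info()->flags), "c" (0), "d" (0));
		asm volatile("mwait" :: "a" (eax_hint), "c" (0));
	}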
+
+``intel_idle`` is not modular, so it cannot be unloaded, which means that the
+only way to pass early-configuration-time parameters to it is via the kernel
+command line.
+
+
+.. _intel-idle-enumeration-of-states:
+
+Enumeration of Idle States
+==========================
+
+Each ``MWAIT`` hint value is interpreted by the processor as a license to
+reconfigure itself in a certain way in order to save energy.  The processor
+configurations (with reduced power draw) resulting from that are referred to
+as C-states (in the ACPI terminology) or idle states.  The list of meaningful
+``MWAIT`` hint values and idle states (i.e. low-power configurations of the
+processor) corresponding to them depends on the processor model and it may also
+depend on the configuration of the platform.
+
+In order to create a list of available idle states required by the ``CPUIdle``
+subsystem (see :ref:`idle-states-representation` in :doc:`cpuidle`),
+``intel_idle`` can use two sources of information: static tables of idle states
+for different processor models included in the driver itself and the ACPI tables
+of the system.  The former are always used if the processor model at hand is
+recognized by ``intel_idle`` and the latter are used if that is required for
+the given processor model (which is the case for all server processor models
+recognized by ``intel_idle``) or if the processor model is not recognized.
+
+If the ACPI tables are going to be used for building the list of available idle
+states, ``intel_idle`` first looks for a ``_CST`` object under one of the ACPI
+objects corresponding to the CPUs in the system (refer to the ACPI specification
+[2]_ for the description of ``_CST`` and its output package).  Because the
+``CPUIdle`` subsystem expects that the list of idle states supplied by the
+driver will be suitable for all of the CPUs handled by it and ``intel_idle`` is
+registered as the ``CPUIdle`` driver for all of the CPUs in the system, the
+driver looks for the first ``_CST`` object returning at least one valid idle
+state description and such that all of the idle states included in its return
+package are of the FFH (Functional Fixed Hardware) type, which means that the
+``MWAIT`` instruction is expected to be used to tell the processor that it can
+enter one of them.  The return package of that ``_CST`` is then assumed to be
+applicable to all of the other CPUs in the system and the idle state
+descriptions extracted from it are stored in a preliminary list of idle states
+coming from the ACPI tables.  [This step is skipped if ``intel_idle`` is
+configured to ignore the ACPI tables; see `below <intel-idle-parameters_>`_.]
+
+Next, the first (index 0) entry in the list of available idle states is
+initialized to represent a "polling idle state" (a pseudo-idle state in which
+the target CPU continuously fetches and executes instructions), and the
+subsequent (real) idle state entries are populated as follows.
+
+If the processor model at hand is recognized by ``intel_idle``, there is a
+(static) table of idle state descriptions for it in the driver.  In that case,
+the "internal" table is the primary source of information on idle states and the
+information from it is copied to the final list of available idle states.  If
+using the ACPI tables for the enumeration of idle states is not required
+(depending on the processor model), all of the listed idle states are enabled by
+default (so all of them will be taken into consideration by ``CPUIdle``
+governors during CPU idle state selection).  Otherwise, some of the listed idle
+states may not be enabled by default if there are no matching entries in the
+preliminary list of idle states coming from the ACPI tables.  In that case user
+space still can enable them later (on a per-CPU basis) with the help of
+the ``disable`` idle state attribute in ``sysfs`` (see
+:ref:`idle-states-representation` in :doc:`cpuidle`).  This basically means that
+the idle states "known" to the driver may not be enabled by default if they have
+not been exposed by the platform firmware (through the ACPI tables).
+
+If the given processor model is not recognized by ``intel_idle``, but it
+supports ``MWAIT``, the preliminary list of idle states coming from the ACPI
+tables is used for building the final list that will be supplied to the
+``CPUIdle`` core during driver registration.  For each idle state in that list,
+the description, ``MWAIT`` hint and exit latency are copied to the corresponding
+entry in the final list of idle states.  The name of the idle state represented
+by it (to be returned by the ``name`` idle state attribute in ``sysfs``) is
+"CX_ACPI", where X is the index of that idle state in the final list (note that
+the minimum value of X is 1, because 0 is reserved for the "polling" state), and
+its target residency is based on the exit latency value.  Specifically, for
+C1-type idle states the exit latency value is also used as the target residency
+(for compatibility with the majority of the "internal" tables of idle states for
+various processor models recognized by ``intel_idle``) and for the other idle
+state types (C2 and C3) the target residency value is 3 times the exit latency
+(again, that is because it reflects the target residency to exit latency ratio
+in the majority of cases for the processor models recognized by ``intel_idle``).
+All of the idle states in the final list are enabled by default in this case.
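+
+The residency heuristic just described can be summarized in code as follows
+(a sketch with made-up names, not the driver's actual implementation):
+
+.. code-block:: c
+
+    /* Derive a target residency from the ACPI exit latency (microseconds). */
+    static unsigned int acpi_target_residency(unsigned int cstate_type,
+                                              unsigned int exit_latency)
+    {
+            if (cstate_type == 1)           /* C1-type idle states */
+                    return exit_latency;
+            return 3 * exit_latency;        /* C2- and C3-type idle states */
+    }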
+
+
+.. _intel-idle-initialization:
+
+Initialization
+==============
+
+The initialization of ``intel_idle`` starts with checking if the kernel command
+line options forbid the use of the ``MWAIT`` instruction.  If that is the case,
+an error code is returned right away.
+
+The next step is to check whether or not the processor model is known to the
+driver, which determines the idle states enumeration method (see
+`above <intel-idle-enumeration-of-states_>`_), and whether or not the processor
+supports ``MWAIT`` (the initialization fails if that is not the case).  Then,
+the ``MWAIT`` support in the processor is enumerated through ``CPUID`` and the
+driver initialization fails if the level of support is not as expected (for
+example, if the total number of ``MWAIT`` substates returned is 0).
+
+Next, if the driver is not configured to ignore the ACPI tables (see
+`below <intel-idle-parameters_>`_), the idle states information provided by the
+platform firmware is extracted from them.
+
+Then, ``CPUIdle`` device objects are allocated for all CPUs and the list of
+available idle states is created as explained
+`above <intel-idle-enumeration-of-states_>`_.
+
+Finally, ``intel_idle`` is registered with the help of cpuidle_register_driver()
+as the ``CPUIdle`` driver for all CPUs in the system and a CPU online callback
+for configuring individual CPUs is registered via cpuhp_setup_state(), which
+(among other things) causes the callback routine to be invoked for all of the
+CPUs present in the system at that time (each CPU executes its own instance of
+the callback routine).  That routine registers a ``CPUIdle`` device for the CPU
+running it (which enables the ``CPUIdle`` subsystem to operate that CPU) and
+optionally performs some CPU-specific initialization actions that may be
+required for the given processor model.
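+
+In code terms, that final step has roughly the following shape (a sketch based
+on the generic ``CPUIdle`` and CPU hotplug APIs; the callback name is an
+assumption):
+
+.. code-block:: c
+
+    retval = cpuidle_register_driver(&intel_idle_driver);
+    if (retval)
+            return retval;
+    /* Runs on each present CPU now and on every CPU that comes online later. */
+    retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
+                               intel_idle_cpu_online, NULL);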
+
+
+.. _intel-idle-parameters:
+
+Kernel Command Line Options and Module Parameters
+=================================================
+
+The *x86* architecture support code recognizes three kernel command line
+options related to CPU idle time management: ``idle=poll``, ``idle=halt``,
+and ``idle=nomwait``.  If any of them is present in the kernel command line, the
+``MWAIT`` instruction is not allowed to be used, so the initialization of
+``intel_idle`` will fail.
+
+Apart from that, there are two module parameters recognized by ``intel_idle``
+itself that can be set via the kernel command line (they cannot be updated via
+sysfs, so that is the only way to change their values).
+
+The ``max_cstate`` parameter value is the maximum idle state index in the list
+of idle states supplied to the ``CPUIdle`` core during the registration of the
+driver.  It is also the maximum number of regular (non-polling) idle states that
+can be used by ``intel_idle``, so the enumeration of idle states is terminated
+after finding that number of usable idle states (the other idle states that
+potentially might have been used if ``max_cstate`` had been greater are not
+taken into consideration at all).  Setting ``max_cstate`` can prevent
+``intel_idle`` from exposing idle states that are regarded as "too deep" for
+some reason to the ``CPUIdle`` core, but it does so by making them effectively
+invisible until the system is shut down and started again, which may not always
+be desirable.  In practice, it is only really necessary to do that if the idle
+states in question cannot be enabled during system startup, because in the
+working state of the system the CPU power management quality of service (PM
+QoS) feature can be used to prevent ``CPUIdle`` from touching those idle states
+even if they have been enumerated (see :ref:`cpu-pm-qos` in :doc:`cpuidle`).
+Setting ``max_cstate`` to 0 causes the ``intel_idle`` initialization to fail.
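+
+For completeness, the PM QoS alternative mentioned above can be exercised from
+user space through the documented ``/dev/cpu_dma_latency`` interface; a
+minimal sketch (the function name is made up):
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <unistd.h>
+
+    /*
+     * While the returned file descriptor stays open, idle states with an
+     * exit latency greater than max_latency_us will not be used.
+     */
+    int cap_cpu_exit_latency(int32_t max_latency_us)
+    {
+            int fd = open("/dev/cpu_dma_latency", O_RDWR);
+
+            if (fd < 0)
+                    return -1;
+            if (write(fd, &max_latency_us, sizeof(max_latency_us)) < 0) {
+                    close(fd);
+                    return -1;
+            }
+            return fd;  /* keep open for as long as the cap is needed */
+    }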
+
+The ``noacpi`` module parameter (which is recognized by ``intel_idle`` if the
+kernel has been configured with ACPI support) can be set to make the driver
+ignore the system's ACPI tables entirely (it is unset by default).
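+
+Module parameters can be passed on the kernel command line using the usual
+``<module>.<parameter>`` syntax, so, for example, a boot that limits the
+driver to idle states 0 and 1 and makes it ignore the ACPI tables might use::
+
+    intel_idle.max_cstate=1 intel_idle.noacpi=1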
+
+
+.. _intel-idle-core-and-package-idle-states:
+
+Core and Package Levels of Idle States
+======================================
+
+Typically, in a processor supporting the ``MWAIT`` instruction there are (at
+least) two levels of idle states (or C-states).  One level, referred to as
+"core C-states", covers individual cores in the processor, whereas the other
+level, referred to as "package C-states", covers the entire processor package
+and it may also involve other components of the system (GPUs, memory
+controllers, I/O hubs etc.).
+
+Some of the ``MWAIT`` hint values allow the processor to use core C-states only
+(most importantly, that is the case for the ``MWAIT`` hint value corresponding
+to the ``C1`` idle state), but the majority of them give it a license to put
+the target core (i.e. the core containing the logical CPU executing ``MWAIT``
+with the given hint value) into a specific core C-state and then (if possible)
+to enter a specific package C-state at the deeper level.  For example, the
+``MWAIT`` hint value representing the ``C3`` idle state allows the processor to
+put the target core into the low-power state referred to as "core ``C3``" (or
+``CC3``), which happens if all of the logical CPUs (SMT siblings) in that core
+have executed ``MWAIT`` with the ``C3`` hint value (or with a hint value
+representing a deeper idle state).  In addition to that (in the majority of
+cases), it gives the processor a license to put the entire package (possibly
+including some non-CPU components such as a GPU or a memory controller) into
+the low-power state referred to as "package ``C3``" (or ``PC3``).  The
+``PC3`` state is entered if all of the cores have gone into the ``CC3``
+state and (possibly) some additional
+conditions are satisfied (for instance, if the GPU is covered by ``PC3``, it may
+be required to be in a certain GPU-specific low-power state for ``PC3`` to be
+reachable).
+
+As a rule, there is no simple way to make the processor use core C-states only
+if the conditions for entering the corresponding package C-states are met, so
+the logical CPU executing ``MWAIT`` with a hint value that is not core-level
+only (like for ``C1``) must always assume that this may cause the processor to
+enter a package C-state.  [That is why the exit latency and target residency
+values corresponding to the majority of ``MWAIT`` hint values in the "internal"
+tables of idle states in ``intel_idle`` reflect the properties of package
+C-states.]  If using package C-states is not desirable at all, either
+:ref:`PM QoS <cpu-pm-qos>` or the ``max_cstate`` module parameter of
+``intel_idle`` described `above <intel-idle-parameters_>`_ must be used to
+restrict the range of permissible idle states to the ones with core-level only
+``MWAIT`` hint values (like ``C1``).
+
+
+References
+==========
+
+.. [1] *Intel® 64 and IA-32 Architectures Software Developer’s Manual Volume 2B*,
+       https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-2b-manual.html
+
+.. [2] *Advanced Configuration and Power Interface (ACPI) Specification*,
+       https://uefi.org/specifications
index fc298eb1234b07ba70e35ae26d1dd750834419d7..88f717e59a42b3f53f30ac3c6f872b93ac603b45 100644 (file)
@@ -8,6 +8,7 @@ Working-State Power Management
    :maxdepth: 2
 
    cpuidle
+   intel_idle
    cpufreq
    intel_pstate
    intel_epb
index fb5b39f73059d15fdcba0ce922f85006193f098c..ad911be5b5e93e8d931ce1442b901c148fc013bd 100644 (file)
@@ -253,7 +253,7 @@ The following sysctls are available for the XFS filesystem:
        pool.
 
   fs.xfs.speculative_prealloc_lifetime
-               (Units: seconds   Min: 1  Default: 300  Max: 86400)
+       (Units: seconds   Min: 1  Default: 300  Max: 86400)
        The interval at which the background scanning for inodes
        with unused speculative preallocation runs. The scan
        removes unused preallocation from clean inodes and releases
index b6e44884e3ada435d49f1badd4e2c749a5b6b448..41937a8091aaa2a9c2252fc3f98615a4bbb25436 100644 (file)
@@ -117,6 +117,8 @@ infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | RNDR                         | [63-60] |    y    |
+     +------------------------------+---------+---------+
      | TS                           | [55-52] |    y    |
      +------------------------------+---------+---------+
      | FHM                          | [51-48] |    y    |
@@ -200,6 +202,12 @@ infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | I8MM                         | [55-52] |    y    |
+     +------------------------------+---------+---------+
+     | DGH                          | [51-48] |    y    |
+     +------------------------------+---------+---------+
+     | BF16                         | [47-44] |    y    |
+     +------------------------------+---------+---------+
      | SB                           | [39-36] |    y    |
      +------------------------------+---------+---------+
      | FRINTTS                      | [35-32] |    y    |
@@ -234,10 +242,18 @@ infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | F64MM                        | [59-56] |    y    |
+     +------------------------------+---------+---------+
+     | F32MM                        | [55-52] |    y    |
+     +------------------------------+---------+---------+
+     | I8MM                         | [47-44] |    y    |
+     +------------------------------+---------+---------+
      | SM4                          | [43-40] |    y    |
      +------------------------------+---------+---------+
      | SHA3                         | [35-32] |    y    |
      +------------------------------+---------+---------+
+     | BF16                         | [23-20] |    y    |
+     +------------------------------+---------+---------+
      | BitPerm                      | [19-16] |    y    |
      +------------------------------+---------+---------+
      | AES                          | [7-4]   |    y    |
index 7fa3d215ae6a80e5d3dd9b6db3bd221d4b6d8c48..7dfb97dfe416058f20ef3863c9185d9a071f46ec 100644 (file)
@@ -204,6 +204,37 @@ HWCAP2_FRINT
 
     Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
 
+HWCAP2_SVEI8MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001.
+
+HWCAP2_SVEF32MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001.
+
+HWCAP2_SVEF64MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001.
+
+HWCAP2_SVEBF16
+
+    Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001.
+
+HWCAP2_I8MM
+
+    Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001.
+
+HWCAP2_BF16
+
+    Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0001.
+
+HWCAP2_DGH
+
+    Functionality implied by ID_AA64ISAR1_EL1.DGH == 0b0001.
+
+HWCAP2_RNG
+
+    Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.
 
 4. Unused AT_HWCAP bits
 -----------------------
index 99b2545455ff9a6e66e3931bc4c8e847d0dbf12f..9120e59578dcaaf24e5633cd0c1a2ec37b1ea278 100644 (file)
@@ -88,6 +88,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A55      | #1530923        | ARM64_ERRATUM_1530923       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
index fcedc5349ace406585db870f87f3fc632a10959a..640934b6f7b4dfe4827f1156c4639d83c4db8a95 100644 (file)
@@ -25,10 +25,6 @@ good performance with large indices.  If your index can be larger than
 ``ULONG_MAX`` then the XArray is not the data type for you.  The most
 important user of the XArray is the page cache.
 
-Each non-``NULL`` entry in the array has three bits associated with
-it called marks.  Each mark may be set or cleared independently of
-the others.  You can iterate over entries which are marked.
-
 Normal pointers may be stored in the XArray directly.  They must be 4-byte
 aligned, which is true for any pointer returned from kmalloc() and
 alloc_page().  It isn't true for arbitrary user-space pointers,
@@ -41,12 +37,11 @@ When you retrieve an entry from the XArray, you can check whether it is
 a value entry by calling xa_is_value(), and convert it back to
 an integer by calling xa_to_value().
 
-Some users want to store tagged pointers instead of using the marks
-described above.  They can call xa_tag_pointer() to create an
-entry with a tag, xa_untag_pointer() to turn a tagged entry
-back into an untagged pointer and xa_pointer_tag() to retrieve
-the tag of an entry.  Tagged pointers use the same bits that are used
-to distinguish value entries from normal pointers, so each user must
+Some users want to tag the pointers they store in the XArray.  You can
+call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer()
+to turn a tagged entry back into an untagged pointer and xa_pointer_tag()
+to retrieve the tag of an entry.  Tagged pointers use the same bits that
+are used to distinguish value entries from normal pointers, so you must
 decide whether you want to store value entries or tagged pointers in
 any particular XArray.
 
@@ -56,10 +51,9 @@ conflict with value entries or internal entries.
 An unusual feature of the XArray is the ability to create entries which
 occupy a range of indices.  Once stored to, looking up any index in
 the range will return the same entry as looking up any other index in
-the range.  Setting a mark on one index will set it on all of them.
-Storing to any index will store to all of them.  Multi-index entries can
-be explicitly split into smaller entries, or storing ``NULL`` into any
-entry will cause the XArray to forget about the range.
+the range.  Storing to any index will store to all of them.  Multi-index
+entries can be explicitly split into smaller entries, or storing ``NULL``
+into any entry will cause the XArray to forget about the range.
 
 Normal API
 ==========
@@ -87,17 +81,11 @@ If you want to only store a new entry to an index if the current entry
 at that index is ``NULL``, you can use xa_insert() which
 returns ``-EBUSY`` if the entry is not empty.
 
-You can enquire whether a mark is set on an entry by using
-xa_get_mark().  If the entry is not ``NULL``, you can set a mark
-on it by using xa_set_mark() and remove the mark from an entry by
-calling xa_clear_mark().  You can ask whether any entry in the
-XArray has a particular mark set by calling xa_marked().
-
 You can copy entries out of the XArray into a plain array by calling
-xa_extract().  Or you can iterate over the present entries in
-the XArray by calling xa_for_each().  You may prefer to use
-xa_find() or xa_find_after() to move to the next present
-entry in the XArray.
+xa_extract().  Or you can iterate over the present entries in the XArray
+by calling xa_for_each(), xa_for_each_start() or xa_for_each_range().
+You may prefer to use xa_find() or xa_find_after() to move to the next
+present entry in the XArray.
 
 Calling xa_store_range() stores the same entry in a range
 of indices.  If you do this, some of the other operations will behave
@@ -124,6 +112,31 @@ xa_destroy().  If the XArray entries are pointers, you may wish
 to free the entries first.  You can do this by iterating over all present
 entries in the XArray using the xa_for_each() iterator.
 
+Search Marks
+------------
+
+Each entry in the array has three bits associated with it called marks.
+Each mark may be set or cleared independently of the others.  You can
+iterate over marked entries by using the xa_for_each_marked() iterator.
+
+You can enquire whether a mark is set on an entry by using
+xa_get_mark().  If the entry is not ``NULL``, you can set a mark on it
+by using xa_set_mark() and remove the mark from an entry by calling
+xa_clear_mark().  You can ask whether any entry in the XArray has a
+particular mark set by calling xa_marked().  Erasing an entry from the
+XArray causes all marks associated with that entry to be cleared.
+
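+For instance, a minimal (hypothetical) use of marks could look like::
+
+    void mark_and_walk(struct xarray *xa)
+    {
+            unsigned long index;
+            void *entry;
+
+            xa_set_mark(xa, 3, XA_MARK_0);  /* mark the entry at index 3 */
+            xa_for_each_marked(xa, index, entry, XA_MARK_0)
+                    pr_info("marked entry at index %lu\n", index);
+    }
+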
+Setting or clearing a mark on any index of a multi-index entry will
+affect all indices covered by that entry.  Querying the mark on any
+index will return the same result.
+
+There is no way to iterate over entries which are not marked; the data
+structure does not allow this to be implemented efficiently.  There are
+currently no iterators to search for logical combinations of bits (e.g.
+iterate over all entries which have both ``XA_MARK_1`` and ``XA_MARK_2``
+set, or iterate over all entries which have ``XA_MARK_0`` or ``XA_MARK_2``
+set).  It would be possible to add these if a user arises.
+
 Allocating XArrays
 ------------------
 
@@ -180,6 +193,8 @@ No lock needed:
 Takes RCU read lock:
  * xa_load()
  * xa_for_each()
+ * xa_for_each_start()
+ * xa_for_each_range()
  * xa_find()
  * xa_find_after()
  * xa_extract()
@@ -419,10 +434,9 @@ you last processed.  If you have interrupts disabled while iterating,
 then it is good manners to pause the iteration and reenable interrupts
 every ``XA_CHECK_SCHED`` entries.
 
-The xas_get_mark(), xas_set_mark() and
-xas_clear_mark() functions require the xa_state cursor to have
-been moved to the appropriate location in the xarray; they will do
-nothing if you have called xas_pause() or xas_set()
+The xas_get_mark(), xas_set_mark() and xas_clear_mark() functions require
+the xa_state cursor to have been moved to the appropriate location in the
+XArray; they will do nothing if you have called xas_pause() or xas_set()
 immediately before.
 
 You can call xas_set_update() to have a callback function
index 36890b026e7777b206b8da3ca107f9ad81cb89b2..1c4e1825d769590a98aa9371546b8d716bd67ad5 100644 (file)
@@ -251,11 +251,11 @@ selectively from different subsystems.
 .. code-block:: c
 
     struct kcov_remote_arg {
-       unsigned        trace_mode;
-       unsigned        area_size;
-       unsigned        num_handles;
-       uint64_t        common_handle;
-       uint64_t        handles[0];
+       __u32           trace_mode;
+       __u32           area_size;
+       __u32           num_handles;
+       __aligned_u64   common_handle;
+       __aligned_u64   handles[0];
     };
 
     #define KCOV_INIT_TRACE                    _IOR('c', 1, unsigned long)
index ecdfdc9d4b0320f9ea4fb51974bfd5e35dfa36e0..61ae13c44f915db2b8339541798eb0266e10e3d4 100644 (file)
@@ -203,12 +203,12 @@ Test Module
 Kselftest tests the kernel from userspace.  Sometimes things need
 testing from within the kernel, one method of doing this is to create a
 test module.  We can tie the module into the kselftest framework by
-using a shell script test runner.  ``kselftest_module.sh`` is designed
+using a shell script test runner.  ``kselftest/module.sh`` is designed
 to facilitate this process.  There is also a header file provided to
 assist writing kernel modules that are for use with kselftest:
 
- ``tools/testing/selftests/kselftest_module.h``
-- ``tools/testing/selftests/kselftest_module.sh``
+- ``tools/testing/selftests/kselftest/module.sh``
 
 How to use
 ----------
@@ -247,7 +247,7 @@ A bare bones test module might look like this:
 
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-   #include "../tools/testing/selftests/kselftest_module.h"
+   #include "../tools/testing/selftests/kselftest/module.h"
 
    KSTM_MODULE_GLOBALS();
 
@@ -276,7 +276,7 @@ Example test script
 
     #!/bin/bash
     # SPDX-License-Identifier: GPL-2.0+
-    $(dirname $0)/../kselftest_module.sh "foo" test_foo
+    $(dirname $0)/../kselftest/module.sh "foo" test_foo
 
 
 Test Harness
index 26ffb46bdf99d0382d29bde9173c75a61b74deee..c60d760a0eed1f732cca2fd3c51075368a0ce24f 100644 (file)
@@ -9,6 +9,7 @@ KUnit - Unit Testing for the Linux Kernel
 
        start
        usage
+       kunit-tool
        api/index
        faq
 
diff --git a/Documentation/dev-tools/kunit/kunit-tool.rst b/Documentation/dev-tools/kunit/kunit-tool.rst
new file mode 100644 (file)
index 0000000..50d4639
--- /dev/null
@@ -0,0 +1,57 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+kunit_tool How-To
+=================
+
+What is kunit_tool?
+===================
+
+kunit_tool is a script (``tools/testing/kunit/kunit.py``) that aids in building
+the Linux kernel as UML (`User Mode Linux
+<http://user-mode-linux.sourceforge.net/>`_), running KUnit tests, parsing
+the test results and displaying them in a user-friendly manner.
+
+What is a kunitconfig?
+======================
+
+It's just a defconfig that kunit_tool looks for in the base directory.
+kunit_tool uses it to generate a .config as you might expect. In addition, it
+verifies that the generated .config contains the CONFIG options in the
+kunitconfig; this check makes it easy to be sure that a CONFIG option that
+enables a test actually ends up in the generated .config.
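+
+For example, a minimal kunitconfig might contain just the following (the exact
+set of options depends on which tests you want to run):
+
+.. code-block:: none
+
+	CONFIG_KUNIT=y
+	CONFIG_KUNIT_EXAMPLE_TEST=y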
+
+How do I use kunit_tool?
+========================
+
+If a kunitconfig is present at the root directory, all you have to do is:
+
+.. code-block:: bash
+
+       ./tools/testing/kunit/kunit.py run
+
+However, you most likely want to use it with the following options:
+
+.. code-block:: bash
+
+       ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all`
+
+- ``--timeout`` sets a maximum amount of time to allow tests to run.
+- ``--jobs`` sets the number of threads to use to build the kernel.
+
+If you just want to use the defconfig that ships with the kernel, you can
+append the ``--defconfig`` flag as well:
+
+.. code-block:: bash
+
+       ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all` --defconfig
+
+.. note::
+       This command is particularly helpful for getting started because it
+       just works. No kunitconfig needs to be present.
+
+For a list of all the flags supported by kunit_tool, you can run:
+
+.. code-block:: bash
+
+       ./tools/testing/kunit/kunit.py run --help
index aeeddfafeea20befae5a3c28b25f207f226d78fa..4e1d24db6b139f60f0576fae9e30f1b7ad7e5c6a 100644 (file)
@@ -19,21 +19,21 @@ The wrapper can be run with:
 
 .. code-block:: bash
 
-   ./tools/testing/kunit/kunit.py run
+       ./tools/testing/kunit/kunit.py run --defconfig
 
-Creating a kunitconfig
-======================
-The Python script is a thin wrapper around Kbuild as such, it needs to be
-configured with a ``kunitconfig`` file. This file essentially contains the
+For more information on this wrapper (also called kunit_tool) checkout the
+:doc:`kunit-tool` page.
+
+Creating a .kunitconfig
+=======================
+The Python script is a thin wrapper around Kbuild. As such, it needs to be
+configured with a ``.kunitconfig`` file. This file essentially contains the
 regular kernel config, with the specific test targets as well.
 
 .. code-block:: bash
 
-       git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO
        cd $PATH_TO_LINUX_REPO
-       ln -s $PATH_TO_KUNIT_CONFIG_REPO/kunitconfig kunitconfig
-
-You may want to add kunitconfig to your local gitignore.
+       cp arch/um/configs/kunit_defconfig .kunitconfig
 
 Verifying KUnit Works
 ---------------------
@@ -59,8 +59,8 @@ If everything worked correctly, you should see the following:
 followed by a list of tests that are run. All of them should be passing.
 
 .. note::
-   Because it is building a lot of sources for the first time, the ``Building
-   kunit kernel`` step may take a while.
+       Because it is building a lot of sources for the first time, the
+       ``Building KUnit kernel`` step may take a while.
 
 Writing your first test
 =======================
@@ -148,7 +148,7 @@ and the following to ``drivers/misc/Makefile``:
 
        obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o
 
-Now add it to your ``kunitconfig``:
+Now add it to your ``.kunitconfig``:
 
 .. code-block:: none
 
@@ -159,7 +159,7 @@ Now you can run the test:
 
 .. code-block:: bash
 
-       ./tools/testing/kunit/kunit.py
+       ./tools/testing/kunit/kunit.py run
 
 You should see the following failure:
 
index c6e69634e274b40b617b23d539734d344b6c88ae..b9a065ab681eeae52d21463daa12d03c633603bd 100644 (file)
@@ -16,7 +16,7 @@ Organization of this document
 =============================
 
 This document is organized into two main sections: Testing and Isolating
-Behavior. The first covers what a unit test is and how to use KUnit to write
+Behavior. The first covers what unit tests are and how to use KUnit to write
 them. The second covers how to use KUnit to isolate code and make it possible
 to unit test code that was otherwise un-unit-testable.
 
@@ -174,13 +174,13 @@ Test Suites
 ~~~~~~~~~~~
 
 Now obviously one unit test isn't very helpful; the power comes from having
-many test cases covering all of your behaviors. Consequently it is common to
-have many *similar* tests; in order to reduce duplication in these closely
-related tests most unit testing frameworks provide the concept of a *test
-suite*, in KUnit we call it a *test suite*; all it is is just a collection of
-test cases for a unit of code with a set up function that gets invoked before
-every test cases and then a tear down function that gets invoked after every
-test case completes.
+many test cases covering all of a unit's behaviors. Consequently it is common
+to have many *similar* tests; in order to reduce duplication in these closely
+related tests most unit testing frameworks - including KUnit - provide the
+concept of a *test suite*. A *test suite* is just a collection of test cases
+for a unit of code with a set up function that gets invoked before every test
+case and then a tear down function that gets invoked after every test case
+completes.
 
 Example:
 
@@ -211,7 +211,7 @@ KUnit test framework.
 .. note::
    A test case will only be run if it is associated with a test suite.
 
-For more information on these types of things see the :doc:`api/test`.
+For more information on these types of things see the :doc:`api/test`.
 
 Isolating Behavior
 ==================
@@ -338,7 +338,7 @@ We can easily test this code by *faking out* the underlying EEPROM:
                return count;
        }
 
-       ssize_t fake_eeprom_write(struct eeprom *this, size_t offset, const char *buffer, size_t count)
+       ssize_t fake_eeprom_write(struct eeprom *parent, size_t offset, const char *buffer, size_t count)
        {
                struct fake_eeprom *this = container_of(parent, struct fake_eeprom, parent);
 
@@ -454,7 +454,7 @@ KUnit on non-UML architectures
 By default KUnit uses UML as a way to provide dependencies for code under test.
 Under most circumstances KUnit's usage of UML should be treated as an
 implementation detail of how KUnit works under the hood. Nevertheless, there
-are instances where being able to run architecture specific code, or test
+are instances where being able to run architecture specific code or test
 against real hardware is desirable. For these reasons KUnit supports running on
 other architectures.
 
@@ -557,7 +557,7 @@ run your tests on your hardware setup just by compiling for your architecture.
 .. important::
    Always prefer tests that run on UML to tests that only run under a particular
    architecture, and always prefer tests that run under QEMU or another easy
-   (and monitarily free) to obtain software environment to a specific piece of
+   (and monetarily free) to obtain software environment to a specific piece of
    hardware.
 
 Nevertheless, there are still valid reasons to write an architecture or hardware
index 9fbde401a0909c94ca0cc12ae6b3550cb5d540a7..e003a553b98615687fda3834b000b4469791b25c 100644 (file)
@@ -10,6 +10,12 @@ PIT Timer required properties:
 - interrupts: Should contain interrupt for the PIT which is the IRQ line
   shared across all System Controller members.
 
+PIT64B Timer required properties:
+- compatible: Should be "microchip,sam9x60-pit64b"
+- reg: Should contain registers location and length
+- interrupts: Should contain interrupt for PIT64B timer
+- clocks: Should contain the available clock sources for PIT64B timer.
+
 System Timer (ST) required properties:
 - compatible: Should be "atmel,at91rm9200-st", "syscon", "simple-mfd"
 - reg: Should contain registers location and length
index 8a1e38a1d7ab196debe1feb614109598a35928f0..cffe8bb0bad106b35586956649dab8476e2fa5a8 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner platforms device tree bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   $nodename:
index 7713a413c6a7e280b4a8feb0f4ccb74629d4a920..b9ae4ce4a0a064a2aa0e3ef7a62201cb5efb677c 100644 (file)
@@ -5,6 +5,7 @@ Each SATA controller should have its own node.
 
 Required properties:
 - compatible         : should be one or more of
+                       "brcm,bcm7216-ahci"
                        "brcm,bcm7425-ahci"
                        "brcm,bcm7445-ahci"
                        "brcm,bcm-nsp-ahci"
@@ -14,6 +15,12 @@ Required properties:
 - reg-names          : "ahci" and "top-ctrl"
 - interrupts         : interrupt mapping for SATA IRQ
 
+Optional properties:
+
+- reset: for "brcm,bcm7216-ahci" must be a valid reset phandle
+  pointing to the RESCAL reset controller provider node.
+- reset-names: for "brcm,bcm7216-ahci", must be "rescal".
+
 Also see ahci-platform.txt.
 
 Example:
index d2a872286437e4c65c12f94f506283aed588bd4b..f0b3d30fbb76f7401b3f41b2d583b05de58fe278 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A64 Display Engine Bus Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   $nodename:
index be32f087c529303e02db711f4d88a5f44f33be6d..9fe11ceecdba004c5546f30711be002eaca64694 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A23 RSB Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#address-cells":
index 64938fdaea554973d960bd1c3b4f01e130747167..4d382128b711c8cace6414831a0a6ff07238a77e 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner Clock Control Unit Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#clock-cells":
index 80b3e7350a732b8e2fbe4ab39a9a99f7b539bc28..33c7842917f629c8176df1f94ad52e4fc629a271 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 Security System Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index dafc0980c4fa7a241722430c6df62f4f331d1601..0f7074977c04d836a329e147847ceda0d5c46249 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A31 MIPI-DSI Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#address-cells": true
index 0e7987f1cdb799ad01257b445b25f21876ffe65f..d67617f6f74ab10eabd05a1830e9ac448c113f69 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Ronbo RB070D30 DSI Display Panel
 
 maintainers:
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 15abc0f9429fcd8b813d620d8e8a0f115b70a6de..83808199657b731544c5fd0059b9f6639bcdf92b 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 DMA Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: "dma-controller.yaml#"
index 387d599522c708eaef3a201f8afbf7382c88aab2..9e53472be1947d0dcf4aa0fc26ab32f6b6c8031f 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A64 DMA Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: "dma-controller.yaml#"
index 740b7f9b535b29267d834e35181d9782f24b5280..c1676b96daac7e13d370c7c1f32a71f4ea7df3cb 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A31 DMA Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: "dma-controller.yaml#"
index 29dd3ccb1235c59a92eedc96a1bc79442b28f672..e77b08ebcd06502c52d2e21ed37f6fee0e5aba18 100644 (file)
@@ -10,6 +10,7 @@ Required properties:
 - compatible :
        - "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
        - "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
+       - "fsl,ls1028a-edma" for eDMA used similar to that on Vybrid vf610 SoC
 - reg : Specifies base physical address(s) and size of the eDMA registers.
        The 1st region is eDMA control register's address and size.
        The 2nd and the 3rd regions are programmable channel multiplexing
index 9d8bbac27d8b9dae370f087bf85fce15c610d894..c9e97409e8538b8572ca2d89f781511afda1fa7a 100644 (file)
@@ -10,6 +10,9 @@ Required properties:
       "fsl,imx6q-sdma"
       "fsl,imx7d-sdma"
       "fsl,imx8mq-sdma"
+      "fsl,imx8mm-sdma"
+      "fsl,imx8mn-sdma"
+      "fsl,imx8mp-sdma"
  The -to variants should be preferred since they allow determining the
   correct ROM script addresses needed for the driver to work without additional
   firmware.
index ec89782d949884829b76e85f0979f1d0d5b9f41a..3459e77be294df8f15355e59869cf28132641d28 100644 (file)
@@ -1,4 +1,4 @@
-* Ingenic JZ4780 DMA Controller
+* Ingenic XBurst DMA Controller
 
 Required properties:
 
@@ -8,10 +8,12 @@ Required properties:
   * ingenic,jz4770-dma
   * ingenic,jz4780-dma
   * ingenic,x1000-dma
+  * ingenic,x1830-dma
 - reg: Should contain the DMA channel registers location and length, followed
   by the DMA controller registers location and length.
 - interrupts: Should contain the interrupt specifier of the DMA controller.
-- clocks: Should contain a clock specifier for the JZ4780/X1000 PDMA clock.
+- clocks: Should contain a clock specifier for the JZ4780/X1000/X1830 PDMA
+  clock.
 - #dma-cells: Must be <2>. Number of integer cells in the dmas property of
   DMA clients (see below).
 
index 5551e929fd99f6305333362df63034ac8e5aa48e..b7f81c63be8bdc33d150a9efa2c41fbbe8d5a4b8 100644 (file)
@@ -30,6 +30,7 @@ Required Properties:
                - "renesas,dmac-r8a7794" (R-Car E2)
                - "renesas,dmac-r8a7795" (R-Car H3)
                - "renesas,dmac-r8a7796" (R-Car M3-W)
+               - "renesas,dmac-r8a77961" (R-Car M3-W+)
                - "renesas,dmac-r8a77965" (R-Car M3-N)
                - "renesas,dmac-r8a77970" (R-Car V3M)
                - "renesas,dmac-r8a77980" (R-Car V3H)
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
new file mode 100644 (file)
index 0000000..8b5c346
--- /dev/null
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/ti/k3-udma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments K3 NAVSS Unified DMA Device Tree Bindings
+
+maintainers:
+  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+description: |
+  The UDMA-P is intended to perform similar (but significantly upgraded)
+  functions as the packet-oriented DMA used on previous SoC devices. The UDMA-P
+  module supports the transmission and reception of various packet types.
+  The UDMA-P architecture facilitates the segmentation and reassembly of SoC DMA
+  data structure compliant packets to/from smaller data blocks that are natively
+  compatible with the specific requirements of each connected peripheral.
+  Multiple Tx and Rx channels are provided within the DMA which allow multiple
+  segmentation or reassembly operations to be ongoing. The DMA controller
+  maintains state information for each of the channels which allows packet
+  segmentation and reassembly operations to be time division multiplexed between
+  channels in order to share the underlying DMA hardware. An external DMA
+  scheduler is used to control the ordering and rate at which this multiplexing
+  occurs for Transmit operations. The ordering and rate of Receive operations
+  is indirectly controlled by the order in which blocks are pushed into the DMA
+  on the Rx PSI-L interface.
+
+  The UDMA-P also supports acting as both a UTC and UDMA-C for its internal
+  channels. Channels in the UDMA-P can be configured to be either Packet-Based
+  or Third-Party channels on a channel by channel basis.
+
+  All transfers within NAVSS are done between PSI-L source and destination
+  threads.
+  The peripherals serviced by UDMA can be PSI-L native (sa2ul, cpsw, etc) or
+  legacy, non PSI-L native peripherals. In the latter case a special, small PDMA
+  is tasked to act as a bridge between the PSI-L fabric and the legacy
+  peripheral.
+
+  PDMAs can be configured via UDMAP peer registers to match with the
+  configuration of the legacy peripheral.
+
+allOf:
+  - $ref: "../dma-controller.yaml#"
+
+properties:
+  "#dma-cells":
+    const: 1
+    description: |
+      The cell is the PSI-L thread ID of the remote (to UDMAP) end.
+      Valid ranges for the thread ID depend on the data movement direction:
+      for source thread IDs (rx): 0 - 0x7fff
+      for destination thread IDs (tx): 0x8000 - 0xffff
+
+      Please refer to the device documentation for the PSI-L thread map and also
+      the PSI-L peripheral chapter for the correct thread ID.
+
+  compatible:
+    enum:
+      - ti,am654-navss-main-udmap
+      - ti,am654-navss-mcu-udmap
+      - ti,j721e-navss-main-udmap
+      - ti,j721e-navss-mcu-udmap
+
+  reg:
+    maxItems: 3
+
+  reg-names:
+    items:
+      - const: gcfg
+      - const: rchanrt
+      - const: tchanrt
+
+  msi-parent: true
+
+  ti,sci:
+    description: phandle to TI-SCI compatible System controller node
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle
+
+  ti,sci-dev-id:
+    description: TI-SCI device id of UDMAP
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+
+  ti,ringacc:
+    description: phandle to the ring accelerator node
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle
+
+  ti,sci-rm-range-tchan:
+    description: |
+      Array of UDMA tchan resource subtypes for resource allocation for this
+      host
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    # Should be enough
+    maxItems: 255
+
+  ti,sci-rm-range-rchan:
+    description: |
+      Array of UDMA rchan resource subtypes for resource allocation for this
+      host
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    # Should be enough
+    maxItems: 255
+
+  ti,sci-rm-range-rflow:
+    description: |
+      Array of UDMA rflow resource subtypes for resource allocation for this
+      host
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    # Should be enough
+    maxItems: 255
+
+required:
+  - compatible
+  - "#dma-cells"
+  - reg
+  - reg-names
+  - msi-parent
+  - ti,sci
+  - ti,sci-dev-id
+  - ti,ringacc
+  - ti,sci-rm-range-tchan
+  - ti,sci-rm-range-rchan
+  - ti,sci-rm-range-rflow
+
+examples:
+  - |+
+    cbass_main {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        cbass_main_navss: navss@30800000 {
+            compatible = "simple-mfd";
+            #address-cells = <2>;
+            #size-cells = <2>;
+            dma-coherent;
+            dma-ranges;
+            ranges;
+
+            ti,sci-dev-id = <118>;
+
+            main_udmap: dma-controller@31150000 {
+                compatible = "ti,am654-navss-main-udmap";
+                reg = <0x0 0x31150000 0x0 0x100>,
+                      <0x0 0x34000000 0x0 0x100000>,
+                      <0x0 0x35000000 0x0 0x100000>;
+                reg-names = "gcfg", "rchanrt", "tchanrt";
+                #dma-cells = <1>;
+
+                ti,ringacc = <&ringacc>;
+
+                msi-parent = <&inta_main_udmass>;
+
+                ti,sci = <&dmsc>;
+                ti,sci-dev-id = <188>;
+
+                ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
+                                        <0x2>; /* TX_CHAN */
+                ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */
+                                        <0x5>; /* RX_CHAN */
+                ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
+            };
+        };
+
+        mcasp0: mcasp@02B00000 {
+            dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+            dma-names = "tx", "rx";
+        };
+
+        crypto: crypto@4E00000 {
+            compatible = "ti,sa2ul-crypto";
+
+            dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>;
+            dma-names = "tx", "rx1", "rx2";
+        };
+    };
diff --git a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
new file mode 100644 (file)
index 0000000..418e838
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/sifive,gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SiFive GPIO controller
+
+maintainers:
+  - Yash Shah <yash.shah@sifive.com>
+  - Paul Walmsley <paul.walmsley@sifive.com>
+
+properties:
+  compatible:
+    items:
+      - const: sifive,fu540-c000-gpio
+      - const: sifive,gpio0
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    description:
+      interrupt mapping, one per GPIO. Maximum 16 GPIOs.
+    minItems: 1
+    maxItems: 16
+
+  interrupt-controller: true
+
+  "#interrupt-cells":
+    const: 2
+
+  clocks:
+    maxItems: 1
+
+  "#gpio-cells":
+    const: 2
+
+  gpio-controller: true
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-controller
+  - "#interrupt-cells"
+  - clocks
+  - "#gpio-cells"
+  - gpio-controller
+
+additionalProperties: false
+
+examples:
+  - |
+      #include <dt-bindings/clock/sifive-fu540-prci.h>
+      gpio@10060000 {
+        compatible = "sifive,fu540-c000-gpio", "sifive,gpio0";
+        interrupt-parent = <&plic>;
+        interrupts = <7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22>;
+        reg = <0x0 0x10060000 0x0 0x1000>;
+        clocks = <&tlclk PRCI_CLK_TLCLK>;
+        gpio-controller;
+        #gpio-cells = <2>;
+        interrupt-controller;
+        #interrupt-cells = <2>;
+      };
+
+...
diff --git a/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml b/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
new file mode 100644 (file)
index 0000000..2a98220
--- /dev/null
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/adi,adm1177.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADM1177 Hot Swap Controller and Digital Power Monitor
+
+maintainers:
+  - Michael Hennerich <michael.hennerich@analog.com>
+  - Beniamin Bia <beniamin.bia@analog.com>
+
+description: |
+  Analog Devices ADM1177 Hot Swap Controller and Digital Power Monitor
+  https://www.analog.com/media/en/technical-documentation/data-sheets/ADM1177.pdf
+
+properties:
+  compatible:
+    enum:
+      - adi,adm1177
+
+  reg:
+    maxItems: 1
+
+  avcc-supply:
+    description:
+      Phandle to the Avcc power supply
+
+  shunt-resistor-micro-ohms:
+    description:
+      The value of the current sense resistor in microohms. If not provided,
+      the current reading and overcurrent alert are disabled.
+
+  adi,shutdown-threshold-microamp:
+    description:
+      Specifies the current level at which an overcurrent alert occurs.
+      If not provided, the overcurrent alert is configured to max ADC range
+      based on shunt-resistor-micro-ohms.
+
+  adi,vrange-high-enable:
+    description:
+      Specifies which internal voltage divider to be used. A 1 selects
+      a 7:2 voltage divider while a 0 selects a 14:1 voltage divider.
+    type: boolean
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pwmon@5a {
+                compatible = "adi,adm1177";
+                reg = <0x5a>;
+                shunt-resistor-micro-ohms = <50000>; /* 50 mOhm */
+                adi,shutdown-threshold-microamp = <1059000>; /* 1.059 A */
+                adi,vrange-high-enable;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
new file mode 100644 (file)
index 0000000..5d42e13
--- /dev/null
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/pmbus/ti,ucd90320.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UCD90320 power sequencer
+
+maintainers:
+  - Jim Wright <wrightj@linux.vnet.ibm.com>
+
+description: |
+  The UCD90320 is a 32-rail PMBus/I2C addressable power-supply sequencer and
+  monitor. The 24 integrated ADC channels (AMONx) monitor the power supply
+  voltage, current, and temperature. Of the 84 GPIO pins, 8 can be used as
+  digital monitors (DMONx), 32 to enable the power supply (ENx), 24 for
+  margining (MARx), 16 for logical GPO, and 32 GPIs for cascading, and system
+  function.
+
+  http://focus.ti.com/lit/ds/symlink/ucd90320.pdf
+
+properties:
+  compatible:
+    enum:
+      - ti,ucd90320
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        ucd90320@11 {
+            compatible = "ti,ucd90320";
+            reg = <0x11>;
+        };
+    };
index 9346ef6ba61b681e0dd303d58ade4a252a3594cb..6097e8ac46c1f5a9cfdfdb503b92380f28e2cf46 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A31 P2WI (Push/Pull 2 Wires Interface) Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: /schemas/i2c/i2c-controller.yaml#
index 2210f4359c4573a1d74af7dad7dbdc2ed6741d59..8347b1e7c08071023a149cbc5e75e83cded92a8d 100644 (file)
@@ -18,8 +18,10 @@ Optional properties:
 - dma-names: should contain "tx" and "rx".
 - atmel,fifo-size: maximum number of data the RX and TX FIFOs can store for FIFO
   capable I2C controllers.
-- i2c-sda-hold-time-ns: TWD hold time, only available for "atmel,sama5d4-i2c"
-  and "atmel,sama5d2-i2c".
+- i2c-sda-hold-time-ns: TWD hold time, only available for:
+       "atmel,sama5d4-i2c",
+       "atmel,sama5d2-i2c",
+       "microchip,sam9x60-i2c".
 - Child nodes conforming to i2c bus binding
 
 Examples :
index b68be3aaf587c94401783878d9a21fc809ef7c3b..e1f6d64bdccd188497b33b67ab744293810bfab0 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/iio/adc/adi,ad7292.yaml#
@@ -53,7 +53,8 @@ patternProperties:
         description: |
           The channel number. It can have up to 8 channels numbered from 0 to 7.
         items:
-          maximum: 7
+          - minimum: 0
+            maximum: 7
 
       diff-channels:
         description: see Documentation/devicetree/bindings/iio/adc/adc.txt
index d74962c0f5ae367a8a023a24277b356f5e9b317e..15c514b83583b81cce8ac27bda28d5be6b65bab9 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A33 Thermal Sensor Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#io-channel-cells":
index b3bd8ef7fbd6101bc110fbd5cc101e9e865d8465..5b3b71c9c0183ccfe8eb00649de1d4ad7e5fdaf4 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 LRADC Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 23a202d24e437bc8c0c098c28a22e5f55d37ba06..953d875b5e74aab183787caf6aec267b9a9e7322 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 Interrupt Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: /schemas/interrupt-controller.yaml#
index 8cd08cfb25bef7e1b1388e898bb51f97e8469086..cf09055da78b2c95a3548860e2bd04d179e33570 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A20 Non-Maskable Interrupt Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: /schemas/interrupt-controller.yaml#
index 684bb1cd75eca4495249626b33e88ee90c638d4f..23b18b92c5581210f658777216b66c131587dbd3 100644 (file)
@@ -17,6 +17,7 @@ Required properties:
     "amlogic,meson-axg-gpio-intc" for AXG SoCs (A113D, A113X)
     "amlogic,meson-g12a-gpio-intc" for G12A SoCs (S905D2, S905X2, S905Y2)
     "amlogic,meson-sm1-gpio-intc" for SM1 SoCs (S905D3, S905X3, S905Y3)
+    "amlogic,meson-a1-gpio-intc" for A1 SoCs (A113L)
 - reg : Specifies base physical address and size of the registers.
 - interrupt-controller : Identifies the node as an interrupt controller.
 - #interrupt-cells : Specifies the number of cells needed to encode an
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt
new file mode 100644 (file)
index 0000000..251ed44
--- /dev/null
@@ -0,0 +1,23 @@
+Aspeed AST25XX and AST26XX SCU Interrupt Controller
+
+Required Properties:
+ - #interrupt-cells            : must be 1
+ - compatible                  : must be "aspeed,ast2500-scu-ic",
+                                 "aspeed,ast2600-scu-ic0" or
+                                 "aspeed,ast2600-scu-ic1"
+ - interrupts                  : interrupt from the parent controller
+ - interrupt-controller                : indicates that the controller receives and
+                                 fires new interrupts for child busses
+
+Example:
+
+    syscon@1e6e2000 {
+        ranges = <0 0x1e6e2000 0x1a8>;
+
+        scu_ic: interrupt-controller@18 {
+            #interrupt-cells = <1>;
+            compatible = "aspeed,ast2500-scu-ic";
+            interrupts = <21>;
+            interrupt-controller;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,intmux.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,intmux.yaml
new file mode 100644 (file)
index 0000000..43c6eff
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/fsl,intmux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale INTMUX interrupt multiplexer
+
+maintainers:
+  - Joakim Zhang <qiangqing.zhang@nxp.com>
+
+properties:
+  compatible:
+    const: fsl,imx-intmux
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    minItems: 1
+    maxItems: 8
+    description: |
+      Should contain the parent interrupt lines (up to 8) used to multiplex
+      the input interrupts.
+
+  interrupt-controller: true
+
+  '#interrupt-cells':
+    const: 2
+    description: |
+      The first cell is the hardware interrupt number; the second cell is
+      the channel index.
+
+  clocks:
+    description: ipg clock.
+
+  clock-names:
+    const: ipg
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - interrupt-controller
+  - '#interrupt-cells'
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    interrupt-controller@37400000 {
+        compatible = "fsl,imx-intmux";
+        reg = <0x37400000 0x1000>;
+        interrupts = <0 16 4>,
+                     <0 17 4>,
+                     <0 18 4>,
+                     <0 19 4>,
+                     <0 20 4>,
+                     <0 21 4>,
+                     <0 22 4>,
+                     <0 23 4>;
+        interrupt-controller;
+        interrupt-parent = <&gic>;
+        #interrupt-cells = <2>;
+        clocks = <&clk>;
+        clock-names = "ipg";
+    };
index d3e423fcb6c2ee643529f67527eda8ba88c3d32c..0f6374ceaa697164fe55f58185ec0eeb99c0cc36 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 CMOS Sensor Interface (CSI) Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 description: |-
   The Allwinner A10 and later has a CMOS Sensor Interface to retrieve
index dea36d68cdbedf312ee80ea5d7e5a50e1bb3f427..7838804700d66a2bfa7661280bd96ce65b7308fd 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 Infrared Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: "rc.yaml#"
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml b/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
new file mode 100644 (file)
index 0000000..c9e6c22
--- /dev/null
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/fsl/imx8m-ddrc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: i.MX8M DDR Controller
+
+maintainers:
+  - Leonard Crestez <leonard.crestez@nxp.com>
+
+description:
+  The DDRC block is integrated in i.MX8M for interfacing with DDR based
+  memories.
+
+  It supports switching between different frequencies at runtime, but during
+  this process RAM itself briefly becomes inaccessible, so the actual
+  frequency switch is implemented by TF-A code running from an SRAM area.
+
+  The Linux driver for the DDRC doesn't even map the registers (they're
+  included for the sake of "describing hardware"); it mostly just exposes
+  firmware capabilities through standard Linux mechanisms such as devfreq
+  and OPP tables.
+
+properties:
+  compatible:
+    items:
+      - enum:
+        - fsl,imx8mn-ddrc
+        - fsl,imx8mm-ddrc
+        - fsl,imx8mq-ddrc
+      - const: fsl,imx8m-ddrc
+
+  reg:
+    maxItems: 1
+    description:
+      Base address and size of DDRC CTL area.
+      This is not currently mapped by the imx8m-ddrc driver.
+
+  clocks:
+    maxItems: 4
+
+  clock-names:
+    items:
+      - const: core
+      - const: pll
+      - const: alt
+      - const: apb
+
+  operating-points-v2: true
+  opp-table: true
+
+required:
+  - reg
+  - compatible
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/imx8mm-clock.h>
+    ddrc: memory-controller@3d400000 {
+        compatible = "fsl,imx8mm-ddrc", "fsl,imx8m-ddrc";
+        reg = <0x3d400000 0x400000>;
+        clock-names = "core", "pll", "alt", "apb";
+        clocks = <&clk IMX8MM_CLK_DRAM_CORE>,
+                 <&clk IMX8MM_DRAM_PLL>,
+                 <&clk IMX8MM_CLK_DRAM_ALT>,
+                 <&clk IMX8MM_CLK_DRAM_APB>;
+        operating-points-v2 = <&ddrc_opp_table>;
+    };
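
The &ddrc_opp_table phandle in the example is a regular operating-points-v2
table. A minimal sketch, with illustrative frequencies not taken from this
binding, could be:

    ddrc_opp_table: opp-table {
        compatible = "operating-points-v2";

        opp-25M {
            opp-hz = /bits/ 64 <25000000>;
        };

        opp-750M {
            opp-hz = /bits/ 64 <750000000>;
        };
    };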
index 30d9fb193d7fce7a6d550a165b4842a24783a891..22a94b6fdbdee478366360a412dbc54ced745071 100644 (file)
@@ -60,7 +60,8 @@ patternProperties:
             maximum: 1066000000
 
           nvidia,emem-configuration:
-            $ref: /schemas/types.yaml#/definitions/uint32-array
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32-array
             description: |
               Values to be written to the EMEM register block. See section
               "15.6.1 MC Registers" in the TRM.
index 7fe0ca14e324f3afe603dba11c704dc961590383..e4135bac6957344b36aa42664d6c723448b66aa8 100644 (file)
@@ -56,7 +56,8 @@ patternProperties:
             maximum: 900000000
 
           nvidia,emc-auto-cal-interval:
-            $ref: /schemas/types.yaml#/definitions/uint32
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
             description:
               Pad calibration interval in microseconds.
             minimum: 0
@@ -78,7 +79,8 @@ patternProperties:
               Mode Register 0.
 
           nvidia,emc-zcal-cnt-long:
-            $ref: /schemas/types.yaml#/definitions/uint32
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32
             description:
               Number of EMC clocks to wait before issuing any commands after
               sending ZCAL_MRW_CMD.
@@ -96,7 +98,8 @@ patternProperties:
               FBIO "read" FIFO periodic resetting enabled.
 
           nvidia,emc-configuration:
-            $ref: /schemas/types.yaml#/definitions/uint32-array
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32-array
             description:
               EMC timing characterization data. These are the registers
               (see section "18.13.2 EMC Registers" in the TRM) whose values
index 84fd57bcf0dcd723e73f8182f718f23c717b8804..4b9196c832915f08adde760272762b7a83f73dbc 100644 (file)
@@ -77,7 +77,8 @@ patternProperties:
             maximum: 900000000
 
           nvidia,emem-configuration:
-            $ref: /schemas/types.yaml#/definitions/uint32-array
+            allOf:
+              - $ref: /schemas/types.yaml#/definitions/uint32-array
             description: |
               Values to be written to the EMEM register block. See section
               "18.13.1 MC Registers" in the TRM.
index 4b1a09acb98b7e59c61437221d95e2e2fff3c4d1..39afacc447b208f149d8b0ef9cb2ab981be633e2 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 Resistive Touchscreen Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#thermal-sensor-cells":
index 64bca41031d513d2e766fff40ded84961239d82b..e82c9a07b6fb020f773984b3b50754af2d6f2780 100644 (file)
@@ -11,7 +11,7 @@ allOf:
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#address-cells": true
index 733b64a4d8eb73ea4424b6cb30d95c08189263eb..ae2074184528bd6ca910ffca814e7537d0d85bb9 100644 (file)
@@ -11,28 +11,43 @@ Required properties:
 - compatible: should be one of the following
   - "brcm,bcm7425-sdhci"
   - "brcm,bcm7445-sdhci"
+  - "brcm,bcm7216-sdhci"
 
 Refer to clocks/clock-bindings.txt for generic clock consumer properties.
 
 Example:
 
-       sdhci@f03e0100 {
-               compatible = "brcm,bcm7425-sdhci";
-               reg = <0xf03e0000 0x100>;
-               interrupts = <0x0 0x26 0x0>;
-               sdhci,auto-cmd12;
-               clocks = <&sw_sdio>;
+       sdhci@84b0000 {
                sd-uhs-sdr50;
                sd-uhs-ddr50;
+               sd-uhs-sdr104;
+               sdhci,auto-cmd12;
+               compatible = "brcm,bcm7216-sdhci",
+                          "brcm,bcm7445-sdhci",
+                          "brcm,sdhci-brcmstb";
+               reg = <0x84b0000 0x260 0x84b0300 0x200>;
+               reg-names = "host", "cfg";
+               interrupts = <0x0 0x26 0x4>;
+               interrupt-names = "sdio0_0";
+               clocks = <&scmi_clk 245>;
+               clock-names = "sw_sdio";
        };
 
-       sdhci@f03e0300 {
+       sdhci@84b1000 {
+               mmc-ddr-1_8v;
+               mmc-hs200-1_8v;
+               mmc-hs400-1_8v;
+               mmc-hs400-enhanced-strobe;
+               supports-cqe;
                non-removable;
                bus-width = <0x8>;
-               compatible = "brcm,bcm7425-sdhci";
-               reg = <0xf03e0200 0x100>;
-               interrupts = <0x0 0x27 0x0>;
-               sdhci,auto-cmd12;
-               clocks = <sw_sdio>;
-               mmc-hs200-1_8v;
+               compatible = "brcm,bcm7216-sdhci",
+                          "brcm,bcm7445-sdhci",
+                          "brcm,sdhci-brcmstb";
+               reg = <0x84b1000 0x260 0x84b1300 0x200>;
+               reg-names = "host", "cfg";
+               interrupts = <0x0 0x27 0x4>;
+               interrupt-names = "sdio1_0";
+               clocks = <&scmi_clk 245>;
+               clock-names = "sw_sdio";
        };
index 2fb466ca2a9de9241932b796ce58763e43b1ccb5..c93643fceabb1fc6153de4c78904248764bf017a 100644 (file)
@@ -21,6 +21,7 @@ Required properties:
               "fsl,imx8mq-usdhc"
               "fsl,imx8mm-usdhc"
               "fsl,imx8mn-usdhc"
+              "fsl,imx8mp-usdhc"
               "fsl,imx8qxp-usdhc"
 
 Optional properties:
index bc08fc43a9be466bdf909ac6bf2cedc021209b7f..e6cc47844207049b26f414ef0c12d954b45bea02 100644 (file)
@@ -23,7 +23,8 @@ Required properties:
                "renesas,sdhi-r8a7793" - SDHI IP on R8A7793 SoC
                "renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
                "renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
-               "renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
+               "renesas,sdhi-r8a7796" - SDHI IP on R8A77960 SoC
+               "renesas,sdhi-r8a77961" - SDHI IP on R8A77961 SoC
                "renesas,sdhi-r8a77965" - SDHI IP on R8A77965 SoC
                "renesas,sdhi-r8a77970" - SDHI IP on R8A77970 SoC
                "renesas,sdhi-r8a77980" - SDHI IP on R8A77980 SoC
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
deleted file mode 100644 (file)
index 6f629b1..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-* Rockchip specific extensions to the Synopsys Designware Mobile
-  Storage Host Controller
-
-The Synopsys designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsys dw mshc controller properties described
-by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsys Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
-       - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
-                                                       before RK3288
-       - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
-       - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
-       - "rockchip,px30-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip PX30
-       - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
-       - "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK322x
-       - "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3328
-       - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
-       - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
-
-Optional Properties:
-* clocks: from common clock binding: if ciu-drive and ciu-sample are
-  specified in clock-names, should contain handles to these clocks.
-
-* clock-names: Apart from the clock-names described in synopsys-dw-mshc.txt
-  two more clocks "ciu-drive" and "ciu-sample" are supported. They are used
-  to control the clock phases, "ciu-sample" is required for tuning high-
-  speed modes.
-
-* rockchip,default-sample-phase: The default phase to set ciu-sample at
-  probing, low speeds or in case where all phases work at tuning time.
-  If not specified 0 deg will be used.
-
-* rockchip,desired-num-phases: The desired number of times that the host
-  execute tuning when needed. If not specified, the host will do tuning
-  for 360 times, namely tuning for each degree.
-
-Example:
-
-       rkdwmmc0@12200000 {
-               compatible = "rockchip,rk3288-dw-mshc";
-               reg = <0x12200000 0x1000>;
-               interrupts = <0 75 0>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-       };
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
new file mode 100644 (file)
index 0000000..89c3edd
--- /dev/null
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/rockchip-dw-mshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip designware mobile storage host controller device tree bindings
+
+description:
+  Rockchip uses the Synopsys designware mobile storage host controller
+  to interface a SoC with storage media such as eMMC or SD/MMC cards.
+  This file documents the properties of the core Synopsys dw mshc controller
+  that are not already covered by synopsys-dw-mshc-common.yaml, combined
+  with the Rockchip-specific extensions.
+
+allOf:
+  - $ref: "synopsys-dw-mshc-common.yaml#"
+
+maintainers:
+  - Heiko Stuebner <heiko@sntech.de>
+
+# Everything else is described in the common file
+properties:
+  compatible:
+    oneOf:
+      # for Rockchip RK2928 and before RK3288
+      - const: rockchip,rk2928-dw-mshc
+      # for Rockchip RK3288
+      - const: rockchip,rk3288-dw-mshc
+      - items:
+          - enum:
+            # for Rockchip PX30
+            - rockchip,px30-dw-mshc
+            # for Rockchip RK3036
+            - rockchip,rk3036-dw-mshc
+            # for Rockchip RK322x
+            - rockchip,rk3228-dw-mshc
+            # for Rockchip RK3308
+            - rockchip,rk3308-dw-mshc
+            # for Rockchip RK3328
+            - rockchip,rk3328-dw-mshc
+            # for Rockchip RK3368
+            - rockchip,rk3368-dw-mshc
+            # for Rockchip RK3399
+            - rockchip,rk3399-dw-mshc
+            # for Rockchip RV1108
+            - rockchip,rv1108-dw-mshc
+          - const: rockchip,rk3288-dw-mshc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 4
+    description:
+      Handles to the "biu" and "ciu" clocks for the bus interface unit clock
+      and the card interface unit clock. If "ciu-drive" and "ciu-sample" are
+      specified in clock-names, this property should also contain handles
+      to these clocks.
+
+  clock-names:
+    minItems: 2
+    items:
+      - const: biu
+      - const: ciu
+      - const: ciu-drive
+      - const: ciu-sample
+    description:
+      Apart from the clock names "biu" and "ciu", two more clocks,
+      "ciu-drive" and "ciu-sample", are supported. They are used to
+      control the clock phases; "ciu-sample" is required for tuning
+      high-speed modes.
+
+  rockchip,default-sample-phase:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 0
+    maximum: 360
+    default: 0
+    description:
+      The default phase to set "ciu-sample" to at probe time, at low
+      speeds, or in cases where all phases work at tuning time.
+      If not specified, 0 degrees is used.
+
+  rockchip,desired-num-phases:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 0
+    maximum: 360
+    default: 360
+    description:
+      The desired number of times that the host executes tuning when needed.
+      If not specified, the host will tune 360 times,
+      i.e. once for each degree.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+examples:
+  - |
+    #include <dt-bindings/clock/rk3288-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    sdmmc: mmc@ff0c0000 {
+      compatible = "rockchip,rk3288-dw-mshc";
+      reg = <0x0 0xff0c0000 0x0 0x4000>;
+      interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+               <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+      clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+      resets = <&cru SRST_MMC0>;
+      reset-names = "reset";
+      fifo-depth = <0x100>;
+      max-frequency = <150000000>;
+    };
+
+...
index 503c6dbac1b271024e30b8ac28f98bff0bbff9aa..69edfd4d392269219e072c56fad3a64b96d3d77b 100644 (file)
@@ -5,11 +5,16 @@ Documentation/devicetree/bindings/mmc/mmc.txt and the properties used by the
 sdhci-of-at91 driver.
 
 Required properties:
-- compatible:          Must be "atmel,sama5d2-sdhci".
+- compatible:          Must be "atmel,sama5d2-sdhci" or "microchip,sam9x60-sdhci".
 - clocks:              Phandlers to the clocks.
-- clock-names:         Must be "hclock", "multclk", "baseclk";
+- clock-names:         Must be "hclock", "multclk", "baseclk" for
+                       "atmel,sama5d2-sdhci".
+                       Must be "hclock", "multclk" for "microchip,sam9x60-sdhci".
 
 Optional properties:
+- assigned-clocks:     The same as "multclk".
+- assigned-clock-rates: The rate of "multclk", so as not to rely on the
+                       gck configuration set by previous components.
 - microchip,sdcal-inverted: when present, polarity on the SDCAL SoC pin is
   inverted. The default polarity for this signal is described in the datasheet.
   For instance on SAMA5D2, the pin is usually tied to the GND with a resistor
@@ -17,10 +22,12 @@ Optional properties:
 
 Example:
 
-sdmmc0: sdio-host@a0000000 {
+mmc0: sdio-host@a0000000 {
        compatible = "atmel,sama5d2-sdhci";
        reg = <0xa0000000 0x300>;
        interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>;
        clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>;
        clock-names = "hclock", "multclk", "baseclk";
+       assigned-clocks = <&sdmmc0_gclk>;
+       assigned-clock-rates = <480000000>;
 };
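
For the "microchip,sam9x60-sdhci" variant, which takes only the "hclock" and
"multclk" clocks, a node might look like the sketch below; the address,
interrupt specifier, clock phandles, and rate are placeholders:

	mmc1: sdio-host@90000000 {
		compatible = "microchip,sam9x60-sdhci";
		reg = <0x90000000 0x300>;
		interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
		clocks = <&sdmmc1_hclk>, <&sdmmc1_gclk>;
		clock-names = "hclock", "multclk";
		assigned-clocks = <&sdmmc1_gclk>;
		assigned-clock-rates = <100000000>;
	};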
index da4edb146a983f432ca02f739e2483dd900098a1..7ee639b1af039afe12d46a1eca7ed21288846902 100644 (file)
@@ -19,6 +19,7 @@ Required properties:
                "qcom,msm8996-sdhci", "qcom,sdhci-msm-v4"
                "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5"
                "qcom,qcs404-sdhci", "qcom,sdhci-msm-v5"
+               "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
        NOTE that some old device tree files may be floating around that only
        have the string "qcom,sdhci-msm-v4" without the SoC compatible string
        but doing that should be considered a deprecated practice.
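
As with the earlier entries, the new SoC string is paired with the matching
IP-version string. A minimal sketch, with a placeholder unit address and
register size, and clocks and other required properties omitted:

	sdhc_1: sdhci@7c4000 {
		compatible = "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
		reg = <0x7c4000 0x1000>;
	};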
index 72c4dec7e1db391244bf4ec3da1a7e2bcc43cd86..aeb615ef672a77433d47cb94c598c0e7c29a710a 100644 (file)
@@ -7,6 +7,8 @@ For UHS devices which require tuning, the device tree should have a "cpu_thermal
 Required properties:
 - compatible: Should be "ti,dra7-sdhci" for DRA7 and DRA72 controllers
              Should be "ti,k2g-sdhci" for K2G
+             Should be "ti,am335-sdhci" for am335x controllers
+             Should be "ti,am437-sdhci" for am437x controllers
 - ti,hwmods: Must be "mmc<n>", <n> is controller instance starting 1
             (Not required for K2G).
 - pinctrl-names: Should be subset of "default", "hs", "sdr12", "sdr25", "sdr50",
@@ -15,6 +17,13 @@ Required properties:
                 "hs200_1_8v",
 - pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
 
+Optional properties:
+- dmas:                List of DMA specifiers with the controller-specific format as
+               described in the generic DMA client binding. One TX and one RX
+               specifier is required.
+- dma-names:   List of DMA request names. These strings correspond 1:1 with the
+               DMA specifiers listed in dmas. The strings must be "tx"
+               and "rx" for the TX and RX DMA requests, respectively.
+
 Example:
        mmc1: mmc@4809c000 {
                compatible = "ti,dra7-sdhci";
@@ -22,4 +31,6 @@ Example:
                ti,hwmods = "mmc1";
                bus-width = <4>;
                vmmc-supply = <&vmmc>; /* phandle to regulator node */
+               dmas = <&sdma 61 &sdma 62>;
+               dma-names = "tx", "rx";
        };
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml
new file mode 100644 (file)
index 0000000..890d47a
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/synopsys-dw-mshc-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys Designware Mobile Storage Host Controller Common Properties
+
+allOf:
+  - $ref: "mmc-controller.yaml#"
+
+maintainers:
+  - Ulf Hansson <ulf.hansson@linaro.org>
+
+# Everything else is described in the common file
+properties:
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: reset
+
+  clock-frequency:
+    description:
+      Should be the frequency (in Hz) of the ciu clock. If both this and the
+      ciu clock are specified, the driver will try to set the ciu clock to
+      this rate at probe time.
+
+  fifo-depth:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      The maximum size of the tx/rx FIFOs. If this property is not
+      specified, the default FIFO size is determined from the
+      controller registers.
+
+  card-detect-delay:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - default: 0
+    description:
+      Delay in milliseconds before detecting the card after a card
+      insert event. The default value is 0.
+
+  data-addr:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Override the FIFO address with the value provided by DT. The default
+      FIFO reg offset is assumed to be 0x100 (version < 0x240A) and 0x200
+      (version >= 0x240A) by the driver. If the controller does not follow
+      this rule, use this property to set the FIFO address in the device tree.
+
+  fifo-watermark-aligned:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      A data-done irq is expected when the data length is less than the
+      watermark in PIO mode, but on some SoCs the fifo watermark must be
+      aligned with the data length so that the TX/RX irq can be generated
+      together with the data-done irq. Add this quirk to mark that
+      requirement and force the fifo watermark setting accordingly.
+
+  dmas:
+    maxItems: 1
+
+  dma-names:
+    const: rx-tx
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
deleted file mode 100644 (file)
index 7e5e427..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-* Synopsys Designware Mobile Storage Host Controller
-
-The Synopsys designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsys Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
-       - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
-* #address-cells: should be 1.
-* #size-cells: should be 0.
-
-# Slots (DEPRECATED): The slot specific information are contained within
-  child-nodes with each child-node representing a supported slot. There should
-  be atleast one child node representing a card slot. The name of the child node
-  representing the slot is recommended to be slot@n where n is the unique number
-  of the slot connected to the controller. The following are optional properties
-  which can be included in the slot child node.
-
-       * reg: specifies the physical slot number. The valid values of this
-         property is 0 to (num-slots -1), where num-slots is the value
-         specified by the num-slots property.
-
-       * bus-width: as documented in mmc core bindings.
-
-       * wp-gpios: specifies the write protect gpio line. The format of the
-         gpio specifier depends on the gpio controller. If a GPIO is not used
-         for write-protect, this property is optional.
-
-       * disable-wp: If the wp-gpios property isn't present then (by default)
-         we'd assume that the write protect is hooked up directly to the
-         controller's special purpose write protect line (accessible via
-         the WRTPRT register).  However, it's possible that we simply don't
-         want write protect.  In that case specify 'disable-wp'.
-         NOTE: This property is not required for slots known to always
-         connect to eMMC or SDIO cards.
-
-Optional properties:
-
-* resets: phandle + reset specifier pair, intended to represent hardware
-  reset signal present internally in some host controller IC designs.
-  See Documentation/devicetree/bindings/reset/reset.txt for details.
-
-* reset-names: request name for using "resets" property. Must be "reset".
-       (It will be used together with "resets" property.)
-
-* clocks: from common clock binding: handle to biu and ciu clocks for the
-  bus interface unit clock and the card interface unit clock.
-
-* clock-names: from common clock binding: Shall be "biu" and "ciu".
-  If the biu clock is missing we'll simply skip enabling it.  If the
-  ciu clock is missing we'll just assume that the clock is running at
-  clock-frequency.  It is an error to omit both the ciu clock and the
-  clock-frequency.
-
-* clock-frequency: should be the frequency (in Hz) of the ciu clock.  If this
-  is specified and the ciu clock is specified then we'll try to set the ciu
-  clock to this at probe time.
-
-* fifo-depth: The maximum size of the tx/rx fifo's. If this property is not
-  specified, the default value of the fifo size is determined from the
-  controller registers.
-
-* card-detect-delay: Delay in milli-seconds before detecting card after card
-  insert event. The default value is 0.
-
-* data-addr: Override fifo address with value provided by DT. The default FIFO reg
-  offset is assumed as 0x100 (version < 0x240A) and 0x200(version >= 0x240A) by
-  driver. If the controller does not follow this rule, please use this property
-  to set fifo address in device tree.
-
-* fifo-watermark-aligned: Data done irq is expected if data length is less than
-  watermark in PIO mode. But fifo watermark is requested to be aligned with data
-  length in some SoC so that TX/RX irq can be generated with data done irq. Add this
-  watermark quirk to mark this requirement and force fifo watermark setting
-  accordingly.
-
-* vmmc-supply: The phandle to the regulator to use for vmmc.  If this is
-  specified we'll defer probe until we can find this regulator.
-
-* dmas: List of DMA specifiers with the controller specific format as described
-  in the generic DMA client binding. Refer to dma.txt for details.
-
-* dma-names: request names for generic DMA client binding. Must be "rx-tx".
-  Refer to dma.txt for details.
-
-Aliases:
-
-- All the MSHC controller nodes should be represented in the aliases node using
-  the following format 'mshc{n}' where n is a unique number for the alias.
-
-Example:
-
-The MSHC controller node can be split into two portions, SoC specific and
-board specific portions as listed below.
-
-       dwmmc0@12200000 {
-               compatible = "snps,dw-mshc";
-               clocks = <&clock 351>, <&clock 132>;
-               clock-names = "biu", "ciu";
-               reg = <0x12200000 0x1000>;
-               interrupts = <0 75 0>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               data-addr = <0x200>;
-               fifo-watermark-aligned;
-               resets = <&rst 20>;
-               reset-names = "reset";
-       };
-
-[board specific internal DMA resources]
-
-       dwmmc0@12200000 {
-               clock-frequency = <400000000>;
-               clock-freq-min-max = <400000 200000000>;
-               broken-cd;
-               fifo-depth = <0x80>;
-               card-detect-delay = <200>;
-               vmmc-supply = <&buck8>;
-               bus-width = <8>;
-               cap-mmc-highspeed;
-               cap-sd-highspeed;
-       };
-
-[board specific generic DMA request binding]
-
-       dwmmc0@12200000 {
-               clock-frequency = <400000000>;
-               clock-freq-min-max = <400000 200000000>;
-               broken-cd;
-               fifo-depth = <0x80>;
-               card-detect-delay = <200>;
-               vmmc-supply = <&buck8>;
-               bus-width = <8>;
-               cap-mmc-highspeed;
-               cap-sd-highspeed;
-               dmas = <&pdma 12>;
-               dma-names = "rx-tx";
-       };
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.yaml
new file mode 100644 (file)
index 0000000..05f9f36
--- /dev/null
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/synopsys-dw-mshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys Designware Mobile Storage Host Controller Binding
+
+allOf:
+  - $ref: "synopsys-dw-mshc-common.yaml#"
+
+maintainers:
+  - Ulf Hansson <ulf.hansson@linaro.org>
+
+# Everything else is described in the common file
+properties:
+  compatible:
+    const: snps,dw-mshc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 2
+    maxItems: 2
+    description:
+      Handles to the "biu" and "ciu" clocks for the
+      bus interface unit clock and the card interface unit clock.
+
+  clock-names:
+    items:
+      - const: biu
+      - const: ciu
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+examples:
+  - |
+    mmc@12200000 {
+      compatible = "snps,dw-mshc";
+      reg = <0x12200000 0x1000>;
+      interrupts = <0 75 0>;
+      clocks = <&clock 351>, <&clock 132>;
+      clock-names = "biu", "ciu";
+      dmas = <&pdma 12>;
+      dma-names = "rx-tx";
+      resets = <&rst 20>;
+      reset-names = "reset";
+      vmmc-supply = <&buck8>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+      broken-cd;
+      bus-width = <8>;
+      cap-mmc-highspeed;
+      cap-sd-highspeed;
+      card-detect-delay = <200>;
+      clock-freq-min-max = <400000 200000000>;
+      clock-frequency = <400000000>;
+      data-addr = <0x200>;
+      fifo-depth = <0x80>;
+      fifo-watermark-aligned;
+    };
index b5b3cf5b1ac236770c0a741c934f6edea65dee00..5d3fa412aabd3f91b12463af68990517e3466df5 100644 (file)
@@ -11,7 +11,7 @@ allOf:
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#address-cells": true
index ae4796ec50a0a6612bfc26e70b898d1237713971..8d8560a67abf2750787afa144c06c2e43b702a62 100644 (file)
@@ -11,7 +11,7 @@ allOf:
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index e5562c525ed94baecc2ef24f7b313b01fb1e8a76..767193ec1d3285ec0ebe71a505c543c92c876de9 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 MDIO Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: "mdio.yaml#"
index f683b7104e3eb424092324a4ea5783b1b59a15c9..703d0d8868846dde2ce14a0fafef6acd264bce28 100644 (file)
@@ -11,7 +11,7 @@ allOf:
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 11654d4b80fb7616c73c81660dec001729b9806f..db36b4d8648449927a419961e7308c2151582a76 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A83t EMAC Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 770af7c46114024880da0f5d0e137296b7546b69..a95960ee3feba1077ae7ed0aa6c4d43c8d6e5467 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 CAN Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 27e1b4cebfbd42ad0923bf6bd90521a50a7a925b..6bdcc3f84bd3c4ec25b23bd04309d4b4b322a9dd 100644 (file)
@@ -10,7 +10,6 @@ Required properties:
        - #size-cells: 0
        - spi-max-frequency: Maximum frequency of the SPI bus the chip can
                             operate at should be less than or equal to 18 MHz.
-       - device-wake-gpios: Wake up GPIO to wake up the TCAN device.
        - interrupt-parent: the phandle to the interrupt controller which provides
                     the interrupt.
        - interrupts: interrupt specification for data-ready.
@@ -23,6 +22,7 @@ Optional properties:
                       reset.
        - device-state-gpios: Input GPIO that indicates if the device is in
                              a sleep state or if the device is active.
+       - device-wake-gpios: Wake up GPIO to wake up the TCAN device.
 
 Example:
 tcan4x5x: tcan4x5x@0 {
@@ -36,5 +36,5 @@ tcan4x5x: tcan4x5x@0 {
                interrupts = <14 GPIO_ACTIVE_LOW>;
                device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
                device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
-               reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
+               reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
 };
index 299c0dcd67db456fd8ded1b9ff71719ecaef9d2c..250f8d8cdce4bc17d0acc1bdb3ee2bb7ecf3fb34 100644 (file)
@@ -403,6 +403,19 @@ PROPERTIES
                The settings and programming routines for internal/external
                MDIO are different. Must be included for internal MDIO.
 
+- fsl,erratum-a011043
+               Usage: optional
+               Value type: <boolean>
+               Definition: Indicates the presence of the A011043 erratum:
+               MDIO reads to internal PCS registers may set the
+               MDIO_CFG[MDIO_RD_ER] bit even when there is no error and the
+               read data (MDIO_DATA[MDIO_DATA]) is correct, so software may
+               see false read errors. As a workaround, all internal
+               MDIO accesses should ignore the MDIO_CFG[MDIO_RD_ER] bit.
+
 For internal PHY device on internal mdio bus, a PHY node should be created.
 See the definition of the PHY node in booting-without-of.txt for an
 example of how to define a PHY (Internal PHY has no interrupt line).
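
A minimal sketch of how the new flag would be set on an affected internal
MDIO node; the node name, compatible string, and addresses are placeholders:

	mdio@fd000 {
		compatible = "fsl,fman-memac-mdio";
		reg = <0xfd000 0x1000>;
		fsl,erratum-a011043;
	};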
index 4845e29411e460151458a97a39256cff9deff8e7..e08cd4c4d5682f7e7e7b53df24c6b36891d28226 100644 (file)
@@ -347,6 +347,7 @@ allOf:
               - st,spear600-gmac
 
     then:
+      properties:
         snps,tso:
           $ref: /schemas/types.yaml#definitions/flag
           description:
index 81ae8cafabc1444b584d8cfc393b433287fbe2ff..ac8c76369a867b55e37a01fa51446e611889c13f 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/net/ti,cpsw-switch.yaml#
@@ -44,7 +44,6 @@ properties:
     description: CPSW functional clock
 
   clock-names:
-    maxItems: 1
     items:
       - const: fck
 
@@ -70,7 +69,6 @@ properties:
       Phandle to the system control device node which provides access to
       efuse IO range with MAC addresses
 
-
   ethernet-ports:
     type: object
     properties:
@@ -82,8 +80,6 @@ properties:
     patternProperties:
       "^port@[0-9]+$":
           type: object
-          minItems: 1
-          maxItems: 2
           description: CPSW external ports
 
           allOf:
@@ -91,23 +87,20 @@ properties:
 
           properties:
             reg:
-              maxItems: 1
-              enum: [1, 2]
+              items:
+                - enum: [1, 2]
               description: CPSW port number
 
             phys:
-              $ref: /schemas/types.yaml#definitions/phandle-array
               maxItems: 1
               description:  phandle on phy-gmii-sel PHY
 
             label:
-              $ref: /schemas/types.yaml#/definitions/string-array
-              maxItems: 1
               description: label associated with this port
 
             ti,dual-emac-pvid:
-              $ref: /schemas/types.yaml#/definitions/uint32
-              maxItems: 1
+              allOf:
+                - $ref: /schemas/types.yaml#/definitions/uint32
               minimum: 1
               maximum: 1024
               description:
@@ -136,7 +129,6 @@ properties:
         description: CPTS reference clock
 
       clock-names:
-        maxItems: 1
         items:
           - const: cpts
 
@@ -201,7 +193,7 @@ examples:
                         phys = <&phy_gmii_sel 1>;
                         phy-handle = <&ethphy0_sw>;
                         phy-mode = "rgmii";
-                        ti,dual_emac_pvid = <1>;
+                        ti,dual-emac-pvid = <1>;
                 };
 
                 cpsw_port2: port@2 {
@@ -211,7 +203,7 @@ examples:
                         phys = <&phy_gmii_sel 2>;
                         phy-handle = <&ethphy1_sw>;
                         phy-mode = "rgmii";
-                        ti,dual_emac_pvid = <2>;
+                        ti,dual-emac-pvid = <2>;
                 };
         };
 
index 659b02002a35c24148cad4dbdf12e43627a19143..daf1321d76ad8356bd7f636ad4c101e39a102549 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 Security ID Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 allOf:
   - $ref: "nvmem.yaml#"
index fa46670de2992dd1bde4809e397a905e8f77e72f..230d74f22136c6cee6d6170627e6e982858a3e50 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A31 MIPI D-PHY Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#phy-cells":
index cd0503b6fe36fff23f47ab53c03da3b8ab534257..bfefd09d8c1e43ab3abba75930decf004b7a95af 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 Pin Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#gpio-cells":
diff --git a/Documentation/devicetree/bindings/power/avs/qcom,cpr.txt b/Documentation/devicetree/bindings/power/avs/qcom,cpr.txt
new file mode 100644 (file)
index 0000000..ab0d5eb
--- /dev/null
@@ -0,0 +1,130 @@
+QCOM CPR (Core Power Reduction)
+
+CPR (Core Power Reduction) is a technology to reduce core power on a CPU
+or other device. Each OPP of a device corresponds to a "corner" that has
+a range of valid voltages for a particular frequency. While the device is
+running at a particular frequency, CPR monitors dynamic factors such as
+temperature and suggests adjustments to the voltage to save power
+and meet silicon characteristic requirements.
+
+- compatible:
+       Usage: required
+       Value type: <string>
+       Definition: should be "qcom,qcs404-cpr", "qcom,cpr" for qcs404
+
+- reg:
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: base address and size of the rbcpr register region
+
+- interrupts:
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: should specify the CPR interrupt
+
+- clocks:
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: phandle to the reference clock
+
+- clock-names:
+       Usage: required
+       Value type: <stringlist>
+       Definition: must be "ref"
+
+- vdd-apc-supply:
+       Usage: required
+       Value type: <phandle>
+       Definition: phandle to the vdd-apc-supply regulator
+
+- #power-domain-cells:
+       Usage: required
+       Value type: <u32>
+       Definition: should be 0
+
+- operating-points-v2:
+       Usage: required
+       Value type: <phandle>
+       Definition: A phandle to the OPP table containing the
+                   performance states supported by the CPR
+                   power domain
+
+- acc-syscon:
+       Usage: optional
+       Value type: <phandle>
+       Definition: phandle to syscon for writing ACC settings
+
+- nvmem-cells:
+       Usage: required
+       Value type: <phandle>
+       Definition: phandles to the nvmem cells containing the data
+                   that makes up each fuse corner, as well as the
+                   CPR fuse revision.
+
+- nvmem-cell-names:
+       Usage: required
+       Value type: <stringlist>
+       Definition: should be "cpr_quotient_offset1", "cpr_quotient_offset2",
+                   "cpr_quotient_offset3", "cpr_init_voltage1",
+                   "cpr_init_voltage2", "cpr_init_voltage3", "cpr_quotient1",
+                   "cpr_quotient2", "cpr_quotient3", "cpr_ring_osc1",
+                   "cpr_ring_osc2", "cpr_ring_osc3", "cpr_fuse_revision"
+                   for qcs404.
+
+Example:
+
+       cpr_opp_table: cpr-opp-table {
+               compatible = "operating-points-v2-qcom-level";
+
+               cpr_opp1: opp1 {
+                       opp-level = <1>;
+                       qcom,opp-fuse-level = <1>;
+               };
+               cpr_opp2: opp2 {
+                       opp-level = <2>;
+                       qcom,opp-fuse-level = <2>;
+               };
+               cpr_opp3: opp3 {
+                       opp-level = <3>;
+                       qcom,opp-fuse-level = <3>;
+               };
+       };
+
+       power-controller@b018000 {
+               compatible = "qcom,qcs404-cpr", "qcom,cpr";
+               reg = <0x0b018000 0x1000>;
+               interrupts = <0 15 IRQ_TYPE_EDGE_RISING>;
+               clocks = <&xo_board>;
+               clock-names = "ref";
+               vdd-apc-supply = <&pms405_s3>;
+               #power-domain-cells = <0>;
+               operating-points-v2 = <&cpr_opp_table>;
+               acc-syscon = <&tcsr>;
+
+               nvmem-cells = <&cpr_efuse_quot_offset1>,
+                       <&cpr_efuse_quot_offset2>,
+                       <&cpr_efuse_quot_offset3>,
+                       <&cpr_efuse_init_voltage1>,
+                       <&cpr_efuse_init_voltage2>,
+                       <&cpr_efuse_init_voltage3>,
+                       <&cpr_efuse_quot1>,
+                       <&cpr_efuse_quot2>,
+                       <&cpr_efuse_quot3>,
+                       <&cpr_efuse_ring1>,
+                       <&cpr_efuse_ring2>,
+                       <&cpr_efuse_ring3>,
+                       <&cpr_efuse_revision>;
+               nvmem-cell-names = "cpr_quotient_offset1",
+                       "cpr_quotient_offset2",
+                       "cpr_quotient_offset3",
+                       "cpr_init_voltage1",
+                       "cpr_init_voltage2",
+                       "cpr_init_voltage3",
+                       "cpr_quotient1",
+                       "cpr_quotient2",
+                       "cpr_quotient3",
+                       "cpr_ring_osc1",
+                       "cpr_ring_osc2",
+                       "cpr_ring_osc3",
+                       "cpr_fuse_revision";
+       };
index 0ac52f83a58cce4f222f20a266fc2d4528416284..4a21fe77ee1d34e650183704b671b74de222e23c 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 PWM Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#pwm-cells":
diff --git a/Documentation/devicetree/bindings/regulator/mp8859.txt b/Documentation/devicetree/bindings/regulator/mp8859.txt
new file mode 100644 (file)
index 0000000..74ad697
--- /dev/null
@@ -0,0 +1,22 @@
+Monolithic Power Systems MP8859 voltage regulator
+
+Required properties:
+- compatible: "mps,mp8859";
+- reg: I2C slave address.
+
+Optional subnode for regulator: "mp8859_dcdc", using common regulator
+bindings given in <Documentation/devicetree/bindings/regulator/regulator.txt>.
+
+Example:
+
+       mp8859: regulator@66 {
+               compatible = "mps,mp8859";
+               reg = <0x66>;
+               dc_12v: mp8859_dcdc {
+                       regulator-name = "dc_12v";
+                       regulator-min-microvolt = <12000000>;
+                       regulator-max-microvolt = <12000000>;
+                       regulator-boot-on;
+                       regulator-always-on;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
new file mode 100644 (file)
index 0000000..a682af0
--- /dev/null
@@ -0,0 +1,121 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/mps,mpq7920.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Monolithic Power System MPQ7920 PMIC
+
+maintainers:
+  - Saravanan Sekar <sravanhome@gmail.com>
+
+properties:
+  $nodename:
+    pattern: "pmic@[0-9a-f]{1,2}"
+  compatible:
+    enum:
+      - mps,mpq7920
+
+  reg:
+    maxItems: 1
+
+  regulators:
+    type: object
+    allOf:
+      - $ref: regulator.yaml#
+    description: |
+      List of regulators provided by this controller; they must be named
+      after their hardware counterparts: BUCK[1-4], one LDORTC, and LDO[2-5]
+
+    properties:
+      mps,switch-freq:
+        allOf:
+          - $ref: "/schemas/types.yaml#/definitions/uint8"
+        enum: [ 0, 1, 2, 3 ]
+        default: 2
+        description: |
+          switching frequency, must be one of the following corresponding
+          values: 1.1MHz, 1.65MHz, 2.2MHz, 2.75MHz
+
+    patternProperties:
+      "^ldo[1-4]$":
+        type: object
+        allOf:
+          - $ref: regulator.yaml#
+
+      "^ldortc$":
+        type: object
+        allOf:
+          - $ref: regulator.yaml#
+
+      "^buck[1-4]$":
+        type: object
+        allOf:
+          - $ref: regulator.yaml#
+
+        properties:
+          mps,buck-softstart:
+            allOf:
+              - $ref: "/schemas/types.yaml#/definitions/uint8"
+            enum: [ 0, 1, 2, 3 ]
+            description: |
+              defines the soft start time of this buck; must be one of the
+              following corresponding values: 150us, 300us, 610us, 920us
+
+          mps,buck-phase-delay:
+            allOf:
+              - $ref: "/schemas/types.yaml#/definitions/uint8"
+            enum: [ 0, 1, 2, 3 ]
+            description: |
+              defines the phase delay of this buck; must be one of the
+              following corresponding values: 0deg, 90deg, 180deg, 270deg
+
+          mps,buck-ovp-disable:
+            type: boolean
+            description: |
+              disables the over-voltage protection of this buck
+
+      additionalProperties: false
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - regulators
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pmic@69 {
+          compatible = "mps,mpq7920";
+          reg = <0x69>;
+
+          regulators {
+            mps,switch-freq = /bits/ 8 <1>;
+
+            buck1 {
+             regulator-name = "buck1";
+             regulator-min-microvolt = <400000>;
+             regulator-max-microvolt = <3587500>;
+             regulator-min-microamp  = <460000>;
+             regulator-max-microamp  = <7600000>;
+             regulator-boot-on;
+             mps,buck-ovp-disable;
+             mps,buck-phase-delay = /bits/ 8 <2>;
+             mps,buck-softstart = /bits/ 8 <1>;
+            };
+
+            ldo2 {
+             regulator-name = "ldo2";
+             regulator-min-microvolt = <650000>;
+             regulator-max-microvolt = <3587500>;
+            };
+         };
+       };
+     };
+...
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml b/Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml
new file mode 100644 (file)
index 0000000..71ce032
--- /dev/null
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/rohm,bd71828-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD71828 Power Management Integrated Circuit regulators
+
+maintainers:
+  - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+description: |
+  This module is part of the ROHM BD71828 MFD device. For more details
+  see Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml.
+
+  The regulator controller is represented as a sub-node of the PMIC node
+  on the device tree.
+
+  Regulator nodes should be named BUCK<number> and LDO<number>.
+  The valid names for BD71828 regulator nodes are
+  BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6, BUCK7 and
+  LDO1, LDO2, LDO3, LDO4, LDO5, LDO6, LDO7.
+
+patternProperties:
+  "^LDO[1-7]$":
+    type: object
+    allOf:
+      - $ref: regulator.yaml#
+    description:
+      Properties for single LDO regulator.
+
+    properties:
+      regulator-name:
+        pattern: "^ldo[1-7]$"
+        description:
+          should be "ldo1", ..., "ldo7"
+
+  "^BUCK[1-7]$":
+    type: object
+    allOf:
+      - $ref: regulator.yaml#
+    description:
+      Properties for single BUCK regulator.
+
+    properties:
+      regulator-name:
+        pattern: "^buck[1-7]$"
+        description:
+          should be "buck1", ..., "buck7"
+
+      rohm,dvs-run-voltage:
+        allOf:
+          - $ref: "/schemas/types.yaml#/definitions/uint32"
+          - minimum: 0
+            maximum: 3300000
+        description:
+          PMIC default "RUN" state voltage in uV. See below table for
+          bucks which support this. 0 means disabled.
+
+      rohm,dvs-idle-voltage:
+        allOf:
+          - $ref: "/schemas/types.yaml#/definitions/uint32"
+          - minimum: 0
+            maximum: 3300000
+        description:
+          PMIC default "IDLE" state voltage in uV. See below table for
+          bucks which support this. 0 means disabled.
+
+      rohm,dvs-suspend-voltage:
+        allOf:
+          - $ref: "/schemas/types.yaml#/definitions/uint32"
+          - minimum: 0
+            maximum: 3300000
+        description:
+          PMIC default "SUSPEND" state voltage in uV. See below table for
+          bucks which support this. 0 means disabled.
+
+      rohm,dvs-lpsr-voltage:
+        allOf:
+          - $ref: "/schemas/types.yaml#/definitions/uint32"
+          - minimum: 0
+            maximum: 3300000
+        description:
+          PMIC default "LPSR" state voltage in uV. See below table for
+          bucks which support this. 0 means disabled.
+
+        # Supported default DVS states:
+        #     buck       |    run     |   idle    | suspend  | lpsr
+        #--------------------------------------------------------------
+        # 1, 2, 6, and 7 | supported  | supported | supported (*)
+        #--------------------------------------------------------------
+        # 3, 4, and 5    |                    supported (**)
+        #--------------------------------------------------------------
+        #
+        #(*)  LPSR and SUSPEND states use the same voltage, but each state
+        #     has its own enable / disable settings. Voltage 0 can be
+        #     specified for a state to keep the regulator disabled in that
+        #     state.
+        #
+        #(**) All states use the same voltage but have their own enable /
+        #     disable settings. Voltage 0 can be specified for a state to
+        #     keep the regulator disabled in that state.
+
+    required:
+      - regulator-name
+  additionalProperties: false
+additionalProperties: false
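
The schema above carries no example; a minimal sketch of a matching
regulators sub-node, with illustrative voltages only, could be:

    regulators {
        BUCK1 {
            regulator-name = "buck1";
            regulator-min-microvolt = <500000>;
            regulator-max-microvolt = <2000000>;
            rohm,dvs-run-voltage = <1150000>;
        };

        LDO1 {
            regulator-name = "ldo1";
            regulator-min-microvolt = <1800000>;
            regulator-max-microvolt = <3300000>;
        };
    };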
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-booster.txt b/Documentation/devicetree/bindings/regulator/st,stm32-booster.txt
deleted file mode 100644 (file)
index 479ad4c..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-STM32 BOOSTER - Booster for ADC analog input switches
-
-Some STM32 devices embed a 3.3V booster supplied by Vdda, that can be used
-to supply ADC analog input switches.
-
-Required properties:
-- compatible: Should be one of:
-  "st,stm32h7-booster"
-  "st,stm32mp1-booster"
-- st,syscfg: Phandle to system configuration controller.
-- vdda-supply: Phandle to the vdda input analog voltage.
-
-Example:
-       booster: regulator-booster {
-               compatible = "st,stm32mp1-booster";
-               st,syscfg = <&syscfg>;
-               vdda-supply = <&vdda>;
-       };
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml b/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml
new file mode 100644 (file)
index 0000000..64f1183
--- /dev/null
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/st,stm32-booster.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 booster for ADC analog input switches bindings
+
+maintainers:
+  - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+description: |
+  Some STM32 devices embed a 3.3V booster supplied by Vdda, that can be used
+  to supply ADC analog input switches.
+
+allOf:
+  - $ref: "regulator.yaml#"
+
+properties:
+  compatible:
+    enum:
+      - st,stm32h7-booster
+      - st,stm32mp1-booster
+
+  st,syscfg:
+    allOf:
+      - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+    description: phandle to system configuration controller.
+
+  vdda-supply:
+    description: phandle to the vdda input analog voltage.
+
+required:
+  - compatible
+  - st,syscfg
+  - vdda-supply
+
+examples:
+  - |
+    regulator-booster {
+      compatible = "st,stm32mp1-booster";
+      st,syscfg = <&syscfg>;
+      vdda-supply = <&vdda>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.txt b/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.txt
deleted file mode 100644 (file)
index 5ddb850..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-STM32 VREFBUF - Voltage reference buffer
-
-Some STM32 devices embed a voltage reference buffer which can be used as
-voltage reference for ADCs, DACs and also as voltage reference for external
-components through the dedicated VREF+ pin.
-
-Required properties:
-- compatible:          Must be "st,stm32-vrefbuf".
-- reg:                 Offset and length of VREFBUF register set.
-- clocks:              Must contain an entry for peripheral clock.
-
-Example:
-       vrefbuf: regulator@58003c00 {
-               compatible = "st,stm32-vrefbuf";
-               reg = <0x58003C00 0x8>;
-               clocks = <&rcc VREF_CK>;
-               regulator-min-microvolt = <1500000>;
-               regulator-max-microvolt = <2500000>;
-               vdda-supply = <&vdda>;
-       };
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml b/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.yaml
new file mode 100644 (file)
index 0000000..33cdaeb
--- /dev/null
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/st,stm32-vrefbuf.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Voltage reference buffer bindings
+
+description: |
+  Some STM32 devices embed a voltage reference buffer which can be used as
+  voltage reference for ADCs, DACs and also as voltage reference for external
+  components through the dedicated VREF+ pin.
+
+maintainers:
+  - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+allOf:
+  - $ref: "regulator.yaml#"
+
+properties:
+  compatible:
+    const: st,stm32-vrefbuf
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  vdda-supply:
+    description: phandle to the vdda input analog voltage.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - vdda-supply
+
+examples:
+  - |
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    vrefbuf@50025000 {
+      compatible = "st,stm32-vrefbuf";
+      reg = <0x50025000 0x8>;
+      regulator-min-microvolt = <1500000>;
+      regulator-max-microvolt = <2500000>;
+      clocks = <&rcc VREF>;
+      vdda-supply = <&vdda>;
+    };
+
+...
+
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt
deleted file mode 100644 (file)
index e372dd3..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-STM32MP1 PWR Regulators
------------------------
-
-Available Regulators in STM32MP1 PWR block are:
-  - reg11 for regulator 1V1
-  - reg18 for regulator 1V8
-  - usb33 for the swtich USB3V3
-
-Required properties:
-- compatible: Must be "st,stm32mp1,pwr-reg"
-- list of child nodes that specify the regulator reg11, reg18 or usb33
-  initialization data for defined regulators. The definition for each of
-  these nodes is defined using the standard binding for regulators found at
-  Documentation/devicetree/bindings/regulator/regulator.txt.
-- vdd-supply: phandle to the parent supply/regulator node for vdd input
-- vdd_3v3_usbfs-supply: phandle to the parent supply/regulator node for usb33
-
-Example:
-
-pwr_regulators: pwr@50001000 {
-       compatible = "st,stm32mp1,pwr-reg";
-       reg = <0x50001000 0x10>;
-       vdd-supply = <&vdd>;
-       vdd_3v3_usbfs-supply = <&vdd_usb>;
-
-       reg11: reg11 {
-               regulator-name = "reg11";
-               regulator-min-microvolt = <1100000>;
-               regulator-max-microvolt = <1100000>;
-       };
-
-       reg18: reg18 {
-               regulator-name = "reg18";
-               regulator-min-microvolt = <1800000>;
-               regulator-max-microvolt = <1800000>;
-       };
-
-       usb33: usb33 {
-               regulator-name = "usb33";
-               regulator-min-microvolt = <3300000>;
-               regulator-max-microvolt = <3300000>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
new file mode 100644 (file)
index 0000000..8d8f38f
--- /dev/null
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/st,stm32mp1-pwr-reg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STM32MP1 PWR voltage regulators
+
+maintainers:
+  - Pascal Paillet <p.paillet@st.com>
+
+properties:
+  compatible:
+    const: st,stm32mp1,pwr-reg
+
+  reg:
+    maxItems: 1
+
+  vdd-supply:
+    description: Input supply phandle(s) for vdd input
+
+  vdd_3v3_usbfs-supply:
+    description: Input supply phandle(s) for vdd_3v3_usbfs input
+
+patternProperties:
+  "^(reg11|reg18|usb33)$":
+    type: object
+
+    allOf:
+      - $ref: "regulator.yaml#"
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    pwr@50001000 {
+      compatible = "st,stm32mp1,pwr-reg";
+      reg = <0x50001000 0x10>;
+      vdd-supply = <&vdd>;
+      vdd_3v3_usbfs-supply = <&vdd_usb>;
+
+      reg11 {
+        regulator-name = "reg11";
+        regulator-min-microvolt = <1100000>;
+        regulator-max-microvolt = <1100000>;
+      };
+
+      reg18 {
+        regulator-name = "reg18";
+        regulator-min-microvolt = <1800000>;
+        regulator-max-microvolt = <1800000>;
+      };
+
+      usb33 {
+        regulator-name = "usb33";
+        regulator-min-microvolt = <3300000>;
+        regulator-max-microvolt = <3300000>;
+      };
+    };
+...
index acf18d170352d242812d43338974a92ff85d7c93..c0d83865e933a2cbe896c5ae847ee7640cbc5637 100644 (file)
@@ -50,6 +50,8 @@ properties:
     description: Should contain the WWDG1 watchdog reset interrupt
     maxItems: 1
 
+  wakeup-source: true
+
   mboxes:
     description:
       This property is required only if the rpmsg/virtio functionality is used.
index 6e5341b4f8919ecda07304b5a2406f33ffa2c00f..ee59409640f244ebf5d985c18f46d91d3fbd9f6a 100644 (file)
@@ -22,6 +22,6 @@ Example:
        };
 
        &ethernet_switch {
-               resets = <&reset>;
+               resets = <&reset 26>;
                reset-names = "switch";
        };
index 46d69c32b89b89506f28a3a73dc1803fa9490a21..478b0234e8fa97416db572c50ce58fed4c5b88c7 100644 (file)
@@ -11,7 +11,7 @@ allOf:
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index d7a57ec4a6400bec0f0107fb1aeba516a1175bb1..37c2a601c3fa8ab075cbc2a7817876386a0600f3 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A31 RTC Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#clock-cells":
index ee9712f1c97d44b23bd2cf61b0cbe0c60f7b85fe..2ecab8ed702a20abfa5d1e44b89e267852df1c06 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 PS2 Host Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 description:
   A20 PS2 is dual role controller (PS2 host and PS2 device). These
diff --git a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.txt
new file mode 100644 (file)
index 0000000..59758cc
--- /dev/null
@@ -0,0 +1,59 @@
+* Texas Instruments K3 NavigatorSS Ring Accelerator
+
+The Ring Accelerator (RA) is a machine which converts read/write accesses
+from/to a constant address into corresponding read/write accesses from/to a
+circular data structure in memory. The RA eliminates the need for each DMA
+controller that accesses ring elements to track the current state of the ring
+(base address, current offset). The DMA controller
+performs a read or write access to a specific address range (which maps to the
+source interface on the RA) and the RA replaces the address for the transaction
+with a new address which corresponds to the head or tail element of the ring
+(head for reads, tail for writes).
+
+The Ring Accelerator is a hardware module that is responsible for accelerating
+management of the packet queues. The K3 SoCs can have more than one RA instance.
+
+Required properties:
+- compatible   : Must be "ti,am654-navss-ringacc";
+- reg          : Should contain register location and length of the following
+                 named register regions.
+- reg-names    : should be
+                 "rt" - The RA Ring Real-time Control/Status Registers
+                 "fifos" - The RA Queues Registers
+                 "proxy_gcfg" - The RA Proxy Global Config Registers
+                 "proxy_target" - The RA Proxy Datapath Registers
+- ti,num-rings : Number of rings supported by RA
+- ti,sci-rm-range-gp-rings : TI-SCI RM subtype for GP ring range
+- ti,sci       : phandle on TI-SCI compatible System controller node
+- ti,sci-dev-id        : TI-SCI device id of the ring accelerator
+- msi-parent   : phandle for "ti,sci-inta" interrupt controller
+
+Optional properties:
+- ti,dma-ring-reset-quirk : enable the software workaround for the ringacc /
+                 udma ring state interoperability issue
+
+Example:
+
+ringacc: ringacc@3c000000 {
+       compatible = "ti,am654-navss-ringacc";
+       reg =   <0x0 0x3c000000 0x0 0x400000>,
+               <0x0 0x38000000 0x0 0x400000>,
+               <0x0 0x31120000 0x0 0x100>,
+               <0x0 0x33000000 0x0 0x40000>;
+       reg-names = "rt", "fifos",
+                   "proxy_gcfg", "proxy_target";
+       ti,num-rings = <818>;
+       ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+       ti,dma-ring-reset-quirk;
+       ti,sci = <&dmsc>;
+       ti,sci-dev-id = <187>;
+       msi-parent = <&inta_main_udmass>;
+};
+
+client:
+
+dma_ipx: dma_ipx@<addr> {
+       ...
+       ti,ringacc = <&ringacc>;
+       ...
+}
index b8f89c7258ebc853a1c416a75c8dc08f634aa8aa..ea1d2efb2aaa36d79790132db1ad6a170a55220e 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 Codec Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#sound-dai-cells":
index eb3992138eecce3cf0283754b9c47a9ab6f8f4ee..112ae00d63c1d6576583b1542705b6a8a7ef500b 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 I2S Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#sound-dai-cells":
index 38d4cede0860dd5aed5402e4cdb34dff5f39a677..444a432912bb4ec7970a2a61e26e33b2b09fac24 100644 (file)
@@ -10,7 +10,7 @@ maintainers:
   - Chen-Yu Tsai <wens@csie.org>
   - Liam Girdwood <lgirdwood@gmail.com>
   - Mark Brown <broonie@kernel.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#sound-dai-cells":
index f290eb72a8781e7b0f2138ac74271cd087583716..3b764415c9abf7a696c7f965cbb36abfac4264d0 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A64 Analog Codec Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 85305b4c2729b74759d319eef9ac5d9cb88234e7..9718358826abdaebd02260385519c7a5b7d34898 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A23 Analog Codec Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 5e7cc05bbff1dc5f3705b58930f3674f89e67e15..55d28268d2f4ff5c0860b02609185311b42b90ce 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A33 Codec Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#sound-dai-cells":
index 6d1329c281707fbefe0aa403b860c6105d5a4a71..8036499112f5a858e00f8edf0fa425eacf7db76f 100644 (file)
@@ -11,7 +11,7 @@ allOf:
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#address-cells": true
index f36c46d236d7a6afdf82ee2a7af704ecbf68e1a6..0565dc49e44940dc6326f2ee2c1672542df653d2 100644 (file)
@@ -11,7 +11,7 @@ allOf:
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   "#address-cells": true
index 1fd9a4406a1dbc0363644b4a5122462461e1ebc9..b98203ca656d3a7ec655750d60958313648e399f 100644 (file)
@@ -12,6 +12,7 @@ Required properties:
  - clock-names: Should be "clk_apb5".
  - pinctrl-names : a pinctrl state named "default" must be defined.
  - pinctrl-0 : phandle referencing pin configuration of the device.
+ - resets : phandle to the reset control for this device.
  - cs-gpios: Specifies the gpio pins to be used for chipselects.
             See: Documentation/devicetree/bindings/spi/spi-bus.txt
 
@@ -19,16 +20,6 @@ Optional properties:
 - clock-frequency : Input clock frequency to the PSPI block in Hz.
                    Default is 25000000 Hz.
 
-Aliases:
-- All the SPI controller nodes should be represented in the aliases node using
-  the following format 'spi{n}' withe the correct numbered in "aliases" node.
-
-Example:
-
-aliases {
-       spi0 = &spi0;
-};
-
 spi0: spi@f0200000 {
        compatible = "nuvoton,npcm750-pspi";
        reg = <0xf0200000 0x1000>;
@@ -39,5 +30,6 @@ spi0: spi@f0200000 {
        interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clk NPCM7XX_CLK_APB5>;
        clock-names = "clk_apb5";
+       resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_PSPI1>;
        cs-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
 };
index 732339275848c12a52c438656f97e3e74a44b9ff..1e0ca6ccf64bbd0a2e257d8dbcda2d829fcb742f 100644 (file)
@@ -111,7 +111,7 @@ patternProperties:
       spi-rx-bus-width:
         allOf:
           - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [ 1, 2, 4 ]
+          - enum: [ 1, 2, 4, 8 ]
           - default: 1
         description:
           Bus width to the SPI bus used for MISO.
@@ -123,7 +123,7 @@ patternProperties:
       spi-tx-bus-width:
         allOf:
           - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [ 1, 2, 4 ]
+          - enum: [ 1, 2, 4, 8 ]
           - default: 1
         description:
           Bus width to the SPI bus used for MOSI.
diff --git a/Documentation/devicetree/bindings/spi/spi-stm32.txt b/Documentation/devicetree/bindings/spi/spi-stm32.txt
deleted file mode 100644 (file)
index d82755c..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-STMicroelectronics STM32 SPI Controller
-
-The STM32 SPI controller is used to communicate with external devices using
-the Serial Peripheral Interface. It supports full-duplex, half-duplex and
-simplex synchronous serial communication with external devices. It supports
-from 4 to 32-bit data size. Although it can be configured as master or slave,
-only master is supported by the driver.
-
-Required properties:
-- compatible: Should be one of:
-  "st,stm32h7-spi"
-  "st,stm32f4-spi"
-- reg: Offset and length of the device's register set.
-- interrupts: Must contain the interrupt id.
-- clocks: Must contain an entry for spiclk (which feeds the internal clock
-         generator).
-- #address-cells:  Number of cells required to define a chip select address.
-- #size-cells: Should be zero.
-
-Optional properties:
-- resets: Must contain the phandle to the reset controller.
-- A pinctrl state named "default" may be defined to set pins in mode of
-  operation for SPI transfer.
-- dmas: DMA specifiers for tx and rx dma. DMA fifo mode must be used. See the
-  STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt.
-- dma-names: DMA request names should include "tx" and "rx" if present.
-- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
-  Documentation/devicetree/bindings/spi/spi-bus.txt
-
-
-Child nodes represent devices on the SPI bus
-  See ../spi/spi-bus.txt
-
-Optional properties:
-- st,spi-midi-ns: Only for STM32H7, (Master Inter-Data Idleness) minimum time
-                 delay in nanoseconds inserted between two consecutive data
-                 frames.
-
-
-Example:
-       spi2: spi@40003800 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "st,stm32h7-spi";
-               reg = <0x40003800 0x400>;
-               interrupts = <36>;
-               clocks = <&rcc SPI2_CK>;
-               resets = <&rcc 1166>;
-               dmas = <&dmamux1 0 39 0x400 0x01>,
-                      <&dmamux1 1 40 0x400 0x01>;
-               dma-names = "rx", "tx";
-               pinctrl-0 = <&spi2_pins_b>;
-               pinctrl-names = "default";
-               cs-gpios = <&gpioa 11 0>;
-
-               aardvark@0 {
-                       compatible = "totalphase,aardvark";
-                       reg = <0>;
-                       spi-max-frequency = <4000000>;
-                       st,spi-midi-ns = <4000>;
-               };
-       };
index f99c733d75c12b6d393eb8b48f66c2cbdeaba56b..5bb4a8f1df7a2869afcd70e83017bb2799b1c374 100644 (file)
@@ -1,7 +1,7 @@
 Atmel SPI device
 
 Required properties:
-- compatible : should be "atmel,at91rm9200-spi".
+- compatible : should be "atmel,at91rm9200-spi" or "microchip,sam9x60-spi".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain spi interrupt
 - cs-gpios: chipselects (optional for SPI controller version >= 2 with the
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
new file mode 100644 (file)
index 0000000..f0d9796
--- /dev/null
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/st,stm32-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 SPI Controller bindings
+
+description: |
+  The STM32 SPI controller is used to communicate with external devices using
+  the Serial Peripheral Interface. It supports full-duplex, half-duplex and
+  simplex synchronous serial communication with external devices. It supports
+  from 4 to 32-bit data size.
+
+maintainers:
+  - Erwan Leray <erwan.leray@st.com>
+  - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+allOf:
+  - $ref: "spi-controller.yaml#"
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32f4-spi
+
+    then:
+      properties:
+        st,spi-midi-ns: false
+
+properties:
+  compatible:
+    enum:
+      - st,stm32f4-spi
+      - st,stm32h7-spi
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  dmas:
+    description: |
+      DMA specifiers for tx and rx dma. DMA FIFO mode must be used. See
+      the STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt.
+    items:
+      - description: rx DMA channel
+      - description: tx DMA channel
+
+  dma-names:
+    items:
+      - const: rx
+      - const: tx
+
+patternProperties:
+  "^[a-zA-Z][a-zA-Z0-9,+\\-._]{0,63}@[0-9a-f]+$":
+    type: object
+    # SPI slave nodes must be children of the SPI master node and can
+    # contain the following properties.
+    properties:
+      st,spi-midi-ns:
+        description: |
+          Only for STM32H7, (Master Inter-Data Idleness) minimum time
+          delay in nanoseconds inserted between two consecutive data frames.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/reset/stm32mp1-resets.h>
+    spi@4000b000 {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      compatible = "st,stm32h7-spi";
+      reg = <0x4000b000 0x400>;
+      interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&rcc SPI2_K>;
+      resets = <&rcc SPI2_R>;
+      dmas = <&dmamux1 0 39 0x400 0x05>,
+             <&dmamux1 1 40 0x400 0x05>;
+      dma-names = "rx", "tx";
+      cs-gpios = <&gpioa 11 0>;
+
+      aardvark@0 {
+        compatible = "totalphase,aardvark";
+        reg = <0>;
+        spi-max-frequency = <4000000>;
+        st,spi-midi-ns = <4000>;
+      };
+    };
+
+...
index 20adc1c8e9cccb33a94f385966c02df961f0fa59..23e989e0976630382d70ab5ddaeeb3b4c3542b61 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 Timer Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index dfa0c41fd261d166b95af163f195f45358798bc8..40fc4bcb31457db7374f4266839894469cd2366f 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A13 High-Speed Timer Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index a444cfc5852a6ab6c02f3ec84dd8153ab6fbf67c..a747fabab7d3fda45fba1b74424e85fc9fe65b90 100644 (file)
@@ -29,6 +29,8 @@ Required Properties:
     - "renesas,r8a77470-cmt1" for the 48-bit CMT1 device included in r8a77470.
     - "renesas,r8a774a1-cmt0" for the 32-bit CMT0 device included in r8a774a1.
     - "renesas,r8a774a1-cmt1" for the 48-bit CMT devices included in r8a774a1.
+    - "renesas,r8a774b1-cmt0" for the 32-bit CMT0 device included in r8a774b1.
+    - "renesas,r8a774b1-cmt1" for the 48-bit CMT devices included in r8a774b1.
     - "renesas,r8a774c0-cmt0" for the 32-bit CMT0 device included in r8a774c0.
     - "renesas,r8a774c0-cmt1" for the 48-bit CMT devices included in r8a774c0.
     - "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790.
index 0af70fc8de5a764003f08e6cf940f52d4d84c86b..d9207bf9d8946e4ee9c799f1adc92ec5906d0359 100644 (file)
@@ -8,7 +8,7 @@ title: Allwinner A10 mUSB OTG Controller Device Tree Bindings
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 3a54f58683a0ed01292c43b2554f63d295461a5d..e8f226376108178037de849d474715ce2d6d678c 100644 (file)
@@ -11,7 +11,7 @@ allOf:
 
 maintainers:
   - Chen-Yu Tsai <wens@csie.org>
-  - Maxime Ripard <maxime.ripard@bootlin.com>
+  - Maxime Ripard <mripard@kernel.org>
 
 properties:
   compatible:
index 45953f17150007d750b5b12b41e0ac31599f302e..a9a7a3c84c63d2730d2573896bca9d0fa003da76 100644 (file)
@@ -151,6 +151,93 @@ The details of these operations are:
      Note that callbacks will always be invoked from the DMA
      engines tasklet, never from interrupt context.
 
+  Optional: per descriptor metadata
+  ---------------------------------
+  DMAengine provides two ways for metadata support.
+
+  DESC_METADATA_CLIENT
+
+    The metadata buffer is allocated/provided by the client driver and it is
+    attached to the descriptor.
+
+  .. code-block:: c
+
+     int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+                                  void *data, size_t len);
+
+  DESC_METADATA_ENGINE
+
+    The metadata buffer is allocated/managed by the DMA driver. The client
+    driver can ask for the pointer, maximum size and the currently used size of
+    the metadata and can directly update or read it.
+
+    Because the DMA driver manages the memory area containing the metadata,
+    clients must make sure that they do not try to access or get the pointer
+    after their transfer completion callback has run for the descriptor.
+    If no completion callback has been defined for the transfer, then the
+    metadata must not be accessed after issue_pending.
+    In other words: if the aim is to read back metadata after the transfer is
+    completed, then the client must use a completion callback.
+
+  .. code-block:: c
+
+     void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+               size_t *payload_len, size_t *max_len);
+
+     int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+               size_t payload_len);
+
+  Client drivers can query if a given mode is supported with:
+
+  .. code-block:: c
+
+     bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+               enum dma_desc_metadata_mode mode);
+
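+  For example, a client intending to use DESC_METADATA_CLIENT might check
+  (a minimal sketch; ``chan`` is assumed to be an already requested channel):
+
+  .. code-block:: c
+
+     if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT))
+             return -ENOTSUPP;
+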
+  Depending on the mode used, client drivers must follow a different flow;
+  a minimal sketch follows each mode's list of steps below.
+
+  DESC_METADATA_CLIENT
+
+    - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+      1. prepare the descriptor (dmaengine_prep_*)
+         construct the metadata in the client's buffer
+      2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+         descriptor
+      3. submit the transfer
+    - DMA_DEV_TO_MEM:
+      1. prepare the descriptor (dmaengine_prep_*)
+      2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+         descriptor
+      3. submit the transfer
+      4. when the transfer is completed, the metadata should be available in the
+         attached buffer
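+
+  A minimal DESC_METADATA_CLIENT sketch for the DMA_DEV_TO_MEM flow above
+  might look like this (``chan``, ``buf`` and ``len`` are assumed to exist,
+  and error handling is abbreviated):
+
+  .. code-block:: c
+
+     struct dma_async_tx_descriptor *desc;
+     /* client-owned buffer; must stay valid until the completion callback */
+     static u8 metadata[32];
+
+     desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
+                                        DMA_PREP_INTERRUPT);
+     if (!desc)
+             return -ENOMEM;
+
+     /* attach the client buffer before submitting the transfer */
+     if (dmaengine_desc_attach_metadata(desc, metadata, sizeof(metadata)))
+             return -EINVAL;
+
+     dmaengine_submit(desc);
+     dma_async_issue_pending(chan);
+
+     /* after the completion callback has run, metadata[] holds the data */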
+
+  DESC_METADATA_ENGINE
+
+    - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+      1. prepare the descriptor (dmaengine_prep_*)
+      2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the
+         engine's metadata area
+      3. update the metadata at the pointer
+      4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the
+         amount of data the client has placed into the metadata buffer
+      5. submit the transfer
+    - DMA_DEV_TO_MEM:
+      1. prepare the descriptor (dmaengine_prep_*)
+      2. submit the transfer
+      3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get
+         the pointer to the engine's metadata area
+      4. read out the metadata from the pointer
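+
+  A minimal DESC_METADATA_ENGINE sketch for the DMA_MEM_TO_DEV flow above
+  might look like this (again ``chan``, ``buf`` and ``len`` are assumed to
+  exist, and error handling is abbreviated):
+
+  .. code-block:: c
+
+     struct dma_async_tx_descriptor *desc;
+     size_t payload_len, max_len;
+     void *mdata;
+
+     desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
+                                        DMA_PREP_INTERRUPT);
+     if (!desc)
+             return -ENOMEM;
+
+     /* the DMA driver owns the metadata area; ask for a pointer to it */
+     mdata = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
+     if (IS_ERR(mdata))
+             return PTR_ERR(mdata);
+
+     /* construct the metadata, then report how many bytes are valid */
+     memset(mdata, 0, max_len);
+     dmaengine_desc_set_metadata_len(desc, max_len);
+
+     dmaengine_submit(desc);
+     dma_async_issue_pending(chan);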
+
+  .. note::
+
+     When DESC_METADATA_ENGINE mode is used, the metadata area for the
+     descriptor is no longer valid after the transfer has been completed
+     (it is valid up to the point when the completion callback returns, if
+     used).
+
+     Mixed use of DESC_METADATA_CLIENT / DESC_METADATA_ENGINE is not allowed;
+     client drivers must use only one of the two modes per descriptor.
+
 4. Submit the transaction
 
    Once the descriptor has been prepared and the callback information
index dfc4486b5743c127fa98d8b6dc0a9e4b9659d562..790a15089f1f3d8f47917283a632b5b866f69848 100644 (file)
@@ -247,6 +247,54 @@ after each transfer. In case of a ring buffer, they may loop
 (DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
 are typically fixed.
 
+Per descriptor metadata support
+-------------------------------
+Some data movement architectures (DMA controller and peripherals) use metadata
+associated with a transaction. The DMA controller's role is to transfer the
+payload and the metadata alongside.
+The metadata itself is not used by the DMA engine, but it contains
+parameters, keys, vectors, etc. for the peripheral or from the peripheral.
+
+The DMAengine framework provides a generic way to facilitate metadata for
+descriptors. Depending on the architecture, the DMA driver can implement
+either or both of the methods, and it is up to the client driver to choose
+which one to use.
+
+- DESC_METADATA_CLIENT
+
+  The metadata buffer is allocated/provided by the client driver and it is
+  attached to the descriptor via the dmaengine_desc_attach_metadata() helper.
+
+  From the DMA driver the following is expected for this mode:
+  - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM
+    The data from the provided metadata buffer should be prepared for the DMA
+    controller to be sent alongside the payload data, either by copying it to
+    a hardware descriptor or by placing it in a highly coupled packet.
+  - DMA_DEV_TO_MEM
+    On transfer completion, the DMA driver must copy the metadata to the
+    client-provided metadata buffer before notifying the client about the
+    completion.
+    After the transfer completion, DMA drivers must not touch the metadata
+    buffer provided by the client.
+
+- DESC_METADATA_ENGINE
+
+  The metadata buffer is allocated/managed by the DMA driver. The client driver
+  can ask for the pointer, maximum size and the currently used size of the
+  metadata and can directly update or read it. dmaengine_desc_get_metadata_ptr()
+  and dmaengine_desc_set_metadata_len() are provided as helper functions.
+
+  From the DMA driver the following is expected for this mode:
+  - get_metadata_ptr
+    Should return a pointer to the metadata buffer, the maximum size of the
+    metadata buffer and the currently used / valid (if any) bytes in the buffer.
+  - set_metadata_len
+    It is called by the client after it has placed the metadata in the buffer
+    to let the DMA driver know the number of valid bytes provided.
+
+  Note: since the client will ask for the metadata pointer in the completion
+  callback (in DMA_DEV_TO_MEM case) the DMA driver must ensure that the
+  descriptor is not freed up before the callback has been called.
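+
+A provider wiring up metadata support might look roughly like this (a minimal
+sketch; the foo_* callback names are illustrative assumptions, and only the
+metadata-related lines are shown):
+
+.. code-block:: c
+
+   static struct dma_descriptor_metadata_ops foo_metadata_ops = {
+           .attach = foo_attach_metadata,    /* DESC_METADATA_CLIENT */
+           .get_ptr = foo_get_metadata_ptr,  /* DESC_METADATA_ENGINE */
+           .set_len = foo_set_metadata_len,  /* DESC_METADATA_ENGINE */
+   };
+
+   /* at probe time, advertise which mode(s) the driver implements */
+   dma_dev->desc_metadata_modes = DESC_METADATA_CLIENT | DESC_METADATA_ENGINE;
+
+   /* in the device_prep_* callbacks, before returning the descriptor */
+   desc->metadata_ops = &foo_metadata_ops;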
+
 Device operations
 -----------------
 
index 13046fcf0a5da7c98f92631e69b8eb295b2fa319..20e07e40be020318e7ed15bc5be93a51e3f53a90 100644 (file)
@@ -313,7 +313,6 @@ IOMAP
   devm_ioport_map()
   devm_ioport_unmap()
   devm_ioremap()
-  devm_ioremap_nocache()
   devm_ioremap_uc()
   devm_ioremap_wc()
   devm_ioremap_resource() : checks resource, requests memory region, ioremaps
index 059d58a549c7a6e71da1eae8408f5b6892e69c02..6fb2b0671994efd5d526762e58ef6f2b242a1276 100644 (file)
@@ -23,7 +23,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: |  ok  |
     |          sh: |  ok  |
     |       sparc: | TODO |
index b0c085326e2e68f4ead9063af48beeda0b48dcf9..db6d39c3ae715ee90b876c4660346581f3b67171 100644 (file)
@@ -24,11 +24,11 @@ Here is the main features of EROFS:
  - Metadata & data could be mixed by design;
 
  - 2 inode versions for different requirements:
-                          v1            v2
+                          compact (v1)  extended (v2)
    Inode metadata size:   32 bytes      64 bytes
    Max file size:         4 GB          16 EB (also limited by max. vol size)
    Max uids/gids:         65536         4294967296
-   File creation time:    no            yes (64 + 32-bit timestamp)
+   File change time:      no            yes (64 + 32-bit timestamp)
    Max hardlinks:         65536         4294967296
    Metadata reserved:     4 bytes       14 bytes
 
@@ -39,7 +39,7 @@ Here is the main features of EROFS:
  - Support POSIX.1e ACLs by using xattrs;
 
  - Support transparent file compression as an option:
-   LZ4 algorithm with 4 KB fixed-output compression for high performance;
+   LZ4 algorithm with 4 KB fixed-sized output compression for high performance.
 
 The following git tree provides the file system user-space tools under
 development (ex, formatting tool mkfs.erofs):
@@ -85,7 +85,7 @@ All data areas should be aligned with the block size, but metadata areas
 may not. All metadatas can be now observed in two different spaces (views):
  1. Inode metadata space
     Each valid inode should be aligned with an inode slot, which is a fixed
-    value (32 bytes) and designed to be kept in line with v1 inode size.
+    value (32 bytes) and designed to be kept in line with compact inode size.
 
     Each inode can be directly found with the following formula:
          inode offset = meta_blkaddr * block_size + 32 * nid
@@ -117,10 +117,10 @@ may not. All metadatas can be now observed in two different spaces (views):
                                                        |-> aligned with 4B
 
     Inode could be 32 or 64 bytes, which can be distinguished from a common
-    field which all inode versions have -- i_advise:
+    field which all inode versions have -- i_format:
 
         __________________               __________________
-       |     i_advise     |             |     i_advise     |
+       |     i_format     |             |     i_format     |
        |__________________|             |__________________|
        |        ...       |             |        ...       |
        |                  |             |                  |
@@ -129,12 +129,13 @@ may not. All metadatas can be now observed in two different spaces (views):
                                         |__________________| 64 bytes
 
     Xattrs, extents, data inline are followed by the corresponding inode with
-    proper alignes, and they could be optional for different data mappings,
-    _currently_ there are totally 3 valid data mappings supported:
+    proper alignment, and they could be optional for different data mappings.
+    _currently_ 4 valid data mappings are supported in total:
 
-     1) flat file data without data inline (no extent);
-     2) fixed-output size data compression (must have extents);
-     3) flat file data with tail-end data inline (no extent);
+     0  flat file data without data inline (no extent);
+     1  fixed-sized output data compression (with non-compacted indexes);
+     2  flat file data with tail packing data inline (no extent);
+     3  fixed-sized output data compression (with compacted indexes, v5.3+).
 
     The size of the optional xattrs is indicated by i_xattr_count in inode
     header. Large xattrs or xattrs shared by many different files can be
@@ -182,8 +183,8 @@ introduce another on-disk field at all.
 
 Compression
 -----------
-Currently, EROFS supports 4KB fixed-output clustersize transparent file
-compression, as illustrated below:
+Currently, EROFS supports 4KB fixed-sized output transparent file compression,
+as illustrated below:
 
          |---- Variant-Length Extent ----|-------- VLE --------|----- VLE -----
          clusterofs                      clusterofs            clusterofs
similarity index 99%
rename from Documentation/filesystems/overlayfs.txt
rename to Documentation/filesystems/overlayfs.rst
index 845d689e0fd711672416669e501f68757e498a8f..e443be7928db2b674ad4272d6d4adf74bacd356f 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 Written by: Neil Brown
 Please see MAINTAINERS file for where to send questions.
 
@@ -181,7 +183,7 @@ Kernel config options:
     worried about backward compatibility with kernels that have the redirect_dir
     feature and follow redirects even if turned off.
 
-Module options (can also be changed through /sys/module/overlay/parameters/*):
+Module options (can also be changed through /sys/module/overlay/parameters/):
 
 - "redirect_dir=BOOL":
     See OVERLAY_FS_REDIRECT_DIR kernel config option above.
@@ -263,7 +265,7 @@ top, lower2 the middle and lower3 the bottom layer.
 
 
 Metadata only copy up
---------------------
+---------------------
 
 When metadata only copy up feature is enabled, overlayfs will only copy
 up metadata (as opposed to whole file), when a metadata specific operation
@@ -286,10 +288,10 @@ pointed by REDIRECT. This should not be possible on local system as setting
 "trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible
 for untrusted layers like from a pen drive.
 
-Note: redirect_dir={off|nofollow|follow(*)} conflicts with metacopy=on, and
+Note: redirect_dir={off|nofollow|follow[*]} conflicts with metacopy=on, and
 results in an error.
 
-(*) redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
+[*] redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
 given.
 
 Sharing and copying layers
index 0a72b6321f5fb8b379b03c527905b02557993c0b..c13fee8b02ba7a8ecf03ee96571e1bf14bfd117c 100644 (file)
@@ -71,8 +71,8 @@ DMA support
 DMA controllers enumerated via ACPI should be registered in the system to
 provide generic access to their resources. For example, a driver that would
 like to be accessible to slave devices via generic API call
-dma_request_slave_channel() must register itself at the end of the probe
-function like this::
+dma_request_chan() must register itself at the end of the probe function like
+this::
 
        err = devm_acpi_dma_controller_register(dev, xlate_func, dw);
        /* Handle the error if it's not a case of !CONFIG_ACPI */
@@ -112,15 +112,15 @@ could look like::
        }
        #endif
 
-dma_request_slave_channel() will call xlate_func() for each registered DMA
-controller. In the xlate function the proper channel must be chosen based on
+dma_request_chan() will call xlate_func() for each registered DMA controller.
+In the xlate function the proper channel must be chosen based on
 information in struct acpi_dma_spec and the properties of the controller
 provided by struct acpi_dma.
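+
+On the client side, the channel request itself might look like this (a
+minimal sketch; ``dev`` and the "tx" channel name are assumptions)::
+
+	struct dma_chan *chan;
+
+	chan = dma_request_chan(dev, "tx");
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);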
 
-Clients must call dma_request_slave_channel() with the string parameter that
-corresponds to a specific FixedDMA resource. By default "tx" means the first
-entry of the FixedDMA resource array, "rx" means the second entry. The table
-below shows a layout::
+Clients must call dma_request_chan() with the string parameter that corresponds
+to a specific FixedDMA resource. By default "tx" means the first entry of the
+FixedDMA resource array, "rx" means the second entry. The table below shows a
+layout::
 
        Device (I2C0)
        {
diff --git a/Documentation/hwmon/adm1177.rst b/Documentation/hwmon/adm1177.rst
new file mode 100644 (file)
index 0000000..c81e0b4
--- /dev/null
@@ -0,0 +1,36 @@
+Kernel driver adm1177
+=====================
+
+Supported chips:
+  * Analog Devices ADM1177
+    Prefix: 'adm1177'
+    Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADM1177.pdf
+
+Author: Beniamin Bia <beniamin.bia@analog.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Analog Devices ADM1177
+Hot-Swap Controller and Digital Power Monitors with Soft Start Pin.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. The current max attribute
+is read-write; all other attributes are read-only.
+
+in0_input              Measured voltage in microvolts.
+
+curr1_input            Measured current in microamperes.
+curr1_max_alarm                Overcurrent alarm in microamperes.
diff --git a/Documentation/hwmon/drivetemp.rst b/Documentation/hwmon/drivetemp.rst
new file mode 100644 (file)
index 0000000..2d37d04
--- /dev/null
@@ -0,0 +1,52 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver drivetemp
+=======================
+
+
+References
+----------
+
+ANS T13/1699-D
+Information technology - AT Attachment 8 - ATA/ATAPI Command Set (ATA8-ACS)
+
+ANS Project T10/BSR INCITS 513
+Information technology - SCSI Primary Commands - 4 (SPC-4)
+
+ANS Project INCITS 557
+Information technology - SCSI / ATA Translation - 5 (SAT-5)
+
+
+Description
+-----------
+
+This driver supports reporting the temperature of disk and solid state
+drives with temperature sensors.
+
+If supported, it uses the ATA SCT Command Transport feature to read
+the current drive temperature and, if available, temperature limits
+as well as historic minimum and maximum temperatures. If SCT Command
+Transport is not supported, the driver uses SMART attributes to read
+the drive temperature.
+
+
+Sysfs entries
+-------------
+
+Only the temp1_input attribute is always available. Other attributes are
+available only if reported by the drive. All temperatures are reported in
+milli-degrees Celsius.
+
+=======================        =====================================================
+temp1_input            Current drive temperature
+temp1_lcrit            Minimum temperature limit. Operating the device below
+                       this temperature may cause physical damage to the
+                       device.
+temp1_min              Minimum recommended continuous operating temperature
+temp1_max              Maximum recommended continuous operating temperature
+temp1_crit             Maximum temperature limit. Operating the device above
+                       this temperature may cause physical damage to the
+                       device.
+temp1_lowest           Minimum temperature seen this power cycle
+temp1_highest          Maximum temperature seen this power cycle
+=======================        =====================================================
index 43cc605741ea4abf35923762295504b2b5ebd2ab..b24adb67ddca6c334298b3e9ec77485e8a95f1fb 100644 (file)
@@ -29,6 +29,7 @@ Hardware Monitoring Kernel Drivers
    adm1025
    adm1026
    adm1031
+   adm1177
    adm1275
    adm9240
    ads7828
@@ -47,6 +48,7 @@ Hardware Monitoring Kernel Drivers
    da9055
    dell-smm-hwmon
    dme1737
+   drivetemp
    ds1621
    ds620
    emc1403
@@ -106,8 +108,10 @@ Hardware Monitoring Kernel Drivers
    max1619
    max1668
    max197
+   max20730
    max20751
    max31722
+   max31730
    max31785
    max31790
    max34440
@@ -177,6 +181,7 @@ Hardware Monitoring Kernel Drivers
    wm831x
    wm8350
    xgene-hwmon
+   xdpe12284
    zl6100
 
 .. only::  subproject and html
diff --git a/Documentation/hwmon/max20730.rst b/Documentation/hwmon/max20730.rst
new file mode 100644 (file)
index 0000000..cea7ae5
--- /dev/null
@@ -0,0 +1,74 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver max20730
+======================
+
+Supported chips:
+
+  * Maxim MAX20730
+
+    Prefix: 'max20730'
+
+    Addresses scanned: -
+
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20730.pdf
+
+  * Maxim MAX20734
+
+    Prefix: 'max20734'
+
+    Addresses scanned: -
+
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20734.pdf
+
+  * Maxim MAX20743
+
+    Prefix: 'max20743'
+
+    Addresses scanned: -
+
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20743.pdf
+
+Author: Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver implements support for Maxim MAX20730, MAX20734, and MAX20743
+Integrated, Step-Down Switching Regulators with PMBus support.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+
+Sysfs entries
+-------------
+
+=================== ===== =======================================================
+curr1_crit          RW/RO Critical output current. Please see datasheet for
+                          supported limits. Read-only if the chip is
+                          write protected; read-write otherwise.
+curr1_crit_alarm    RO    Output current critical alarm
+curr1_input         RO    Output current
+curr1_label         RO    'iout1'
+in1_alarm           RO    Input voltage alarm
+in1_input           RO    Input voltage
+in1_label           RO    'vin'
+in2_alarm           RO    Output voltage alarm
+in2_input           RO    Output voltage
+in2_label           RO    'vout1'
+temp1_crit          RW/RO Critical temperature. Supported values are 130 or 150
+                          degrees C. Read-only if the chip is write protected;
+                          read-write otherwise.
+temp1_crit_alarm    RO    Temperature critical alarm
+temp1_input         RO    Chip temperature
+=================== ===== =======================================================
diff --git a/Documentation/hwmon/max31730.rst b/Documentation/hwmon/max31730.rst
new file mode 100644 (file)
index 0000000..def0de1
--- /dev/null
@@ -0,0 +1,44 @@
+Kernel driver max31730
+======================
+
+Supported chips:
+
+  * Maxim MAX31730
+
+    Prefix: 'max31730'
+
+    Addresses scanned: 0x1c, 0x1d, 0x1e, 0x1f, 0x4c, 0x4d, 0x4e, 0x4f
+
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31730.pdf
+
+Author: Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver implements support for Maxim MAX31730.
+
+The MAX31730 temperature sensor monitors its own temperature and the
+temperatures of three external diode-connected transistors. The operating
+supply voltage is from 3.0V to 3.6V. Resistance cancellation compensates
+for high series resistance in circuit-board traces and the external thermal
+diode, while beta compensation corrects for temperature-measurement
+errors due to low-beta sensing transistors.
+
+
+Sysfs entries
+-------------
+
+=================== == =======================================================
+temp[1-4]_enable    RW Temperature enable/disable
+                       Set to 1 to enable channel, 0 to disable
+temp[1-4]_input     RO Temperature input
+temp[2-4]_fault     RO Fault indicator for remote channels
+temp[1-4]_max       RW Maximum temperature
+temp[1-4]_max_alarm RW Maximum temperature alarm
+temp[1-4]_min       RW Minimum temperature. Common for all channels.
+                       Only temp1_min is writeable.
+temp[1-4]_min_alarm RO Minimum temperature alarm
+temp[2-4]_offset    RW Temperature offset for remote channels
+=================== == =======================================================
index abfb9dd4857d51d30613578d0e1498807669505e..f787984e88a9c1f33ecf9fb4176d51be1ebf29fe 100644 (file)
@@ -63,6 +63,16 @@ Supported chips:
 
        http://www.ti.com/lit/gpn/tps544c25
 
+  * Maxim MAX20796
+
+    Prefix: 'max20796'
+
+    Addresses scanned: -
+
+    Datasheet:
+
+       Not published
+
   * Generic PMBus devices
 
     Prefix: 'pmbus'
index 746f21fcb48ccc9c77f74f3fd0e435622bb31425..704f0cbd95d3b3e9ccc27738dd379ca768e37182 100644 (file)
@@ -3,9 +3,10 @@ Kernel driver ucd9000
 
 Supported chips:
 
-  * TI UCD90120, UCD90124, UCD90160, UCD9090, and UCD90910
+  * TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, and UCD90910
 
-    Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd9090', 'ucd90910'
+    Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd90320', 'ucd9090',
+              'ucd90910'
 
     Addresses scanned: -
 
@@ -14,6 +15,7 @@ Supported chips:
        - http://focus.ti.com/lit/ds/symlink/ucd90120.pdf
        - http://focus.ti.com/lit/ds/symlink/ucd90124.pdf
        - http://focus.ti.com/lit/ds/symlink/ucd90160.pdf
+       - http://focus.ti.com/lit/ds/symlink/ucd90320.pdf
        - http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
        - http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
 
@@ -45,6 +47,12 @@ power-on reset signals, external interrupts, cascading, or other system
 functions. Twelve of these pins offer PWM functionality. Using these pins, the
 UCD90160 offers support for margining, and general-purpose PWM functions.
 
+The UCD90320 is a 32-rail PMBus/I2C addressable power-supply sequencer and
+monitor. The 24 integrated ADC channels (AMONx) monitor the power supply
+voltage, current, and temperature. Of the 84 GPIO pins, 8 can be used as
+digital monitors (DMONx), 32 to enable the power supply (ENx), 24 for margining
+(MARx), 16 for logical GPO, and 32 GPIs for cascading and system functions.
+
 The UCD9090 is a 10-rail PMBus/I2C addressable power-supply sequencer and
 monitor. The device integrates a 12-bit ADC for monitoring up to 10 power-supply
 voltage inputs. Twenty-three GPIO pins can be used for power supply enables,
diff --git a/Documentation/hwmon/xdpe12284.rst b/Documentation/hwmon/xdpe12284.rst
new file mode 100644 (file)
index 0000000..6b7ae98
--- /dev/null
@@ -0,0 +1,101 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver xdpe122
+=====================
+
+Supported chips:
+
+  * Infineon XDPE12254
+
+    Prefix: 'xdpe12254'
+
+  * Infineon XDPE12284
+
+    Prefix: 'xdpe12284'
+
+Authors:
+
+       Vadim Pasternak <vadimp@mellanox.com>
+
+Description
+-----------
+
+This driver implements support for Infineon Multi-phase XDPE122 family
+dual loop voltage regulators.
+The family includes XDPE12284 and XDPE12254 devices.
+The devices from this family are compliant with:
+- Intel VR13 and VR13HC rev 1.3, IMVP8 rev 1.2 and IMVP9 rev 1.3 DC-DC
+  converter specification.
+- Intel SVID rev 1.9 protocol.
+- PMBus rev 1.3 interface.
+
+Devices support linear format for reading input voltage, input and output current,
+input and output power and temperature.
+Devices support VID format for reading output voltage. The below modes are
+supported:
+- VR12.0 mode, 5-mV DAC - 0x01.
+- VR12.5 mode, 10-mV DAC - 0x02.
+- IMVP9 mode, 5-mV DAC - 0x03.
+- AMD mode 6.25mV - 0x10.
+
+Devices support two pages for telemetry.
+
+The driver provides for current: input, maximum and critical thresholds
+and maximum and critical alarms. Critical thresholds and critical alarms are
+supported only for the output current.
+The driver exports the following attributes via the sysfs files, where
+indexes 1, 2 are for "iin" and 3, 4 for "iout":
+
+**curr[3-4]_crit**
+
+**curr[3-4]_crit_alarm**
+
+**curr[1-4]_input**
+
+**curr[1-4]_label**
+
+**curr[1-4]_max**
+
+**curr[1-4]_max_alarm**
+
+The driver provides for voltage: input, critical and low critical thresholds
+and critical and low critical alarms.
+The driver exports the following attributes via the sysfs files, where
+indexes 1, 2 are for "vin" and 3, 4 for "vout":
+
+**in[1-4]_crit**
+
+**in[1-4]_crit_alarm**
+
+**in[1-4]_input**
+
+**in[1-4]_label**
+
+**in[1-4]_lcrit**
+
+**in[1-4]_lcrit_alarm**
+
+The driver provides for power: input and alarms. Power alarm is supported only
+for power input.
+The driver exports the following attributes via the sysfs files, where
+indexes 1, 2 are for "pin" and 3, 4 for "pout":
+
+**power[1-2]_alarm**
+
+**power[1-4]_input**
+
+**power[1-4]_label**
+
+The driver provides for temperature: input, maximum and critical thresholds
+and maximum and critical alarms.
+The driver exports the following attributes via the sysfs files:
+
+**temp[1-2]_crit**
+
+**temp[1-2]_crit_alarm**
+
+**temp[1-2]_input**
+
+**temp[1-2]_max**
+
+**temp[1-2]_max_alarm**
index 74bef19f69f0a8078f2f988358ff57046b46210b..231e6a64957fcf12915c41b6a8d48d6978fa2690 100644 (file)
@@ -196,14 +196,11 @@ applicable everywhere (see syntax).
   or equal to the first symbol and smaller than or equal to the second
   symbol.
 
-- help text: "help" or "---help---"
+- help text: "help"
 
   This defines a help text. The end of the help text is determined by
   the indentation level, this means it ends at the first line which has
   a smaller indentation than the first line of the help text.
-  "---help---" and "help" do not differ in behaviour, "---help---" is
-  used to help visually separate configuration logic from help within
-  the file as an aid to developers.
 
 - misc options: "option" <symbol>[=<value>]
 
index b9b50553bfc56b5b5b50ea9513700a4421f7e232..d7e6534a8505eeb4f9dcc24b01ef94d674e8bf3e 100644 (file)
@@ -297,9 +297,19 @@ more details, with real examples.
        If CONFIG_EXT2_FS is set to either 'y' (built-in) or 'm' (modular)
        the corresponding obj- variable will be set, and kbuild will descend
        down in the ext2 directory.
-       Kbuild only uses this information to decide that it needs to visit
-       the directory, it is the Makefile in the subdirectory that
-       specifies what is modular and what is built-in.
+
+       Kbuild uses this information not only to decide that it needs to visit
+       the directory, but also to decide whether or not to link objects from
+       the directory into vmlinux.
+
+       When Kbuild descends into the directory with 'y', all built-in objects
+       from that directory are combined into built-in.a, which will
+       eventually be linked into vmlinux.
+
+       When Kbuild descends into the directory with 'm', in contrast, nothing
+       from that directory will be linked into vmlinux. If the Makefile in
+       that directory specifies obj-y, those objects will be left orphaned.
+       This is very likely a bug in the Makefile or in the Kconfig
+       dependencies.
 
        It is good practice to use a `CONFIG_` variable when assigning directory
        names. This allows kbuild to totally skip the directory if the
index a572996cdbf60ab602e3d4ddaaf4658b746eff6a..dc57a6a91b43f121d3258f7ce7924f7cf4fc676a 100644 (file)
@@ -95,7 +95,7 @@ so all video4linux tools (like xawtv) should work with this driver.
 
 Besides the video4linux interface, the driver has a private interface
 for accessing the Motion Eye extended parameters (camera sharpness,
-agc, video framerate), the shapshot and the MJPEG capture facilities.
+agc, video framerate), the snapshot and the MJPEG capture facilities.
 
 This interface consists of several ioctls (prototypes and structures
 can be found in include/linux/meye.h):
index eef20d0bcf7c15183b7db65ba847dd47f2c45b60..64553d8d91cb9a6e65f1ac1d73b4fbe554d4abde 100644 (file)
@@ -230,12 +230,6 @@ simultaneously on two ports. The driver checks the consistency of the schedules
 against this restriction and errors out when appropriate. Schedule analysis is
 needed to avoid this, which is outside the scope of the document.
 
-At the moment, the time-aware scheduler can only be triggered based on a
-standalone clock and not based on PTP time. This means the base-time argument
-from tc-taprio is ignored and the schedule starts right away. It also means it
-is more difficult to phase-align the scheduler with the other devices in the
-network.
-
 Device Tree bindings and board design
 =====================================
 
index fd26788e8c96c66716babef9683733af4deefb1a..48ccb1b31160287f5402b93a3105ff6f6aff0257 100644 (file)
@@ -603,7 +603,7 @@ tcp_synack_retries - INTEGER
        with the current initial RTO of 1second. With this the final timeout
        for a passive TCP connection will happen after 63seconds.
 
-tcp_syncookies - BOOLEAN
+tcp_syncookies - INTEGER
        Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
        Send out syncookies when the syn backlog queue of a socket
        overflows. This is to prevent against the common 'SYN flood attack'
index dc60b13fcd09653a95eda517e9ccdc38e50c26cf..f5be243d250a40e75d8bdef11b2489b56d7af691 100644 (file)
@@ -339,7 +339,7 @@ To claim an address following code example can be used:
                        .pgn = J1939_PGN_ADDRESS_CLAIMED,
                        .pgn_mask = J1939_PGN_PDU1_MAX,
                }, {
-                       .pgn = J1939_PGN_ADDRESS_REQUEST,
+                       .pgn = J1939_PGN_REQUEST,
                        .pgn_mask = J1939_PGN_PDU1_MAX,
                }, {
                        .pgn = J1939_PGN_ADDRESS_COMMANDED,
index 642fa963be3cf8f325c29947072e6c6851213d4b..d5c9320901c3273b99bcc1b4cda78c5472d9f6fd 100644 (file)
@@ -34,8 +34,8 @@ the names, the ``net`` tree is for fixes to existing code already in the
 mainline tree from Linus, and ``net-next`` is where the new code goes
 for the future release.  You can find the trees here:
 
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 
 Q: How often do changes from these trees make it to the mainline Linus tree?
 ----------------------------------------------------------------------------
index ada573b7d703bca0b0ab725e2714f84f6dd2bd1f..edb296c52f61e4b6c1b7b67a30302260ffcb015c 100644 (file)
@@ -988,7 +988,7 @@ Similarly, if you need to calculate the size of some structure member, use
 
 .. code-block:: c
 
-       #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+       #define sizeof_field(t, f) (sizeof(((t*)0)->f))
 
 There are also min() and max() macros that do strict type checking if you
 need them.  Feel free to peruse that header file to see what else is already
index 799580acc8dec621b04aabec3211f4e2a78727cc..5d54946cfc750a9a86a6238cc2939dfe8ba3ca6c 100644 (file)
@@ -255,7 +255,7 @@ an involved disclosed party. The current ambassadors list:
   Red Hat      Josh Poimboeuf <jpoimboe@redhat.com>
   SUSE         Jiri Kosina <jkosina@suse.cz>
 
-  Amazon
+  Amazon       Peter Bowen <pzb@amzn.com>
   Google       Kees Cook <keescook@chromium.org>
   ============= ========================================================
 
index 21aa7d5358e621ab991ff9116278ce35c64b41ad..6399d92f0b21d6db95a6c4893dde1f0d243108c8 100644 (file)
@@ -60,6 +60,7 @@ lack of a better place.
    volatile-considered-harmful
    botching-up-ioctls
    clang-format
+   ../riscv/patch-acceptance
 
 .. only::  subproject and html
 
index 215fd3c1f2d57f5397b469ee6c42b17be7025732..fa33bffd8992771153162189cc05753d844097a8 100644 (file)
@@ -7,6 +7,7 @@ RISC-V architecture
 
     boot-image-header
     pmu
+    patch-acceptance
 
 .. only::  subproject and html
 
diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/riscv/patch-acceptance.rst
new file mode 100644 (file)
index 0000000..dfe0ac5
--- /dev/null
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+arch/riscv maintenance guidelines for developers
+================================================
+
+Overview
+--------
+The RISC-V instruction set architecture is developed in the open:
+in-progress drafts are available for all to review and to experiment
+with implementations.  New module or extension drafts can change
+during the development process - sometimes in ways that are
+incompatible with previous drafts.  This flexibility can present a
+challenge for RISC-V Linux maintenance.  Linux maintainers disapprove
+of churn, and the Linux development process prefers well-reviewed and
+tested code over experimental code.  We wish to extend these same
+principles to the RISC-V-related code that will be accepted for
+inclusion in the kernel.
+
+Submit Checklist Addendum
+-------------------------
+We'll only accept patches for new modules or extensions if the
+specifications for those modules or extensions are listed as being
+"Frozen" or "Ratified" by the RISC-V Foundation.  (Developers may, of
+course, maintain their own Linux kernel trees that contain code for
+any draft extensions that they wish.)
+
+Additionally, the RISC-V specification allows implementors to create
+their own custom extensions.  These custom extensions aren't required
+to go through any review or ratification process by the RISC-V
+Foundation.  To avoid the maintenance complexity and potential
+performance impact of adding kernel code for implementor-specific
+RISC-V extensions, we'll only accept patches for extensions that
+have been officially frozen or ratified by the RISC-V Foundation.
+(Implementors may, of course, maintain their own Linux kernel trees
+containing code for any custom extensions that they wish.)
index 201f80c7c0506e5e5772006b9f874d4081920d6f..df129f55ace5e768740312b0b60b903b76872ab6 100644 (file)
@@ -29,7 +29,7 @@ smartpqi specific entries in /sys
   smartpqi host attributes:
   -------------------------
   /sys/class/scsi_host/host*/rescan
-  /sys/class/scsi_host/host*/version
+  /sys/class/scsi_host/host*/driver_version
 
   The host rescan attribute is a write-only attribute. Writing to this
   attribute will trigger the driver to scan for new, changed, or removed
index f169d58ca019a30f212c81e93c3f1a64b55802cf..ddef812ddf8fb429762156ef6008113e8ab50641 100644 (file)
@@ -1058,7 +1058,7 @@ and the allocation would be like below:
           return err;
   }
   chip->iobase_phys = pci_resource_start(pci, 0);
-  chip->iobase_virt = ioremap_nocache(chip->iobase_phys,
+  chip->iobase_virt = ioremap(chip->iobase_phys,
                                       pci_resource_len(pci, 0));
 
 and the corresponding destructor would be:
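The destructor body is elided in this hunk; in sketch form, assuming the chip fields shown above, it simply pairs the mapping with iounmap():

  /* Sketch only: undo the ioremap() from the probe path above. */
  iounmap(chip->iobase_virt);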
index 8995d2d19f202e65d37d960705991c314a5bfcaa..8725f2b9e96032d37ac6a3c379ce254fd652e5d8 100644 (file)
@@ -1005,7 +1005,7 @@ struttura, usate
 
 .. code-block:: c
 
-       #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+       #define sizeof_field(t, f) (sizeof(((t*)0)->f))
 
 Ci sono anche le macro min() e max() che, se vi serve, effettuano un controllo
 rigido sui tipi.  Sentitevi liberi di leggere attentamente questo file
index 4f6237392e65ad33b393a337d4d898314c600290..eae10bc7f86f2a096f970e6464087c7f31b9aea8 100644 (file)
@@ -826,7 +826,7 @@ inline gcc 也可以自动使其内联。而且其他用户可能会要求移除
 
 .. code-block:: c
 
-       #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+       #define sizeof_field(t, f) (sizeof(((t*)0)->f))
 
 还有可以做严格的类型检查的 min() 和 max() 宏,如果你需要可以使用它们。你可以
 自己看看那个头文件里还定义了什么你可以拿来用的东西,如果有定义的话,你就不应
index 9a298fd97d74a41b12af0b302ac18f3819ed116e..5d901771016d2a003b59b6e9366386e32c126a75 100644 (file)
@@ -44,8 +44,6 @@ address range to avoid any aliasing.
 +------------------------+----------+--------------+------------------+
 | ioremap_uc             |    --    |    UC        |       UC         |
 +------------------------+----------+--------------+------------------+
-| ioremap_nocache        |    --    |    UC-       |       UC-        |
-+------------------------+----------+--------------+------------------+
 | ioremap_wc             |    --    |    --        |       WC         |
 +------------------------+----------+--------------+------------------+
 | ioremap_wt             |    --    |    --        |       WT         |
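The dropped row reflects that ioremap_nocache() had become an alias for plain ioremap(), so call sites convert one-for-one. A hedged sketch with a hypothetical helper (resource handling mirrors common kernel usage; the function name is illustrative):

  #include <linux/io.h>
  #include <linux/ioport.h>

  /* Illustrative only: ioremap_nocache(res->start, resource_size(res))
   * becomes ioremap(); both request an uncached MMIO mapping. */
  static void __iomem *map_regs(struct resource *res)
  {
          return ioremap(res->start, resource_size(res));
  }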
index bd5847e802defb11887f45dd17412e1e98d054e8..141b8d3e4ca2498400975dc15e6f18ba0e68d299 100644 (file)
@@ -345,7 +345,7 @@ F:  drivers/acpi/apei/
 
 ACPI COMPONENT ARCHITECTURE (ACPICA)
 M:     Robert Moore <robert.moore@intel.com>
-M:     Erik Schmauss <erik.schmauss@intel.com>
+M:     Erik Kaneda <erik.kaneda@intel.com>
 M:     "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 L:     linux-acpi@vger.kernel.org
 L:     devel@acpica.org
@@ -720,7 +720,7 @@ F:  Documentation/devicetree/bindings/i2c/i2c-altera.txt
 F:     drivers/i2c/busses/i2c-altera.c
 
 ALTERA MAILBOX DRIVER
-M:     Ley Foon Tan <lftan@altera.com>
+M:     Ley Foon Tan <ley.foon.tan@intel.com>
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/mailbox/mailbox-altera.c
@@ -771,6 +771,8 @@ F:  drivers/thermal/thermal_mmio.c
 
 AMAZON ETHERNET DRIVERS
 M:     Netanel Belgazal <netanel@amazon.com>
+M:     Arthur Kiyanovski <akiyano@amazon.com>
+R:     Guy Tzalik <gtzalik@amazon.com>
 R:     Saeed Bishara <saeedb@amazon.com>
 R:     Zorik Machulsky <zorik@amazon.com>
 L:     netdev@vger.kernel.org
@@ -975,6 +977,15 @@ W: http://ez.analog.com/community/linux-device-drivers
 F:     drivers/iio/imu/adis16460.c
 F:     Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml
 
+ANALOG DEVICES INC ADM1177 DRIVER
+M:     Beniamin Bia <beniamin.bia@analog.com>
+M:     Michael Hennerich <Michael.Hennerich@analog.com>
+L:     linux-hwmon@vger.kernel.org
+W:     http://ez.analog.com/community/linux-device-drivers
+S:     Supported
+F:     drivers/hwmon/adm1177.c
+F:     Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
+
 ANALOG DEVICES INC ADP5061 DRIVER
 M:     Stefan Popa <stefan.popa@analog.com>
 L:     linux-pm@vger.kernel.org
@@ -1405,7 +1416,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
 
 ARM/ACTIONS SEMI ARCHITECTURE
 M:     Andreas Färber <afaerber@suse.de>
-R:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 N:     owl
@@ -2240,6 +2251,7 @@ L:        linux-rockchip@lists.infradead.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+F:     Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
 F:     arch/arm/boot/dts/rk3*
 F:     arch/arm/boot/dts/rv1108*
 F:     arch/arm/mach-rockchip/
@@ -2272,6 +2284,7 @@ F:        drivers/*/*s3c64xx*
 F:     drivers/*/*s5pv210*
 F:     drivers/memory/samsung/
 F:     drivers/soc/samsung/
+F:     drivers/tty/serial/samsung*
 F:     include/linux/soc/samsung/
 F:     Documentation/arm/samsung/
 F:     Documentation/devicetree/bindings/arm/samsung/
@@ -2691,6 +2704,14 @@ S:       Maintained
 F:     drivers/pinctrl/aspeed/
 F:     Documentation/devicetree/bindings/pinctrl/aspeed,*
 
+ASPEED SCU INTERRUPT CONTROLLER DRIVER
+M:     Eddie James <eajames@linux.ibm.com>
+L:     linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2xxx-scu-ic.txt
+F:     drivers/irqchip/irq-aspeed-scu-ic.c
+F:     include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
+
 ASPEED VIDEO ENGINE DRIVER
 M:     Eddie James <eajames@linux.ibm.com>
 L:     linux-media@vger.kernel.org
@@ -3147,7 +3168,7 @@ S:        Maintained
 F:     arch/mips/net/
 
 BPF JIT for NFP NICs
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Supported
@@ -4970,6 +4991,7 @@ F:        include/linux/dma-buf*
 F:     include/linux/reservation.h
 F:     include/linux/*fence.h
 F:     Documentation/driver-api/dma-buf.rst
+K:     dma_(buf|fence|resv)
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
@@ -4999,7 +5021,7 @@ F:        include/linux/dma-mapping.h
 F:     include/linux/dma-noncoherent.h
 
 DMC FREQUENCY DRIVER FOR SAMSUNG EXYNOS5422
-M:     Lukasz Luba <l.luba@partner.samsung.com>
+M:     Lukasz Luba <lukasz.luba@arm.com>
 L:     linux-pm@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -6025,6 +6047,7 @@ M:        Yash Shah <yash.shah@sifive.com>
 L:     linux-edac@vger.kernel.org
 S:     Supported
 F:     drivers/edac/sifive_edac.c
+F:     drivers/soc/sifive_l2_cache.c
 
 EDAC-SKYLAKE
 M:     Tony Luck <tony.luck@intel.com>
@@ -6192,6 +6215,7 @@ ETHERNET PHY LIBRARY
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Heiner Kallweit <hkallweit1@gmail.com>
+R:     Russell King <linux@armlinux.org.uk>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-class-net-phydev
@@ -7031,6 +7055,7 @@ L:        linux-acpi@vger.kernel.org
 S:     Maintained
 F:     Documentation/firmware-guide/acpi/gpio-properties.rst
 F:     drivers/gpio/gpiolib-acpi.c
+F:     drivers/gpio/gpiolib-acpi.h
 
 GPIO IR Transmitter
 M:     Sean Young <sean@mess.org>
@@ -7492,6 +7517,12 @@ S:       Supported
 F:     drivers/scsi/hisi_sas/
 F:     Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
 
+HISILICON V3XX SPI NOR FLASH Controller Driver
+M:     John Garry <john.garry@huawei.com>
+W:     http://www.hisilicon.com
+S:     Maintained
+F:     drivers/spi/spi-hisi-sfc-v3xx.c
+
 HISILICON QM AND ZIP Controller DRIVER
 M:     Zhou Wang <wangzhou1@hisilicon.com>
 L:     linux-crypto@vger.kernel.org
@@ -7835,10 +7866,10 @@ F:      Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
 F:     drivers/i3c/master/dw*
 
 I3C DRIVER FOR CADENCE I3C MASTER IP
-M:      Przemysław Gaj <pgaj@cadence.com>
-S:      Maintained
-F:      Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
-F:      drivers/i3c/master/i3c-master-cdns.c
+M:     Przemysław Gaj <pgaj@cadence.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
+F:     drivers/i3c/master/i3c-master-cdns.c
 
 IA64 (Itanium) PLATFORM
 M:     Tony Luck <tony.luck@intel.com>
@@ -8375,6 +8406,14 @@ Q:       https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Supported
 F:     drivers/dma/ioat*
 
+INTEL IADX DRIVER
+M:     Dave Jiang <dave.jiang@intel.com>
+L:     dmaengine@vger.kernel.org
+S:     Supported
+F:     drivers/dma/idxd/*
+F:     include/uapi/linux/idxd.h
+F:     include/linux/idxd.h
+
 INTEL IDLE DRIVER
 M:     Jacob Pan <jacob.jun.pan@linux.intel.com>
 M:     Len Brown <lenb@kernel.org>
@@ -8556,6 +8595,12 @@ S:       Maintained
 F:     arch/x86/include/asm/intel_telemetry.h
 F:     drivers/platform/x86/intel_telemetry*
 
+INTEL UNCORE FREQUENCY CONTROL
+M:     Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/intel-uncore-frequency.c
+
 INTEL VIRTUAL BUTTON DRIVER
 M:     AceLan Kao <acelan.kao@canonical.com>
 L:     platform-driver-x86@vger.kernel.org
@@ -8563,7 +8608,7 @@ S:        Maintained
 F:     drivers/platform/x86/intel-vbtn.c
 
 INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
-M:     Stanislaw Gruszka <sgruszka@redhat.com>
+M:     Stanislaw Gruszka <stf_xl@wp.pl>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/net/wireless/intel/iwlegacy/
@@ -9038,7 +9083,6 @@ F:        include/linux/umh.h
 
 KERNEL VIRTUAL MACHINE (KVM)
 M:     Paolo Bonzini <pbonzini@redhat.com>
-M:     Radim Krčmář <rkrcmar@redhat.com>
 L:     kvm@vger.kernel.org
 W:     http://www.linux-kvm.org
 T:     git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -9073,9 +9117,9 @@ F:        virt/kvm/arm/
 F:     include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
-M:     James Hogan <jhogan@kernel.org>
 L:     linux-mips@vger.kernel.org
-S:     Supported
+L:     kvm@vger.kernel.org
+S:     Orphan
 F:     arch/mips/include/uapi/asm/kvm*
 F:     arch/mips/include/asm/kvm*
 F:     arch/mips/kvm/
@@ -9110,7 +9154,6 @@ F:        tools/testing/selftests/kvm/*/s390x/
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M:     Paolo Bonzini <pbonzini@redhat.com>
-M:     Radim Krčmář <rkrcmar@redhat.com>
 R:     Sean Christopherson <sean.j.christopherson@intel.com>
 R:     Vitaly Kuznetsov <vkuznets@redhat.com>
 R:     Wanpeng Li <wanpengli@tencent.com>
@@ -10108,6 +10151,7 @@ S:      Maintained
 F:     drivers/media/radio/radio-maxiradio*
 
 MCAN MMIO DEVICE DRIVER
+M:     Dan Murphy <dmurphy@ti.com>
 M:     Sriram Dash <sriram.dash@samsung.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
@@ -11138,6 +11182,13 @@ S:     Maintained
 F:     Documentation/driver-api/serial/moxa-smartio.rst
 F:     drivers/tty/mxser.*
 
+MONOLITHIC POWER SYSTEM PMIC DRIVER
+M:     Saravanan Sekar <sravanhome@gmail.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/regulator/mpq7920.yaml
+F:     drivers/regulator/mpq7920.c
+F:     drivers/regulator/mpq7920.h
+
 MR800 AVERMEDIA USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -11426,7 +11477,7 @@ F:      include/uapi/linux/netrom.h
 F:     net/netrom/
 
 NETRONOME ETHERNET DRIVERS
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     oss-drivers@netronome.com
 S:     Maintained
 F:     drivers/net/ethernet/netronome/
@@ -11455,8 +11506,8 @@ M:      "David S. Miller" <davem@davemloft.net>
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 S:     Odd Fixes
 F:     Documentation/devicetree/bindings/net/
 F:     drivers/net/
@@ -11494,11 +11545,12 @@ F:    drivers/net/dsa/
 
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 B:     mailto:netdev@vger.kernel.org
 S:     Maintained
 F:     net/
@@ -11543,7 +11595,7 @@ M:      "David S. Miller" <davem@davemloft.net>
 M:     Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 M:     Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 L:     netdev@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 S:     Maintained
 F:     net/ipv4/
 F:     net/ipv6/
@@ -11586,7 +11638,7 @@ M:      Boris Pismenny <borisp@mellanox.com>
 M:     Aviad Yehezkel <aviadye@mellanox.com>
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     net/tls/*
@@ -11598,7 +11650,7 @@ L:      linux-wireless@vger.kernel.org
 Q:     http://patchwork.kernel.org/project/linux-wireless/list/
 
 NETDEVSIM
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 S:     Maintained
 F:     drivers/net/netdevsim/*
 
@@ -11675,7 +11727,7 @@ F:      Documentation/scsi/NinjaSCSI.txt
 F:     drivers/scsi/nsp32*
 
 NIOS2 ARCHITECTURE
-M:     Ley Foon Tan <lftan@altera.com>
+M:     Ley Foon Tan <ley.foon.tan@intel.com>
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
 S:     Maintained
@@ -12393,7 +12445,7 @@ L:      linux-unionfs@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
 S:     Supported
 F:     fs/overlayfs/
-F:     Documentation/filesystems/overlayfs.txt
+F:     Documentation/filesystems/overlayfs.rst
 
 P54 WIRELESS DRIVER
 M:     Christian Lamparter <chunkeey@googlemail.com>
@@ -12559,7 +12611,7 @@ F:      Documentation/devicetree/bindings/pci/aardvark-pci.txt
 F:     drivers/pci/controller/pci-aardvark.c
 
 PCI DRIVER FOR ALTERA PCIE IP
-M:     Ley Foon Tan <lftan@altera.com>
+M:     Ley Foon Tan <ley.foon.tan@intel.com>
 L:     rfi@lists.rocketboards.org (moderated for non-subscribers)
 L:     linux-pci@vger.kernel.org
 S:     Supported
@@ -12738,7 +12790,7 @@ S:      Supported
 F:     Documentation/PCI/pci-error-recovery.rst
 
 PCI MSI DRIVER FOR ALTERA MSI IP
-M:     Ley Foon Tan <lftan@altera.com>
+M:     Ley Foon Tan <ley.foon.tan@intel.com>
 L:     rfi@lists.rocketboards.org (moderated for non-subscribers)
 L:     linux-pci@vger.kernel.org
 S:     Supported
@@ -13139,6 +13191,11 @@ S:     Maintained
 F:     drivers/iio/chemical/pms7003.c
 F:     Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
 
+PLX DMA DRIVER
+M:     Logan Gunthorpe <logang@deltatee.com>
+S:     Maintained
+F:     drivers/dma/plx_dma.c
+
 PMBUS HARDWARE MONITORING DRIVERS
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
@@ -13209,6 +13266,8 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Maintained
 F:     fs/timerfd.c
 F:     include/linux/timer*
+F:     include/linux/time_namespace.h
+F:     kernel/time_namespace.c
 F:     kernel/time/*timer*
 
 POWER MANAGEMENT CORE
@@ -13666,6 +13725,14 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
 F:     drivers/cpufreq/qcom-cpufreq-nvmem.c
 
+QUALCOMM CORE POWER REDUCTION (CPR) AVS DRIVER
+M:     Niklas Cassel <nks@flawful.org>
+L:     linux-pm@vger.kernel.org
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/power/avs/qcom,cpr.txt
+F:     drivers/power/avs/qcom-cpr.c
+
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
 M:     Timur Tabi <timur@kernel.org>
 L:     netdev@vger.kernel.org
@@ -13674,7 +13741,6 @@ F:      drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM ETHQOS ETHERNET DRIVER
 M:     Vinod Koul <vkoul@kernel.org>
-M:     Niklas Cassel <niklas.cassel@linaro.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -13708,6 +13774,15 @@ L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     drivers/iommu/qcom_iommu.c
 
+QUALCOMM RMNET DRIVER
+M:     Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+M:     Sean Tranchetti <stranche@codeaurora.org>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/qualcomm/rmnet/
+F:     Documentation/networking/device_drivers/qualcomm/rmnet.txt
+F:     include/linux/if_rmnet.h
+
 QUALCOMM TSENS THERMAL DRIVER
 M:     Amit Kucheria <amit.kucheria@linaro.org>
 L:     linux-pm@vger.kernel.org
@@ -13807,7 +13882,7 @@ S:      Maintained
 F:     arch/mips/ralink
 
 RALINK RT2X00 WIRELESS LAN DRIVER
-M:     Stanislaw Gruszka <sgruszka@redhat.com>
+M:     Stanislaw Gruszka <stf_xl@wp.pl>
 M:     Helmut Schaa <helmut.schaa@googlemail.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
@@ -14107,6 +14182,7 @@ M:      Paul Walmsley <paul.walmsley@sifive.com>
 M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Albert Ou <aou@eecs.berkeley.edu>
 L:     linux-riscv@lists.infradead.org
+P:     Documentation/riscv/patch-acceptance.rst
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
 S:     Supported
 F:     arch/riscv/
@@ -14534,8 +14610,6 @@ F:      include/linux/platform_data/spi-s3c64xx.h
 
 SAMSUNG SXGBE DRIVERS
 M:     Byungho An <bh74.an@samsung.com>
-M:     Girish K S <ks.giri@samsung.com>
-M:     Vipul Pandya <vipul.pandya@samsung.com>
 S:     Supported
 L:     netdev@vger.kernel.org
 F:     drivers/net/ethernet/samsung/sxgbe/
@@ -14806,6 +14880,7 @@ F:      include/uapi/linux/selinux_netlink.h
 F:     security/selinux/
 F:     scripts/selinux/
 F:     Documentation/admin-guide/LSM/SELinux.rst
+F:     Documentation/ABI/obsolete/sysfs-selinux-disable
 
 SENSABLE PHANTOM
 M:     Jiri Slaby <jirislaby@gmail.com>
@@ -16314,12 +16389,10 @@ F:    drivers/media/radio/radio-raremono.c
 
 THERMAL
 M:     Zhang Rui <rui.zhang@intel.com>
-M:     Eduardo Valentin <edubezval@gmail.com>
-R:     Daniel Lezcano <daniel.lezcano@linaro.org>
+M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 R:     Amit Kucheria <amit.kucheria@verdurent.com>
 L:     linux-pm@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/thermal/linux.git
 Q:     https://patchwork.kernel.org/project/linux-pm/list/
 S:     Supported
 F:     drivers/thermal/
@@ -16533,6 +16606,13 @@ L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Odd Fixes
 F:     sound/soc/codecs/tas571x*
 
+TI TCAN4X5X DEVICE DRIVER
+M:     Dan Murphy <dmurphy@ti.com>
+L:     linux-can@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+F:     drivers/net/can/m_can/tcan4x5x.c
+
 TI TRF7970A NFC DRIVER
 M:     Mark Greer <mgreer@animalcreek.com>
 L:     linux-wireless@vger.kernel.org
@@ -16584,7 +16664,7 @@ F:      kernel/time/ntp.c
 F:     tools/testing/selftests/timers/
 
 TIPC NETWORK LAYER
-M:     Jon Maloy <jon.maloy@ericsson.com>
+M:     Jon Maloy <jmaloy@redhat.com>
 M:     Ying Xue <ying.xue@windriver.com>
 L:     netdev@vger.kernel.org (core kernel code)
 L:     tipc-discussion@lists.sourceforge.net (user apps, general discussion)
@@ -18025,7 +18105,7 @@ XDP (eXpress Data Path)
 M:     Alexei Starovoitov <ast@kernel.org>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     David S. Miller <davem@davemloft.net>
-M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+M:     Jakub Kicinski <kuba@kernel.org>
 M:     Jesper Dangaard Brouer <hawk@kernel.org>
 M:     John Fastabend <john.fastabend@gmail.com>
 L:     netdev@vger.kernel.org
index 73e3c280292770ccd4b5bb1be95c2fca391bfba8..6a01b073915e213e482e0b74d20f8d109ef295c3 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -414,6 +414,7 @@ STRIP               = $(CROSS_COMPILE)strip
 OBJCOPY                = $(CROSS_COMPILE)objcopy
 OBJDUMP                = $(CROSS_COMPILE)objdump
 OBJSIZE                = $(CROSS_COMPILE)size
+READELF                = $(CROSS_COMPILE)readelf
 PAHOLE         = pahole
 LEX            = flex
 YACC           = bison
@@ -472,7 +473,7 @@ GCC_PLUGINS_CFLAGS :=
 CLANG_FLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL
+export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
 export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
 
index 1989b946a28dd2b0ed6f937457f5914d2a9ff5d9..d1ed5a8133c5df21e357bd4aae2ecc9f53b8f1a1 100644 (file)
@@ -283,14 +283,8 @@ static inline void __iomem *ioremap(unsigned long port, unsigned long size)
        return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
 }
 
-static inline void __iomem * ioremap_nocache(unsigned long offset,
-                                            unsigned long size)
-{
-       return ioremap(offset, size);
-}
-
-#define ioremap_wc ioremap_nocache
-#define ioremap_uc ioremap_nocache
+#define ioremap_wc ioremap
+#define ioremap_uc ioremap
 
 static inline void iounmap(volatile void __iomem *addr)
 {
diff --git a/arch/alpha/include/asm/vmalloc.h b/arch/alpha/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..0a9a366
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_ALPHA_VMALLOC_H
+#define _ASM_ALPHA_VMALLOC_H
+
+#endif /* _ASM_ALPHA_VMALLOC_H */
index 26108ea785c2644d653c6f3e2228521b3f0d0f82..5f448201955bc53bc217c92fc717a3409480d92f 100644 (file)
@@ -13,7 +13,7 @@ config ARC
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
        select ARCH_32BIT_OFF_T
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select CLONE_BACKWARDS
        select COMMON_CLK
        select DMA_DIRECT_REMAP
index 41b16f21beecac28099ed7ee154db6947b6b0d14..0b8b63d0bec153285e1e4888a07a4fe9db8f40bf 100644 (file)
 #endif
 
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-       ST2     r58, r59, PT_sp + 12
+       ST2     r58, r59, PT_r58
 #endif
 
 .endm
 
        LD2     gp, fp, PT_r26          ; gp (r26), fp (r27)
 
-       ld      r12, [sp, PT_sp + 4]
-       ld      r30, [sp, PT_sp + 8]
+       ld      r12, [sp, PT_r12]
+       ld      r30, [sp, PT_r30]
 
        ; Restore SP (into AUX_USER_SP) only if returning to U mode
        ;  - for K mode, it will be implicitly restored as stack is unwound
 #endif
 
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-       LD2     r58, r59, PT_sp + 12
+       LD2     r58, r59, PT_r58
 #endif
 .endm
 
index 9a74ce71a7674f99349b51ec95cc44915bcc1aff..30ac40fed2c5c5affb5632e3979b84975d41698c 100644 (file)
@@ -8,7 +8,6 @@
 #define _ASM_ARC_HUGEPAGE_H
 
 #include <linux/types.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline pte_t pmd_pte(pmd_t pmd)
diff --git a/arch/arc/include/asm/vmalloc.h b/arch/arc/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..973095a
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARC_VMALLOC_H
+#define _ASM_ARC_VMALLOC_H
+
+#endif /* _ASM_ARC_VMALLOC_H */
index 1f621e416521f8d1651453da9bee876ab864d21b..c783bcd35eb88c88d8ac39f2af097918bcf26be9 100644 (file)
@@ -66,7 +66,15 @@ int main(void)
 
        DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
        DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
-       DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
+
+#ifdef CONFIG_ISA_ARCV2
+       OFFSET(PT_r12, pt_regs, r12);
+       OFFSET(PT_r30, pt_regs, r30);
+#endif
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       OFFSET(PT_r58, pt_regs, r58);
+       OFFSET(PT_r59, pt_regs, r59);
+#endif
 
        return 0;
 }
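For readers new to asm-offsets.c: DEFINE() and OFFSET() come from include/linux/kbuild.h and emit marker strings that the build post-processes into assembler-visible constants. A condensed sketch of that mechanism (macro bodies reproduced from kbuild.h; treat them as illustrative):

  /* The generated object file is scanned for "->SYM value" markers,
   * which become #defines in the autogenerated asm-offsets.h. */
  #define DEFINE(sym, val) \
          asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
  #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

  /* So OFFSET(PT_r12, pt_regs, r12) lets entry code write PT_r12 instead
   * of the hand-computed PT_sp + 4 that this patch removes. */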
index 72be01270e24632d1b53756f03dfacdd19442db5..1f6bb184a44dbedb7686cf82b130c6ff93292775 100644 (file)
@@ -337,11 +337,11 @@ resume_user_mode_begin:
 resume_kernel_mode:
 
        ; Disable Interrupts from this point on
-       ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
-       ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
+       ; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq()
+       ; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe
        IRQ_DISABLE     r9
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
        ; Can't preempt if preemption disabled
        GET_CURR_THR_INFO_FROM_SP   r10
index dc05a63516f5b5a3c695578cd864b2f3886910ba..27ea64b1fa3321c3d86f2c25b419e1fd1882b269 100644 (file)
@@ -42,10 +42,10 @@ do {                                                \
 
 #define EXTRA_INFO(f) { \
                BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
-                               % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+                               % sizeof_field(struct unwind_frame_info, f)) \
                                + offsetof(struct unwind_frame_info, f) \
-                               / FIELD_SIZEOF(struct unwind_frame_info, f), \
-                               FIELD_SIZEOF(struct unwind_frame_info, f) \
+                               / sizeof_field(struct unwind_frame_info, f), \
+                               sizeof_field(struct unwind_frame_info, f) \
        }
 #define PTREGS_INFO(f) EXTRA_INFO(regs.f)
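Put differently, EXTRA_INFO(f) packs two facts into one initializer: the field's offset in units of its own size, plus the size itself; the BUILD_BUG_ON_ZERO() term contributes zero but breaks the build if the offset is not a multiple of the size. A worked sketch with a hypothetical 64-bit layout:

  /* Hypothetical: 'pc' is an 8-byte member at byte offset 24. */
  struct frame_info { unsigned long pad[3]; unsigned long pc; };

  /* 24 % 8 == 0, so BUILD_BUG_ON_ZERO() adds 0 and compilation proceeds;
   * the initializer then reads { 24 / 8, 8 }, i.e. { 3, 8 }. */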
 
index a376a50d3fea8f8d1a478f702beb8843630ae176..a931d0a256d01ab0e78d4b8402c77d2567792bff 100644 (file)
@@ -7,7 +7,7 @@
 menuconfig ARC_PLAT_EZNPS
        bool "\"EZchip\" ARC dev platform"
        select CPU_BIG_ENDIAN
-       select CLKSRC_NPS
+       select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
        select EZNPS_GIC
        select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
        help
index ba75e3661a4159afa45ac15516db9e16d2fdae29..0b1b1c66bce92fb7471d48e388dea8f29b303146 100644 (file)
@@ -36,7 +36,7 @@ config ARM
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_IPC_PARSE_VERSION
        select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
-       select BUILDTIME_EXTABLE_SORT if MMU
+       select BUILDTIME_TABLE_SORT if MMU
        select CLONE_BACKWARDS
        select CPU_PM if SUSPEND || CPU_IDLE
        select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -72,6 +72,7 @@ config ARM
        select HAVE_ARM_SMCCC if CPU_V7
        select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
        select HAVE_CONTEXT_TRACKING
+       select HAVE_COPY_THREAD_TLS
        select HAVE_C_RECORDMCOUNT
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS if MMU
index 7ad079861efd6ce84e1ba88a3d0df00ba5a42985..91f93bc89716d68f915438e77c216bac429194f8 100644 (file)
 };
 
 / {
+       memory@80000000 {
+               device_type = "memory";
+               reg = <0x80000000 0x20000000>; /* 512 MB */
+       };
+
        clk_mcasp0_fixed: clk_mcasp0_fixed {
                #clock-cells = <0>;
                compatible = "fixed-clock";
index 8678e6e35493fc7b6548942ed643feec16a51407..e5fdb7abb0d54a47eceb026009d1ef1173de7e7f 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii-txid";
+       phy-mode = "rgmii-id";
 };
 
 &i2c0 {
index cae4500194fecb5e9a7d5c370300fea5ff550148..811c8cae315b520f445964fc07dc00e16266b5f9 100644 (file)
@@ -86,7 +86,7 @@
                };
 
        lcd0: display {
-               compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+               compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
                label = "lcd";
 
                backlight = <&lcd_bl>;
index 95314121d11153ba8b9330728bab031f40585fda..a6fbc088daa8630666a2eb8abf36a37142c7dec7 100644 (file)
@@ -42,7 +42,7 @@
        };
 
        lcd0: display {
-               compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+               compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
                label = "lcd";
 
                backlight = <&lcd_bl>;
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&spi0_pins_default>;
        pinctrl-1 = <&spi0_pins_sleep>;
+       ti,pindir-d0-out-d1-in = <1>;
 };
 
 &spi1 {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&spi1_pins_default>;
        pinctrl-1 = <&spi1_pins_sleep>;
+       ti,pindir-d0-out-d1-in = <1>;
 };
 
 &usb2_phy1 {
index 820ce3b60bb6c50a5b7a44b457e428b4383536d4..669559c9c95b3a83c93e412d7f526cbc6131077c 100644 (file)
 
 &pcie1_rc {
        status = "okay";
-       gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
-};
-
-&pcie1_ep {
-       gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+       gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>;
 };
 
 &mmc1 {
index a064f13b38802d1483796a7e5c5e93c8e03bb640..ddf123620e962ed71e155136ce1ccf48cf4cf19d 100644 (file)
        gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
 };
 
-&pcie1_ep {
-       gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
-};
-
 &mailbox5 {
        status = "okay";
        mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
index bc76f1705c0f667dab3ad9af455b5f7685823e08..a813a0cf3ff39a97af53723a5973fe721a1c0117 100644 (file)
                reg = <0x0 0x80000000 0x0 0x80000000>;
        };
 
+       main_12v0: fixedregulator-main_12v0 {
+               /* main supply */
+               compatible = "regulator-fixed";
+               regulator-name = "main_12v0";
+               regulator-min-microvolt = <12000000>;
+               regulator-max-microvolt = <12000000>;
+               regulator-always-on;
+               regulator-boot-on;
+       };
+
+       evm_5v0: fixedregulator-evm_5v0 {
+               /* Output of TPS54531D */
+               compatible = "regulator-fixed";
+               regulator-name = "evm_5v0";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               vin-supply = <&main_12v0>;
+               regulator-always-on;
+               regulator-boot-on;
+       };
+
        vdd_3v3: fixedregulator-vdd_3v3 {
                compatible = "regulator-fixed";
                regulator-name = "vdd_3v3";
        gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };
 
-&pcie1_ep {
-       gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
-};
-
 &mcasp3 {
        #sound-dai-cells = <0>;
        assigned-clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
index c1c9cd30f9803cd20cf7bdcb4f15b9479a4e0b39..13f7aefe045e6d59581bcf4d37ed4e328917c507 100644 (file)
                };
        };
 
-       pca0: pca9552@60 {
+       pca0: pca9552@61 {
                compatible = "nxp,pca9552";
-               reg = <0x60>;
+               reg = <0x61>;
                #address-cells = <1>;
                #size-cells = <0>;
 
        status = "okay";
 };
 
-&i2c13 {
-       status = "okay";
-};
-
-&i2c14 {
-       status = "okay";
-};
-
-&i2c15 {
-       status = "okay";
-};
-
-&i2c0 {
-       status = "okay";
-};
-
-&i2c1 {
-       status = "okay";
-};
-
-&i2c2 {
-       status = "okay";
-};
-
-&i2c3 {
-       status = "okay";
-
-       power-supply@68 {
-               compatible = "ibm,cffps2";
-               reg = <0x68>;
-       };
-
-       power-supply@69 {
-               compatible = "ibm,cffps2";
-               reg = <0x69>;
-       };
-
-       power-supply@6a {
-               compatible = "ibm,cffps2";
-               reg = <0x6a>;
-       };
-
-       power-supply@6b {
-               compatible = "ibm,cffps2";
-               reg = <0x6b>;
-       };
-};
-
-&i2c4 {
-       status = "okay";
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@49 {
-               compatible = "ti,tmp275";
-               reg = <0x49>;
-       };
-
-       tmp275@4a {
-               compatible = "ti,tmp275";
-               reg = <0x4a>;
-       };
-};
-
-&i2c5 {
-       status = "okay";
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@49 {
-               compatible = "ti,tmp275";
-               reg = <0x49>;
-       };
-};
-
-&i2c6 {
-       status = "okay";
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@4a {
-               compatible = "ti,tmp275";
-               reg = <0x4a>;
-       };
-
-       tmp275@4b {
-               compatible = "ti,tmp275";
-               reg = <0x4b>;
-       };
-};
-
-&i2c7 {
-       status = "okay";
-
-       si7021-a20@20 {
-               compatible = "silabs,si7020";
-               reg = <0x20>;
-       };
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       max31785@52 {
-               compatible = "maxim,max31785a";
-               reg = <0x52>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               fan@0 {
-                       compatible = "pmbus-fan";
-                       reg = <0>;
-                       tach-pulses = <2>;
-               };
-
-               fan@1 {
-                       compatible = "pmbus-fan";
-                       reg = <1>;
-                       tach-pulses = <2>;
-               };
-
-               fan@2 {
-                       compatible = "pmbus-fan";
-                       reg = <2>;
-                       tach-pulses = <2>;
-               };
-
-               fan@3 {
-                       compatible = "pmbus-fan";
-                       reg = <3>;
-                       tach-pulses = <2>;
-               };
-       };
-
-       pca0: pca9552@60 {
-               compatible = "nxp,pca9552";
-               reg = <0x60>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               gpio-controller;
-               #gpio-cells = <2>;
-
-               gpio@0 {
-                       reg = <0>;
-               };
-
-               gpio@1 {
-                       reg = <1>;
-               };
-
-               gpio@2 {
-                       reg = <2>;
-               };
-
-               gpio@3 {
-                       reg = <3>;
-               };
-
-               gpio@4 {
-                       reg = <4>;
-               };
-
-               gpio@5 {
-                       reg = <5>;
-               };
-
-               gpio@6 {
-                       reg = <6>;
-               };
-
-               gpio@7 {
-                       reg = <7>;
-               };
-
-               gpio@8 {
-                       reg = <8>;
-               };
-
-               gpio@9 {
-                       reg = <9>;
-               };
-
-               gpio@10 {
-                       reg = <10>;
-               };
-
-               gpio@11 {
-                       reg = <11>;
-               };
-
-               gpio@12 {
-                       reg = <12>;
-               };
-
-               gpio@13 {
-                       reg = <13>;
-               };
-
-               gpio@14 {
-                       reg = <14>;
-               };
-
-               gpio@15 {
-                       reg = <15>;
-               };
-       };
-
-       dps: dps310@76 {
-               compatible = "infineon,dps310";
-               reg = <0x76>;
-               #io-channel-cells = <0>;
-       };
-};
-
-&i2c8 {
-       status = "okay";
-
-       ucd90320@b {
-               compatible = "ti,ucd90160";
-               reg = <0x0b>;
-       };
-
-       ucd90320@c {
-               compatible = "ti,ucd90160";
-               reg = <0x0c>;
-       };
-
-       ucd90320@11 {
-               compatible = "ti,ucd90160";
-               reg = <0x11>;
-       };
-
-       rtc@32 {
-               compatible = "epson,rx8900";
-               reg = <0x32>;
-       };
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@4a {
-               compatible = "ti,tmp275";
-               reg = <0x4a>;
-       };
-};
-
-&i2c9 {
-       status = "okay";
-
-       ir35221@42 {
-               compatible = "infineon,ir35221";
-               reg = <0x42>;
-       };
-
-       ir35221@43 {
-               compatible = "infineon,ir35221";
-               reg = <0x43>;
-       };
-
-       ir35221@44 {
-               compatible = "infineon,ir35221";
-               reg = <0x44>;
-       };
-
-       tmp423a@4c {
-               compatible = "ti,tmp423";
-               reg = <0x4c>;
-       };
-
-       tmp423b@4d {
-               compatible = "ti,tmp423";
-               reg = <0x4d>;
-       };
-
-       ir35221@72 {
-               compatible = "infineon,ir35221";
-               reg = <0x72>;
-       };
-
-       ir35221@73 {
-               compatible = "infineon,ir35221";
-               reg = <0x73>;
-       };
-
-       ir35221@74 {
-               compatible = "infineon,ir35221";
-               reg = <0x74>;
-       };
-};
-
-&i2c10 {
-       status = "okay";
-
-       ir35221@42 {
-               compatible = "infineon,ir35221";
-               reg = <0x42>;
-       };
-
-       ir35221@43 {
-               compatible = "infineon,ir35221";
-               reg = <0x43>;
-       };
-
-       ir35221@44 {
-               compatible = "infineon,ir35221";
-               reg = <0x44>;
-       };
-
-       tmp423a@4c {
-               compatible = "ti,tmp423";
-               reg = <0x4c>;
-       };
-
-       tmp423b@4d {
-               compatible = "ti,tmp423";
-               reg = <0x4d>;
-       };
-
-       ir35221@72 {
-               compatible = "infineon,ir35221";
-               reg = <0x72>;
-       };
-
-       ir35221@73 {
-               compatible = "infineon,ir35221";
-               reg = <0x73>;
-       };
-
-       ir35221@74 {
-               compatible = "infineon,ir35221";
-               reg = <0x74>;
-       };
-};
-
-&i2c11 {
-       status = "okay";
-
-       tmp275@48 {
-               compatible = "ti,tmp275";
-               reg = <0x48>;
-       };
-
-       tmp275@49 {
-               compatible = "ti,tmp275";
-               reg = <0x49>;
-       };
-};
-
-&i2c12 {
-       status = "okay";
-};
-
 &i2c13 {
        status = "okay";
 
index f02de4ab058cd516cb956a3d8ebfcb9ef87028b0..ff49ec76fa7cd132f025baa177f537e9a6652232 100644 (file)
        };
 };
 
-&fmc {
-       status = "okay";
-       flash@0 {
-               status = "okay";
-               m25p,fast-read;
-               label = "bmc";
-               spi-max-frequency = <50000000>;
-#include "openbmc-flash-layout-128.dtsi"
-       };
-
-       flash@1 {
-               status = "okay";
-               m25p,fast-read;
-               label = "alt-bmc";
-               spi-max-frequency = <50000000>;
-       };
-};
-
-&spi1 {
-       status = "okay";
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_spi1_default>;
-
-       flash@0 {
-               status = "okay";
-               m25p,fast-read;
-               label = "pnor";
-               spi-max-frequency = <100000000>;
-       };
-};
-
 &mac2 {
        status = "okay";
        pinctrl-names = "default";
 
 &emmc {
        status = "okay";
+};
+
+&fsim0 {
+       status = "okay";
+
        #address-cells = <2>;
        #size-cells = <0>;
 
        status = "okay";
 };
 
-&i2c0 {
-       status = "okay";
-};
-
-&i2c1 {
-       status = "okay";
-};
-
-&i2c2 {
-       status = "okay";
-};
-
-&i2c3 {
-       status = "okay";
-
-       bmp: bmp280@77 {
-               compatible = "bosch,bmp280";
-               reg = <0x77>;
-               #io-channel-cells = <1>;
-       };
-
-       max31785@52 {
-               compatible = "maxim,max31785a";
-               reg = <0x52>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               fan@0 {
-                       compatible = "pmbus-fan";
-                       reg = <0>;
-                       tach-pulses = <2>;
-                       maxim,fan-rotor-input = "tach";
-                       maxim,fan-pwm-freq = <25000>;
-                       maxim,fan-dual-tach;
-                       maxim,fan-no-watchdog;
-                       maxim,fan-no-fault-ramp;
-                       maxim,fan-ramp = <2>;
-                       maxim,fan-fault-pin-mon;
-               };
-
-               fan@1 {
-                       compatible = "pmbus-fan";
-                       reg = <1>;
-                       tach-pulses = <2>;
-                       maxim,fan-rotor-input = "tach";
-                       maxim,fan-pwm-freq = <25000>;
-                       maxim,fan-dual-tach;
-                       maxim,fan-no-watchdog;
-                       maxim,fan-no-fault-ramp;
-                       maxim,fan-ramp = <2>;
-                       maxim,fan-fault-pin-mon;
-               };
-
-               fan@2 {
-                       compatible = "pmbus-fan";
-                       reg = <2>;
-                       tach-pulses = <2>;
-                       maxim,fan-rotor-input = "tach";
-                       maxim,fan-pwm-freq = <25000>;
-                       maxim,fan-dual-tach;
-                       maxim,fan-no-watchdog;
-                       maxim,fan-no-fault-ramp;
-                       maxim,fan-ramp = <2>;
-                       maxim,fan-fault-pin-mon;
-               };
-
-               fan@3 {
-                       compatible = "pmbus-fan";
-                       reg = <3>;
-                       tach-pulses = <2>;
-                       maxim,fan-rotor-input = "tach";
-                       maxim,fan-pwm-freq = <25000>;
-                       maxim,fan-dual-tach;
-                       maxim,fan-no-watchdog;
-                       maxim,fan-no-fault-ramp;
-                       maxim,fan-ramp = <2>;
-                       maxim,fan-fault-pin-mon;
-               };
-       };
-
-       dps: dps310@76 {
-               compatible = "infineon,dps310";
-               reg = <0x76>;
-               #io-channel-cells = <0>;
-       };
-
-       pca0: pca9552@60 {
-               compatible = "nxp,pca9552";
-               reg = <0x60>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               gpio-controller;
-               #gpio-cells = <2>;
-
-               gpio@0 {
-                       reg = <0>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@1 {
-                       reg = <1>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@2 {
-                       reg = <2>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@3 {
-                       reg = <3>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@4 {
-                       reg = <4>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@5 {
-                       reg = <5>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@6 {
-                       reg = <6>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@7 {
-                       reg = <7>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@8 {
-                       reg = <8>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@9 {
-                       reg = <9>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@10 {
-                       reg = <10>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@11 {
-                       reg = <11>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@12 {
-                       reg = <12>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@13 {
-                       reg = <13>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@14 {
-                       reg = <14>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@15 {
-                       reg = <15>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-       };
-
-       power-supply@68 {
-               compatible = "ibm,cffps1";
-               reg = <0x68>;
-       };
-
-       power-supply@69 {
-               compatible = "ibm,cffps1";
-               reg = <0x69>;
-       };
-};
-
-&i2c4 {
-       status = "okay";
-
-       tmp423a@4c {
-               compatible = "ti,tmp423";
-               reg = <0x4c>;
-       };
-
-       ir35221@70 {
-               compatible = "infineon,ir35221";
-               reg = <0x70>;
-       };
-
-       ir35221@71 {
-               compatible = "infineon,ir35221";
-               reg = <0x71>;
-       };
-};
-
-&i2c5 {
-       status = "okay";
-
-       tmp423a@4c {
-               compatible = "ti,tmp423";
-               reg = <0x4c>;
-       };
-
-       ir35221@70 {
-               compatible = "infineon,ir35221";
-               reg = <0x70>;
-       };
-
-       ir35221@71 {
-               compatible = "infineon,ir35221";
-               reg = <0x71>;
-       };
-};
-
-&i2c7 {
-       status = "okay";
-};
-
-&i2c9 {
-       status = "okay";
-
-       tmp275@4a {
-               compatible = "ti,tmp275";
-               reg = <0x4a>;
-       };
-};
-
-&i2c10 {
-       status = "okay";
-};
-
-&i2c11 {
-       status = "okay";
-
-       pca9552: pca9552@60 {
-               compatible = "nxp,pca9552";
-               reg = <0x60>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               gpio-controller;
-               #gpio-cells = <2>;
-
-               gpio-line-names = "PS_SMBUS_RESET_N", "APSS_RESET_N",
-                       "GPU0_TH_OVERT_N_BUFF", "GPU1_TH_OVERT_N_BUFF",
-                       "GPU2_TH_OVERT_N_BUFF", "GPU3_TH_OVERT_N_BUFF",
-                       "GPU4_TH_OVERT_N_BUFF", "GPU5_TH_OVERT_N_BUFF",
-                       "GPU0_PWR_GOOD_BUFF", "GPU1_PWR_GOOD_BUFF",
-                       "GPU2_PWR_GOOD_BUFF", "GPU3_PWR_GOOD_BUFF",
-                       "GPU4_PWR_GOOD_BUFF", "GPU5_PWR_GOOD_BUFF",
-                       "12V_BREAKER_FLT_N", "THROTTLE_UNLATCHED_N";
-
-               gpio@0 {
-                       reg = <0>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@1 {
-                       reg = <1>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@2 {
-                       reg = <2>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@3 {
-                       reg = <3>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@4 {
-                       reg = <4>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@5 {
-                       reg = <5>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@6 {
-                       reg = <6>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@7 {
-                       reg = <7>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@8 {
-                       reg = <8>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@9 {
-                       reg = <9>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@10 {
-                       reg = <10>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@11 {
-                       reg = <11>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@12 {
-                       reg = <12>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@13 {
-                       reg = <13>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@14 {
-                       reg = <14>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-
-               gpio@15 {
-                       reg = <15>;
-                       type = <PCA955X_TYPE_GPIO>;
-               };
-       };
-
-       rtc@32 {
-               compatible = "epson,rx8900";
-               reg = <0x32>;
-       };
-
-       eeprom@51 {
-               compatible = "atmel,24c64";
-               reg = <0x51>;
-       };
-
-       ucd90160@64 {
-               compatible = "ti,ucd90160";
-               reg = <0x64>;
-       };
-};
-
-&i2c12 {
-       status = "okay";
-};
-
-&i2c13 {
-       status = "okay";
-};
-
 &pinctrl {
        /* Hog these as no driver is probed for the entire LPC block */
        pinctrl-names = "default";
index 5f6142d99eeb68c427333b3a52a40e17bdc90526..b72afbaadaf81fce5bc8ab687af34ae14222baf0 100644 (file)
                                spi-max-frequency = <50000000>;
                                status = "disabled";
                        };
-
-                       fsim0: fsi@1e79b000 {
-                               compatible = "aspeed,ast2600-fsi-master", "fsi-master";
-                               reg = <0x1e79b000 0x94>;
-                               interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_fsi1_default>;
-                               clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
-                               status = "disabled";
-                       };
-
-                       fsim1: fsi@1e79b100 {
-                               compatible = "aspeed,ast2600-fsi-master", "fsi-master";
-                               reg = <0x1e79b100 0x94>;
-                               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_fsi2_default>;
-                               clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
-                               status = "disabled";
-                       };
                };
 
                mdio0: mdio@1e650000 {
                                ranges = <0 0x1e78a000 0x1000>;
                        };
 
+                       fsim0: fsi@1e79b000 {
+                               compatible = "aspeed,ast2600-fsi-master", "fsi-master";
+                               reg = <0x1e79b000 0x94>;
+                               interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_fsi1_default>;
+                               clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+                               status = "disabled";
+                       };
+
+                       fsim1: fsi@1e79b100 {
+                               compatible = "aspeed,ast2600-fsi-master", "fsi-master";
+                               reg = <0x1e79b100 0x94>;
+                               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_fsi2_default>;
+                               clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+                               status = "disabled";
+                       };
                };
        };
 };
index 2dac3efc7640595c352037485ebd89bfde7cff76..1bc45cfd545385c640fdf2c17b6c9da2e08af2c8 100644 (file)
                mdio: mdio@18002000 {
                        compatible = "brcm,iproc-mdio";
                        reg = <0x18002000 0x8>;
-                       #size-cells = <1>;
-                       #address-cells = <0>;
+                       #size-cells = <0>;
+                       #address-cells = <1>;
                        status = "disabled";
 
                        gphy0: ethernet-phy@0 {
index 961bed832755b0389b93053c7cbf2e5b4dadacbc..e2f6ffb00aa94340fc2db94fb7e75b0e7d998bca 100644 (file)
@@ -43,7 +43,7 @@
                         <0x7c000000  0x0 0xfc000000  0x02000000>,
                         <0x40000000  0x0 0xff800000  0x00800000>;
                /* Emulate a contiguous 30-bit address range for DMA */
-               dma-ranges = <0xc0000000  0x0 0x00000000  0x3c000000>;
+               dma-ranges = <0xc0000000  0x0 0x00000000  0x40000000>;
 
                /*
                 * This node is the provider for the enable-method for
index 3caaa57eb6c81eb449b6ee8dbdc04101b5b32d00..839491628e87b392cce339a83b8ec261ab70cbf3 100644 (file)
@@ -37,7 +37,7 @@
 
                        trips {
                                cpu-crit {
-                                       temperature     = <80000>;
+                                       temperature     = <90000>;
                                        hysteresis      = <0>;
                                        type            = "critical";
                                };
index 372dc1eb88a0e82883e2049cec701034f8374b2d..2d9b4dd058307e7f1c5c44bae5e4f2e75d5ea6c3 100644 (file)
        mdio: mdio@18003000 {
                compatible = "brcm,iproc-mdio";
                reg = <0x18003000 0x8>;
-               #size-cells = <1>;
-               #address-cells = <0>;
+               #size-cells = <0>;
+               #address-cells = <1>;
        };
 
        mdio-bus-mux@18003000 {
index 6472b056a001f9054c525635ab48611bb114240b..5a2c5320437dd246835d8a217c8cfad65694b1d0 100644 (file)
                                regulator-name = "LDORTC1";
                                regulator-boot-on;
                        };
-
-                       ldortc2_reg: LDORTC2 {
-                               regulator-name = "LDORTC2";
-                               regulator-boot-on;
-                       };
                };
        };
 };
index e43bccb78ab2ba44130603fb102254a3081ca858..d8f3821a0ffdc33069769942c660866efd824759 100644 (file)
@@ -8,7 +8,7 @@
 /dts-v1/;
 
 #include "imx6dl.dtsi"
-#include "imx6qdl-icore.dtsi"
+#include "imx6qdl-icore-1.5.dtsi"
 
 / {
        model = "Engicam i.CoreM6 DualLite/Solo MIPI Starter Kit";
index 5219553df1e73529f61cf790c476e25bbd000564..bb74fc62d9135863d6f58e1e1a0b53b1a9bb4dd7 100644 (file)
@@ -63,7 +63,7 @@
                #sound-dai-cells = <0>;
                clocks = <&clk_ext_audio_codec>;
                VDDA-supply = <&reg_3p3v>;
-               VDDIO-supply = <&reg_3p3v>;
+               VDDIO-supply = <&sw2_reg>;
        };
 };
 
index 845cfad99bf9c41567cd935467e180409aef595d..87f0aa897086e100d9ea368f5fec9dfb269cec62 100644 (file)
        };
 
        rtc@56 {
-               compatible = "rv3029c2";
+               compatible = "microcrystal,rv3029";
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_rtc_hw300>;
                reg = <0x56>;
index 71ca76a5e4a513730c1c0c1c0fe5b832c469a24e..fe59dde41b649cb7ab0a386b2faab9738a69dad6 100644 (file)
        vin-supply = <&vgen5_reg>;
 };
 
-&reg_vdd3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &reg_vdd2p5 {
        vin-supply = <&vgen5_reg>;
 };
index 4829aa682aeb01cbc3e9a0c7485e87ca59bebdbf..bc86cfaaa9c270f80cf260b0599dfd931af516fe 100644 (file)
        vin-supply = <&sw2_reg>;
 };
 
-&reg_vdd3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &reg_vdd2p5 {
        vin-supply = <&sw2_reg>;
 };
index 3e1d32fdf4b856f4742ae7685fa3d7e48da4d440..5ace9e6acf85ca9839d5c15db00d77662288146e 100644 (file)
        status = "okay";
 };
 
-&reg_3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &snvs_poweroff {
        status = "okay";
 };
index f1830ed387a5512ed99ea71b82cbc09876d2f8aa..91a7548fdb8db4d4339f844214778fd80c0702d7 100644 (file)
        vin-supply = <&vgen6_reg>;
 };
 
-&reg_vdd3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &reg_vdd2p5 {
        vin-supply = <&vgen6_reg>;
 };
index a8ee7087af5a5e769d505a048f7cc438c14a7a47..5a63ca6157229ccc48d175f8045ef9446b203720 100644 (file)
        vin-supply = <&vgen6_reg>;
 };
 
-&reg_vdd3p0 {
-       vin-supply = <&sw2_reg>;
-};
-
 &reg_vdd2p5 {
        vin-supply = <&vgen6_reg>;
 };
index 1506eb12b21e08a8ba316e38ac667690f19a19f2..212144511b661b28cbb8b6f34aeac7261eeec316 100644 (file)
                enable-active-high;
        };
 
-       reg_sensors: regulator-sensors {
+       reg_peri_3v3: regulator-peri-3v3 {
                compatible = "regulator-fixed";
                pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_sensors_reg>;
-               regulator-name = "sensors-supply";
+               pinctrl-0 = <&pinctrl_peri_3v3>;
+               regulator-name = "VPERI_3V3";
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
                gpio = <&gpio5 2 GPIO_ACTIVE_LOW>;
+               /*
+                * If you want to make this dynamic, please
+                * check schematics and test all affected peripherals:
+                *
+                * - sensors
+                * - ethernet phy
+                * - can
+                * - bluetooth
+                * - wm8960 audio codec
+                * - ov5640 camera
+                */
+               regulator-always-on;
        };
 
        reg_can_3v3: regulator-can-3v3 {
        pinctrl-0 = <&pinctrl_enet1>;
        phy-mode = "rmii";
        phy-handle = <&ethphy0>;
+       phy-supply = <&reg_peri_3v3>;
        status = "okay";
 };
 
        pinctrl-0 = <&pinctrl_enet2>;
        phy-mode = "rmii";
        phy-handle = <&ethphy1>;
+       phy-supply = <&reg_peri_3v3>;
        status = "okay";
 
        mdio {
        magnetometer@e {
                compatible = "fsl,mag3110";
                reg = <0x0e>;
-               vdd-supply = <&reg_sensors>;
-               vddio-supply = <&reg_sensors>;
+               vdd-supply = <&reg_peri_3v3>;
+               vddio-supply = <&reg_peri_3v3>;
        };
 };
 
        flash0: n25q256a@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "micron,n25q256a";
+               compatible = "micron,n25q256a", "jedec,spi-nor";
                spi-max-frequency = <29000000>;
                spi-rx-bus-width = <4>;
                spi-tx-bus-width = <4>;
                >;
        };
 
-       pinctrl_sensors_reg: sensorsreggrp {
+       pinctrl_peri_3v3: peri3v3grp {
                fsl,pins = <
                        MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02      0x1b0b0
                >;
index 1fb1ec5d3d70717f08026f47e42d9bfee0c81ca8..6d16e32aed899122efcb4279581844714e05f0ca 100644 (file)
@@ -49,3 +49,7 @@
                reg = <0x80000000 0x10000000>;
        };
 };
+
+&gpmi {
+       status = "okay";
+};
index d37a1927c88ea3fae91f4d1e67407f5d39036f9f..ab91c98f21241127736dada1205d2b57ea7eef59 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               cpu0: cpu@0 {
+               cpu0: cpu@f00 {
                        compatible = "arm,cortex-a7";
                        device_type = "cpu";
-                       reg = <0>;
+                       reg = <0xf00>;
                };
        };
 
index 5a7e3e5caebe2fc4e7f0880ab9f37d8ff026f9b3..3c534cd50ee3b2527c0e287d5216a1ffc95807c7 100644 (file)
 &aobus {
        pmu: pmu@e0 {
                compatible = "amlogic,meson8-pmu", "syscon";
-               reg = <0xe0 0x8>;
+               reg = <0xe0 0x18>;
        };
 
        pinctrl_aobus: pinctrl@84 {
index d9762de0ed34be80c943e1bb3e237253ca6fc83a..6f480827b94d36fe47dd3c0c09dd9c8c9e1363ec 100644 (file)
 
                        twsi1: i2c@d4011000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4011000 0x1000>;
+                               reg = <0xd4011000 0x70>;
                                interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI0>;
                                resets = <&soc_clocks MMP2_CLK_TWSI0>;
 
                        twsi2: i2c@d4031000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4031000 0x1000>;
+                               reg = <0xd4031000 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <0>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI1>;
 
                        twsi3: i2c@d4032000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4032000 0x1000>;
+                               reg = <0xd4032000 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <1>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI2>;
 
                        twsi4: i2c@d4033000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4033000 0x1000>;
+                               reg = <0xd4033000 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <2>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI3>;
 
                        twsi5: i2c@d4033800 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4033800 0x1000>;
+                               reg = <0xd4033800 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <3>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI4>;
 
                        twsi6: i2c@d4034000 {
                                compatible = "mrvl,mmp-twsi";
-                               reg = <0xd4034000 0x1000>;
+                               reg = <0xd4034000 0x70>;
                                interrupt-parent = <&twsi_mux>;
                                interrupts = <4>;
                                clocks = <&soc_clocks MMP2_CLK_TWSI5>;
index fb928503ad45da35e12b24189e3ece6ae66d1cca..d9be511f054f0ecf4e5be68bd68bfe7eb25d0d09 100644 (file)
                initial-mode = <1>; /* initialize in HUB mode */
                disabled-ports = <1>;
                intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
-               reset-gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */
+               reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */
                connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */
                refclk-frequency = <19200000>;
        };
index e7e4bb5ad8d5c3d73295ad724f9a7a6e8e74a217..fde84f123fbb5502ab07952e09e46c4e7c9b9cb2 100644 (file)
@@ -350,6 +350,7 @@ CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 # CONFIG_DETECT_HUNG_TASK is not set
index 26d6dee67aa6de729fdb20921b795649d9f04219..3608e55eaecdf46aa52fec6ff02134773d07fad5 100644 (file)
@@ -462,6 +462,7 @@ CONFIG_FONT_8x8=y
 CONFIG_FONT_8x16=y
 CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_PROVE_LOCKING=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
index 8c37cc8ab6f2bb938f7fca8bd239054c286883ec..c32c338f770426bcb6c97e7a36ca2bd34b241b00 100644 (file)
@@ -92,6 +92,7 @@ CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NETFILTER=y
 CONFIG_PHONET=m
+CONFIG_NET_SWITCHDEV=y
 CONFIG_CAN=m
 CONFIG_CAN_C_CAN=m
 CONFIG_CAN_C_CAN_PLATFORM=m
@@ -181,6 +182,7 @@ CONFIG_SMSC911X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
 CONFIG_TI_DAVINCI_EMAC=y
 CONFIG_TI_CPSW=y
+CONFIG_TI_CPSW_SWITCHDEV=y
 CONFIG_TI_CPTS=y
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
@@ -554,6 +556,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_SPLIT=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_TI_CPSW_SWITCHDEV=y
index bda57cafa2bcb9cc5ec572e35cf083195bbbded8..de3830443613e94956c9d463fcff15780702b145 100644 (file)
@@ -212,4 +212,5 @@ CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_PRINTK_TIME=y
 # CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
index f3f42cf3b8937e3e28fd01a6e2ab4890335f0dd3..776ae07e04697527754a463647e34ed882faa1ee 100644 (file)
@@ -38,6 +38,13 @@ void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
 }
 EXPORT_SYMBOL(curve25519_arch);
 
+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
+                         const u8 secret[CURVE25519_KEY_SIZE])
+{
+       return curve25519_arch(pub, secret, curve25519_base_point);
+}
+EXPORT_SYMBOL(curve25519_base_arch);
+
 static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
                                 unsigned int len)
 {
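
[Illustration, not part of the patch: curve25519_base_arch() is the public-key half of X25519; it simply fixes the third operand of curve25519_arch() to the standard base point. A minimal sketch of a caller, assuming only the <crypto/curve25519.h> helpers of this era; the function name below is made up.]

#include <crypto/curve25519.h>

/* Sketch only: derive an X25519 keypair via the accelerated helpers. */
static void x25519_keypair_sketch(u8 pub[CURVE25519_KEY_SIZE],
				  u8 secret[CURVE25519_KEY_SIZE])
{
	curve25519_generate_secret(secret);	/* random scalar, clamped per RFC 7748 */
	curve25519_base_arch(pub, secret);	/* pub = secret * basepoint */
}
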
index fa50bb04f580c93cbf19a579c4189c7503723f76..b5752f0e89362935685e431b6107817f97c2eecb 100644 (file)
@@ -10,6 +10,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <asm/barrier.h>
 #include <asm/cacheflush.h>
 #include <asm/cp15.h>
@@ -327,6 +328,7 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
 /*
  * GITS_VPROPBASER - hi and lo bits may be accessed independently.
  */
+#define gits_read_vpropbaser(c)                __gic_readq_nonatomic(c)
 #define gits_write_vpropbaser(v, c)    __gic_writeq_nonatomic(v, c)
 
 /*
index 7667826b93f12d6cde2e151b6fa17372a1d1c3a6..5ac46e2860bcb2df9bfddd1ede00d9db88110a15 100644 (file)
@@ -50,19 +50,16 @@ void efi_virtmap_unload(void);
 
 /* arch specific definitions used by the stub code */
 
-#define efi_call_early(f, ...)         sys_table_arg->boottime->f(__VA_ARGS__)
-#define __efi_call_early(f, ...)       f(__VA_ARGS__)
-#define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
-#define efi_is_64bit()                 (false)
+#define efi_bs_call(func, ...) efi_system_table()->boottime->func(__VA_ARGS__)
+#define efi_rt_call(func, ...) efi_system_table()->runtime->func(__VA_ARGS__)
+#define efi_is_native()                (true)
 
-#define efi_table_attr(table, attr, instance)                          \
-       ((table##_t *)instance)->attr
+#define efi_table_attr(inst, attr)     (inst->attr)
 
-#define efi_call_proto(protocol, f, instance, ...)                     \
-       ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+#define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
 
-struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
-void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
+struct screen_info *alloc_screen_info(void);
+void free_screen_info(struct screen_info *si);
 
 static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
 {
index aefdabdbeb8486ca3a610977c0e75c5f852f2be2..ab2b654084fa33151c4df80b3f65ed0076e1852a 100644 (file)
@@ -356,7 +356,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
  *
  * Function            Memory type     Cacheability    Cache hint
  * ioremap()           Device          n/a             n/a
- * ioremap_nocache()   Device          n/a             n/a
  * ioremap_cache()     Normal          Writeback       Read allocate
  * ioremap_wc()                Normal          Non-cacheable   n/a
  * ioremap_wt()                Normal          Non-cacheable   n/a
@@ -368,13 +367,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
  * - unaligned accesses are "unpredictable"
  * - writes may be delayed before they hit the endpoint device
  *
- * ioremap_nocache() is the same as ioremap() as there are too many device
- * drivers using this for device registers, and documentation which tells
- * people to use it for such for this to be any different.  This is not a
- * safe fallback for memory-like mappings, or memory regions where the
- * compiler may generate unaligned accesses - eg, via inlining its own
- * memcpy.
- *
  * All normal memory mappings have the following properties:
  * - reads can be repeated with no side effects
  * - repeated reads return the last value written
index d3e937dcee4d0832ce3602156931dc3ecfb0aabc..007d8fea715721d2435d5aff668f5a3b62b10054 100644 (file)
@@ -10,7 +10,7 @@
  * to ensure that the maintenance completes in case we migrate to another
  * CPU.
  */
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
 #define __complete_pending_tlbi()      dsb(ish)
 #else
 #define __complete_pending_tlbi()
index 0ad2429c324f6f9387c5350a1b9e93a963e8abec..fe6e1f65932d5fb363629a206d5e6cf18220f5b4 100644 (file)
@@ -52,6 +52,24 @@ static __always_inline long clock_gettime_fallback(
        return ret;
 }
 
+static __always_inline long clock_gettime32_fallback(
+                                       clockid_t _clkid,
+                                       struct old_timespec32 *_ts)
+{
+       register struct old_timespec32 *ts asm("r1") = _ts;
+       register clockid_t clkid asm("r0") = _clkid;
+       register long ret asm ("r0");
+       register long nr asm("r7") = __NR_clock_gettime;
+
+       asm volatile(
+       "       swi #0\n"
+       : "=r" (ret)
+       : "r" (clkid), "r" (ts), "r" (nr)
+       : "memory");
+
+       return ret;
+}
+
 static __always_inline int clock_getres_fallback(
                                        clockid_t _clkid,
                                        struct __kernel_timespec *_ts)
@@ -70,6 +88,24 @@ static __always_inline int clock_getres_fallback(
        return ret;
 }
 
+static __always_inline int clock_getres32_fallback(
+                                       clockid_t _clkid,
+                                       struct old_timespec32 *_ts)
+{
+       register struct old_timespec32 *ts asm("r1") = _ts;
+       register clockid_t clkid asm("r0") = _clkid;
+       register long ret asm ("r0");
+       register long nr asm("r7") = __NR_clock_getres;
+
+       asm volatile(
+       "       swi #0\n"
+       : "=r" (ret)
+       : "r" (clkid), "r" (ts), "r" (nr)
+       : "memory");
+
+       return ret;
+}
+
 static __always_inline u64 __arch_get_hw_counter(int clock_mode)
 {
 #ifdef CONFIG_ARM_ARCH_TIMER
index c4166f31707148c4c8cb9b76772307b4df8a08d3..cff87d8d30da7b17f3d9fd46269f0ee79b3fa465 100644 (file)
@@ -34,9 +34,9 @@ struct vdso_data *__arm_get_k_vdso_data(void)
 #define __arch_get_k_vdso_data __arm_get_k_vdso_data
 
 static __always_inline
-int __arm_update_vdso_data(void)
+bool __arm_update_vdso_data(void)
 {
-       return !cntvct_ok;
+       return cntvct_ok;
 }
 #define __arch_update_vdso_data __arm_update_vdso_data
 
diff --git a/arch/arm/include/asm/vmalloc.h b/arch/arm/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..a9b3718
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARM_VMALLOC_H
+#define _ASM_ARM_VMALLOC_H
+
+#endif /* _ASM_ARM_VMALLOC_H */
index 8b679e2ca3c3d3b303dddc5f494ad1047d7789db..89e5d864e923461c40bf80f794d6f91e904bb9ab 100644 (file)
@@ -53,8 +53,8 @@ obj-$(CONFIG_HAVE_ARM_SCU)    += smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)     += smp_twd.o
 obj-$(CONFIG_ARM_ARCH_TIMER)   += arch_timer.o
 obj-$(CONFIG_FUNCTION_TRACER)  += entry-ftrace.o
-obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o insn.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o insn.o
+obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o insn.o patch.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o insn.o patch.o
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o insn.o patch.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
 # Main stuff for KPROBES is in arch/arm/probes/.
index 858d4e5415326efa8351dc3042acc549538bc687..77f54830554c32599aa5de9a0e0fb52c4de5c5a6 100644 (file)
@@ -211,7 +211,7 @@ __irq_svc:
        svc_entry
        irq_handler
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        teq     r8, #0                          @ if preempt count != 0
@@ -226,7 +226,7 @@ ENDPROC(__irq_svc)
 
        .ltorg
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 svc_preempt:
        mov     r8, lr
 1:     bl      preempt_schedule_irq            @ irq en/disable is done inside
index bda949fd84e8b60b13ee3c79f5a28ad7e5151369..2a5ff69c28e68d195630c12bd9c05fc1bdf051b2 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/ftrace.h>
 #include <asm/insn.h>
 #include <asm/set_memory.h>
+#include <asm/patch.h>
 
 #ifdef CONFIG_THUMB2_KERNEL
 #define        NOP             0xf85deb04      /* pop.w {lr} */
@@ -35,9 +36,7 @@ static int __ftrace_modify_code(void *data)
 {
        int *command = data;
 
-       set_kernel_text_rw();
        ftrace_modify_all_code(*command);
-       set_kernel_text_ro();
 
        return 0;
 }
@@ -59,13 +58,11 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 
 int ftrace_arch_code_modify_prepare(void)
 {
-       set_all_modules_text_rw();
        return 0;
 }
 
 int ftrace_arch_code_modify_post_process(void)
 {
-       set_all_modules_text_ro();
        /* Make sure any TLB misses during machine stop are cleared. */
        flush_tlb_all();
        return 0;
@@ -97,10 +94,7 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
                        return -EINVAL;
        }
 
-       if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
-               return -EPERM;
-
-       flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+       __patch_text((void *)pc, new);
 
        return 0;
 }
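
[Illustration, not part of the patch: the RW/RO windows can go because __patch_text(), from asm/patch.h, writes the instruction through its own short-lived writable alias and performs the cache maintenance itself. The helpers map_patch_alias()/unmap_patch_alias() below are hypothetical stand-ins for the real fixmap-based internals.]

#include <asm/cacheflush.h>

/* Hypothetical sketch of what a patch_text-style helper does. */
static void patch_text_sketch(void *addr, unsigned int insn)
{
	void *alias = map_patch_alias(addr);	/* hypothetical: writable alias */

	*(unsigned int *)alias = insn;		/* store the new opcode */
	unmap_patch_alias(alias);		/* hypothetical: drop the alias */

	/* keep the I-cache coherent with the rewritten text */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + sizeof(insn));
}
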
index ae5020302de496baf1e1f14b7ece8021a52cb828..6607fa817bba9a5510ac0a0c2de88342a80dfdb9 100644 (file)
@@ -146,10 +146,9 @@ ARM_BE8(orr        r7, r7, #(1 << 25))     @ HSCTLR.EE
 #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
        @ make CNTP_* and CNTPCT accessible from PL1
        mrc     p15, 0, r7, c0, c1, 1   @ ID_PFR1
-       lsr     r7, #16
-       and     r7, #0xf
-       cmp     r7, #1
-       bne     1f
+       ubfx    r7, r7, #16, #4
+       teq     r7, #0
+       beq     1f
        mrc     p15, 4, r7, c14, c1, 0  @ CNTHCTL
        orr     r7, r7, #3              @ PL1PCEN | PL1PCTEN
        mcr     p15, 4, r7, c14, c1, 0  @ CNTHCTL
index cea1c27c29cba2d81aefe19b5b94f94d55b87c85..46e478fb5ea203a7a42650ce4233d801db0c4277 100644 (file)
@@ -226,8 +226,8 @@ void release_thread(struct task_struct *dead_task)
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 int
-copy_thread(unsigned long clone_flags, unsigned long stack_start,
-           unsigned long stk_sz, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+           unsigned long stk_sz, struct task_struct *p, unsigned long tls)
 {
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);
@@ -261,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
        clear_ptrace_hw_breakpoint(p);
 
        if (clone_flags & CLONE_SETTLS)
-               thread->tp_value[0] = childregs->ARM_r3;
+               thread->tp_value[0] = tls;
        thread->tp_value[1] = get_tpuser();
 
        thread_notify(THREAD_NOTIFY_COPY, thread);
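
[Context, hedged: with HAVE_COPY_THREAD_TLS the TLS value reaches the arch hook as an explicit parameter instead of being recovered from the child's saved registers, which is what the childregs->ARM_r3 read above used to do. A simplified sketch of the core-side dispatch; copy_thread_dispatch() is an illustrative name, not the real kernel/fork.c code.]

#include <linux/sched.h>

/* Illustrative only: how the core picks between the two arch hooks. */
static int copy_thread_dispatch(unsigned long clone_flags, unsigned long sp,
				unsigned long arg, struct task_struct *p,
				unsigned long tls)
{
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
	return copy_thread_tls(clone_flags, sp, arg, p, tls);	/* tls passed through */
#else
	return copy_thread(clone_flags, sp, arg, p);	/* tls implicit in pt_regs */
#endif
}
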
index c053abd1fb5393d853fd48aca47c3ec975b27eed..abb7dd7e656fde025edbb3c88bfee01b7b831759 100644 (file)
@@ -248,6 +248,8 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 
 #ifdef CONFIG_PREEMPT
 #define S_PREEMPT " PREEMPT"
+#elif defined(CONFIG_PREEMPT_RT)
+#define S_PREEMPT " PREEMPT_RT"
 #else
 #define S_PREEMPT ""
 #endif
index dbe296798647fd92030a0ad2c5e57e14e2d0aa22..fa0300d8c79daae2da19e3290976994bc6536e1e 100644 (file)
@@ -13,6 +13,7 @@ static const char * const bcm2711_compat[] = {
 #ifdef CONFIG_ARCH_MULTI_V7
        "brcm,bcm2711",
 #endif
+       NULL
 };
 
 DT_MACHINE_START(BCM2711, "BCM2711")
index 21400b3fa5fe88444a60ca24f58560e40a3a609e..c9db2a9006d95d4d97254a413af22cb48cead22c 100644 (file)
@@ -105,7 +105,7 @@ static int nsp_write_lut(unsigned int cpu)
        if (!secondary_boot_addr)
                return -EINVAL;
 
-       sku_rom_lut = ioremap_nocache((phys_addr_t)secondary_boot_addr,
+       sku_rom_lut = ioremap((phys_addr_t)secondary_boot_addr,
                                      sizeof(phys_addr_t));
        if (!sku_rom_lut) {
                pr_warn("unable to ioremap SKU-ROM LUT register for cpu %u\n", cpu);
@@ -174,7 +174,7 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
        if (!secondary_boot_addr)
                return -EINVAL;
 
-       boot_reg = ioremap_nocache((phys_addr_t)secondary_boot_addr,
+       boot_reg = ioremap((phys_addr_t)secondary_boot_addr,
                                   sizeof(phys_addr_t));
        if (!boot_reg) {
                pr_err("unable to map boot register for cpu %u\n", cpu_id);
index dd427bd2768c3cf6f49152a1443cf154a51ad439..02b180ad724540c022d861aab5c6929f37af8060 100644 (file)
@@ -9,6 +9,7 @@ menuconfig ARCH_DAVINCI
        select PM_GENERIC_DOMAINS if PM
        select PM_GENERIC_DOMAINS_OF if PM && OF
        select REGMAP_MMIO
+       select RESET_CONTROLLER
        select HAVE_IDE
        select PINCTRL_SINGLE
 
index 3e447d4688458b31fe994be4b0a3954802f312b4..e650131ee88f06aadc24f5cf3ff60f9253ff2e44 100644 (file)
@@ -34,7 +34,7 @@ void __iomem  *davinci_sysmod_base;
 
 void davinci_map_sysmod(void)
 {
-       davinci_sysmod_base = ioremap_nocache(DAVINCI_SYSTEM_MODULE_BASE,
+       davinci_sysmod_base = ioremap(DAVINCI_SYSTEM_MODULE_BASE,
                                              0x800);
        /*
         * Throw a bug since a lot of board initialization code depends
index 4ef56571145bf5ef20862368ed70372d8b7a8315..6e7f10c8098ac5fc2a2ca3853e3ca2a0b4257fcd 100644 (file)
@@ -12,6 +12,7 @@ menuconfig ARCH_EXYNOS
        select ARCH_SUPPORTS_BIG_ENDIAN
        select ARM_AMBA
        select ARM_GIC
+       select EXYNOS_IRQ_COMBINER
        select COMMON_CLK_SAMSUNG
        select EXYNOS_ASV
        select EXYNOS_CHIPID
index d8118031c51f710a668cbb7f66e4cb4025d4d38e..871f98342d50e67e9962ae863436bf839c8cb4e7 100644 (file)
@@ -84,7 +84,7 @@ struct device * __init imx_soc_device_init(void)
        const char *ocotp_compat = NULL;
        struct soc_device *soc_dev;
        struct device_node *root;
-       struct regmap *ocotp;
+       struct regmap *ocotp = NULL;
        const char *soc_id;
        u64 soc_uid = 0;
        u32 val;
@@ -148,11 +148,11 @@ struct device * __init imx_soc_device_init(void)
                soc_id = "i.MX6UL";
                break;
        case MXC_CPU_IMX6ULL:
-               ocotp_compat = "fsl,imx6ul-ocotp";
+               ocotp_compat = "fsl,imx6ull-ocotp";
                soc_id = "i.MX6ULL";
                break;
        case MXC_CPU_IMX6ULZ:
-               ocotp_compat = "fsl,imx6ul-ocotp";
+               ocotp_compat = "fsl,imx6ull-ocotp";
                soc_id = "i.MX6ULZ";
                break;
        case MXC_CPU_IMX6SLL:
@@ -175,7 +175,9 @@ struct device * __init imx_soc_device_init(void)
                ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat);
                if (IS_ERR(ocotp))
                        pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat);
+       }
 
+       if (!IS_ERR_OR_NULL(ocotp)) {
                regmap_read(ocotp, OCOTP_UID_H, &val);
                soc_uid = val;
                regmap_read(ocotp, OCOTP_UID_L, &val);
index 0331c58b07a26a9c8415496c8da068f8b899d43a..dff651b9f252d4451dce883ec5fcd06d2129fda3 100644 (file)
@@ -17,9 +17,9 @@ extern void pxa168_clear_keypad_wakeup(void);
 #include <linux/platform_data/keypad-pxa27x.h>
 #include <linux/pxa168_eth.h>
 #include <linux/platform_data/mv_usb.h>
+#include <linux/soc/mmp/cputype.h>
 
 #include "devices.h"
-#include "cputype.h"
 
 extern struct pxa_device_desc pxa168_device_uart1;
 extern struct pxa_device_desc pxa168_device_uart2;
index 110dcb3314d13f04c4e783a5762dbcd401c6f77e..c65cfc1ad99b4798a69c0b233e984b0eaf01af3f 100644 (file)
@@ -207,7 +207,7 @@ static int __init mmp_dt_init_timer(struct device_node *np)
                ret = clk_prepare_enable(clk);
                if (ret)
                        return ret;
-               rate = clk_get_rate(clk) / 2;
+               rate = clk_get_rate(clk);
        } else if (cpu_is_pj4()) {
                rate = 6500000;
        } else {
index ad08d470a2cac3f9f767844e0857a371ecb7bdf9..dca7d06c0b9386192010ca469f7a751cad515d36 100644 (file)
@@ -95,6 +95,7 @@ config ARCH_OMAP2PLUS
        bool
        select ARCH_HAS_BANDGAP
        select ARCH_HAS_HOLES_MEMORYMODEL
+       select ARCH_HAS_RESET_CONTROLLER
        select ARCH_OMAP
        select CLKSRC_MMIO
        select GENERIC_IRQ_CHIP
@@ -105,11 +106,11 @@ config ARCH_OMAP2PLUS
        select OMAP_DM_TIMER
        select OMAP_GPMC
        select PINCTRL
+       select RESET_CONTROLLER
        select SOC_BUS
        select TI_SYSC
        select OMAP_IRQCHIP
        select CLKSRC_TI_32K
-       select ARCH_HAS_RESET_CONTROLLER
        help
          Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
 
index ca52271de5a880d7e7e51bb05b21efc04ae706aa..e95c224ffc4d876d38f7b2ab5d07c2a1ed61ec59 100644 (file)
@@ -306,10 +306,14 @@ static void __init dra7x_evm_mmc_quirk(void)
 
 static struct clockdomain *ti_sysc_find_one_clockdomain(struct clk *clk)
 {
+       struct clk_hw *hw = __clk_get_hw(clk);
        struct clockdomain *clkdm = NULL;
        struct clk_hw_omap *hwclk;
 
-       hwclk = to_clk_hw_omap(__clk_get_hw(clk));
+       hwclk = to_clk_hw_omap(hw);
+       if (!omap2_clk_is_hw_omap(hw))
+               return NULL;
+
        if (hwclk && hwclk->clkdm_name)
                clkdm = clkdm_lookup(hwclk->clkdm_name);
 
index e1a394ac3eea7c4a28be5b23ffa874e2d86f8e65..868dc0cf4859f2debe5b0bedf8d189726aa8fd1e 100644 (file)
@@ -1008,7 +1008,7 @@ static void __init magician_init(void)
        pxa_set_udc_info(&magician_udc_info);
 
        /* Check LCD type we have */
-       cpld = ioremap_nocache(PXA_CS3_PHYS, 0x1000);
+       cpld = ioremap(PXA_CS3_PHYS, 0x1000);
        if (cpld) {
                u8 board_id = __raw_readb(cpld + 0x14);
 
index 96330ef256416dce9c3d310dcef6c2ccb06450bc..e771ce70e132ff535962f2af51727105f72b5fc0 100644 (file)
@@ -189,7 +189,7 @@ static void apmu_init_cpu(struct resource *res, int cpu, int bit)
        if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem)
                return;
 
-       apmu_cpus[cpu].iomem = ioremap_nocache(res->start, resource_size(res));
+       apmu_cpus[cpu].iomem = ioremap(res->start, resource_size(res));
        apmu_cpus[cpu].bit = bit;
 
        pr_debug("apmu ioremap %d %d %pr\n", cpu, bit, res);
index e84599dd96f1a2d2b83eaa2fba2dcc9528df0874..672081405a7ed7aaf579d66c236970457a589d1f 100644 (file)
@@ -103,7 +103,7 @@ map:
        iounmap(p);
 
        /* setup reset vectors */
-       p = ioremap_nocache(RST, 0x63);
+       p = ioremap(RST, 0x63);
        bar = phys_to_sbar(res.start);
        if (has_a15) {
                writel_relaxed(bar, p + CA15BAR);
index 787d039b5a073863c2e655f6ee779c201b2f42d1..f760c27c99074a02f19bd31368d0f474d6bc10b8 100644 (file)
@@ -28,7 +28,7 @@ static void __init r8a7740_meram_workaround(void)
 {
        void __iomem *reg;
 
-       reg = ioremap_nocache(MEBUFCNTR, 4);
+       reg = ioremap(MEBUFCNTR, 4);
        if (reg) {
                iowrite32(0x01600164, reg);
                iounmap(reg);
@@ -37,9 +37,9 @@ static void __init r8a7740_meram_workaround(void)
 
 static void __init r8a7740_init_irq_of(void)
 {
-       void __iomem *intc_prio_base = ioremap_nocache(0xe6900010, 0x10);
-       void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
-       void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
+       void __iomem *intc_prio_base = ioremap(0xe6900010, 0x10);
+       void __iomem *intc_msk_base = ioremap(0xe6900040, 0x10);
+       void __iomem *pfc_inta_ctrl = ioremap(0xe605807c, 0x4);
 
        irqchip_init();
 
index ce51794f64c7b1dd4a00cbfa5b420251c1fdb581..2bc93f391bcf8e5d4142ed2855b661eac32e4778 100644 (file)
@@ -22,7 +22,7 @@
 
 static void __init r8a7778_init_irq_dt(void)
 {
-       void __iomem *base = ioremap_nocache(0xfe700000, 0x00100000);
+       void __iomem *base = ioremap(0xfe700000, 0x00100000);
 
        BUG_ON(!base);
 
index 354e0e7025aec5c1da2145ae4037f7e805868970..1da11bdb1dfbd6f1ce906b83ba2d84ecd396f316 100644 (file)
@@ -551,8 +551,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
 
 static int __init ve_spc_clk_init(void)
 {
-       int cpu;
+       int cpu, cluster;
        struct clk *clk;
+       bool init_opp_table[MAX_CLUSTERS] = { false };
 
        if (!info)
                return 0; /* Continue only if SPC is initialised */
@@ -578,8 +579,17 @@ static int __init ve_spc_clk_init(void)
                        continue;
                }
 
+               cluster = topology_physical_package_id(cpu_dev->id);
+               if (init_opp_table[cluster])
+                       continue;
+
                if (ve_init_opp_table(cpu_dev))
                        pr_warn("failed to initialise cpu%d opp table\n", cpu);
+               else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
+                        topology_core_cpumask(cpu_dev->id)))
+                       pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
+               else
+                       init_opp_table[cluster] = true;
        }
 
        platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
index 0ee8fc4b4672c6d2123c231a47b61cb870ec6052..dc8f152f35566710060f1d82e03d464200ee4bc5 100644 (file)
@@ -135,13 +135,13 @@ flush_levels:
        and     r1, r1, #7                      @ mask of the bits for current cache only
        cmp     r1, #2                          @ see what cache we have at this level
        blt     skip                            @ skip if no cache, or just i-cache
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        save_and_disable_irqs_notrace r9        @ make cssr&csidr read atomic
 #endif
        mcr     p15, 2, r10, c0, c0, 0          @ select current cache level in cssr
        isb                                     @ isb to sync the new cssr&csidr
        mrc     p15, 1, r1, c0, c0, 0           @ read the new csidr
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        restore_irqs_notrace r9
 #endif
        and     r2, r1, #7                      @ extract the length of the cache lines
index a0035c426ce635b3ceacc647d712aabb61d0e63e..1bc3a0a507539d0e71e365ee4f0c476cea387654 100644 (file)
@@ -183,13 +183,13 @@ flush_levels:
        and     r1, r1, #7                      @ mask of the bits for current cache only
        cmp     r1, #2                          @ see what cache we have at this level
        blt     skip                            @ skip if no cache, or just i-cache
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        save_and_disable_irqs_notrace r9        @ make cssr&csidr read atomic
 #endif
        write_csselr r10, r1                    @ set current cache level
        isb                                     @ isb to sync the new cssr&csidr
        read_ccsidr r1                          @ read the new csidr
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        restore_irqs_notrace r9
 #endif
        and     r2, r1, #7                      @ extract the length of the cache lines
index 0fda344beb0bf7802125313ebd56120863a78e9d..1babb392e70a40624b479556535c6d9eeb8001a8 100644 (file)
@@ -14,7 +14,7 @@ targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 
 ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector
-ccflags-y += -DDISABLE_BRANCH_PROFILING
+ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO32
 
 ldflags-$(CONFIG_CPU_ENDIAN_BE8) := --be8
 ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
index b1b4476ddb834ba64a8ba93e55e88d3a7282fed1..d2cebf613ec3c7370b6fb69d6ef03d1da03415d6 100644 (file)
@@ -34,32 +34,32 @@ config ARM64
        select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
-       select ARCH_INLINE_READ_LOCK if !PREEMPT
-       select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
-       select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
-       select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
-       select ARCH_INLINE_READ_UNLOCK if !PREEMPT
-       select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
-       select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
-       select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
-       select ARCH_INLINE_WRITE_LOCK if !PREEMPT
-       select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
-       select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
-       select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
-       select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
-       select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
-       select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
-       select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
-       select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
-       select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
-       select ARCH_INLINE_SPIN_LOCK if !PREEMPT
-       select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
-       select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
-       select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
-       select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
-       select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
-       select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
-       select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
+       select ARCH_INLINE_READ_LOCK if !PREEMPTION
+       select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+       select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+       select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+       select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+       select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+       select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+       select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+       select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+       select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+       select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+       select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+       select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+       select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
        select ARCH_KEEP_MEMBLOCK
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_USE_QUEUED_RWLOCKS
@@ -81,7 +81,7 @@ config ARM64
        select ARM_GIC_V3
        select ARM_GIC_V3_ITS if PCI
        select ARM_PSCI_FW
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select CLONE_BACKWARDS
        select COMMON_CLK
        select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -138,6 +138,7 @@ config ARM64
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
        select HAVE_CONTEXT_TRACKING
+       select HAVE_COPY_THREAD_TLS
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
@@ -161,6 +162,7 @@ config ARM64
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_FUNCTION_ARG_ACCESS_API
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_RCU_TABLE_FREE
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
@@ -301,6 +303,9 @@ config ARCH_SUPPORTS_UPROBES
 config ARCH_PROC_KCORE_TEXT
        def_bool y
 
+config BROKEN_GAS_INST
+       def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
+
 config KASAN_SHADOW_OFFSET
        hex
        depends on KASAN
@@ -514,9 +519,13 @@ config ARM64_ERRATUM_1418040
 
          If unsure, say Y.
 
+config ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+       bool
+
 config ARM64_ERRATUM_1165522
        bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
        default y
+       select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
        help
          This option adds a workaround for ARM Cortex-A76 erratum 1165522.
 
@@ -526,6 +535,19 @@ config ARM64_ERRATUM_1165522
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_1530923
+       bool "Cortex-A55: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+       default y
+       select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+       help
+         This option adds a workaround for ARM Cortex-A55 erratum 1530923.
+
+         Affected Cortex-A55 cores (r0p0, r0p1, r1p0, r2p0) could end up with
+         corrupted TLBs by speculating an AT instruction during a guest
+         context switch.
+
+         If unsure, say Y.
+
 config ARM64_ERRATUM_1286807
        bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
        default y
@@ -542,9 +564,13 @@ config ARM64_ERRATUM_1286807
          invalidated has been observed by other observers. The
          workaround repeats the TLBI+DSB operation.
 
+config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
+       bool
+
 config ARM64_ERRATUM_1319367
        bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
        default y
+       select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
        help
          This option adds workarounds for ARM Cortex-A57 erratum 1319537
          and A72 erratum 1319367
@@ -1363,6 +1389,11 @@ config ARM64_PAN
         instruction if the cpu does not implement the feature.
 
 config ARM64_LSE_ATOMICS
+       bool
+       default ARM64_USE_LSE_ATOMICS
+       depends on $(as-instr,.arch_extension lse)
+
+config ARM64_USE_LSE_ATOMICS
        bool "Atomic instructions"
        depends on JUMP_LABEL
        default y
@@ -1484,6 +1515,30 @@ config ARM64_PTR_AUTH
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARM64_E0PD
+       bool "Enable support for E0PD"
+       default y
+       help
+         E0PD (part of the ARMv8.5 extensions) allows us to ensure
+         that EL0 accesses made via TTBR1 always fault in constant time,
+         providing benefits to KASLR similar to those provided by KPTI, but
+         with lower overhead and without disrupting legitimate access to
+         kernel memory such as SPE.
+
+         This option enables E0PD for TTBR1 where available.
+
+config ARCH_RANDOM
+       bool "Enable support for random number generation"
+       default y
+       help
+         Random number generation (part of the ARMv8.5 Extensions)
+         provides a high bandwidth, cryptographically secure
+         hardware random number generator.
+
+endmenu
+
 config ARM64_SVE
        bool "ARM Scalable Vector Extension support"
        default y
@@ -1544,7 +1599,7 @@ config ARM64_MODULE_PLTS
 
 config ARM64_PSEUDO_NMI
        bool "Support for NMI-like interrupts"
-       select CONFIG_ARM_GIC_V3
+       select ARM_GIC_V3
        help
          Adds support for mimicking Non-Maskable Interrupts through the use of
          GIC interrupt priority. This support requires version 3 or later of
index 1fbe24d4fdb66a7c7833c79ea6cfcb023b61561f..dca1a97751ab13dacebb2250842fcca56b772a85 100644 (file)
@@ -30,11 +30,8 @@ LDFLAGS_vmlinux      += --fix-cortex-a53-843419
   endif
 endif
 
-# Check for binutils support for specific extensions
-lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
-
-ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
-  ifeq ($(lseinstr),)
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
+  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
 $(warning LSE atomics not supported by binutils)
   endif
 endif
@@ -45,19 +42,15 @@ cc_has_k_constraint := $(call try-run,echo                          \
                return 0;                                               \
        }' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
 
-ifeq ($(CONFIG_ARM64), y)
-brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
-
-  ifneq ($(brokengasinst),)
+ifeq ($(CONFIG_BROKEN_GAS_INST),y)
 $(warning Detected assembler with broken .inst; disassembly will be unreliable)
-  endif
 endif
 
-KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr) $(brokengasinst)     \
+KBUILD_CFLAGS  += -mgeneral-regs-only  \
                   $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS  += $(call cc-disable-warning, psabi)
-KBUILD_AFLAGS  += $(lseinstr) $(brokengasinst) $(compat_vdso)
+KBUILD_AFLAGS  += $(compat_vdso)
 
 KBUILD_CFLAGS  += $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS  += $(call cc-option,-mabi=lp64)
index 1f012c506434360f764655a89eb4719c2aabd0f9..cd3414898d10f9c6c41db7cf461f49744515071a 100644 (file)
@@ -16,7 +16,7 @@
 
 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 
-targets := Image Image.gz
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
 
 $(obj)/Image: vmlinux FORCE
        $(call if_changed,objcopy)
index 96ab0227e82d187bc3b5eb50e3064dcabbe3ef3f..121e6cc4849b45139f5a396cec851352863fe841 100644 (file)
@@ -15,7 +15,7 @@
        pinctrl-names = "default";
        pinctrl-0 = <&mmc2_pins>;
        vmmc-supply = <&reg_dcdc1>;
-       vqmmc-supply = <&reg_dcdc1>;
+       vqmmc-supply = <&reg_eldo1>;
        bus-width = <8>;
        non-removable;
        cap-mmc-hw-reset;
index 01a9a52edae4afa88c43f821531345543742a64e..393c1948a4959beb2c07b87456f0a558fffc1c5f 100644 (file)
 &mmc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&mmc1_pins>;
-       vmmc-supply = <&reg_aldo2>;
+       vmmc-supply = <&reg_dcdc1>;
        vqmmc-supply = <&reg_dldo4>;
        mmc-pwrseq = <&wifi_pwrseq>;
        bus-width = <4>;
index 144a2c19ac02659a4b288d0df130ae26303d92cf..d1fc9c2055f4904c19e7661be8c1cb36082423ee 100644 (file)
 
        pmu {
                compatible = "arm,armv8-pmuv3";
-               interrupts = <0 120 8>,
-                            <0 121 8>,
-                            <0 122 8>,
-                            <0 123 8>;
+               interrupts = <0 170 4>,
+                            <0 171 4>,
+                            <0 172 4>,
+                            <0 173 4>;
                interrupt-affinity = <&cpu0>,
                                     <&cpu1>,
                                     <&cpu2>,
index 5bd07469766bf53ed4f86b8096b5160bd68fc2c6..a8bb3fa9fec98e994ce2876b95e81e86b8369c02 100644 (file)
        };
 
        gpio-keys {
-               compatible = "gpio-keys-polled";
-               poll-interval = <100>;
+               compatible = "gpio-keys";
 
                key1 {
                        label = "A";
                        linux,code = <BTN_0>;
                        gpios = <&gpio GPIOH_6 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <34 IRQ_TYPE_EDGE_BOTH>;
                };
 
                key2 {
                        label = "B";
                        linux,code = <BTN_1>;
                        gpios = <&gpio GPIOH_7 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <35 IRQ_TYPE_EDGE_BOTH>;
                };
 
                key3 {
                        label = "C";
                        linux,code = <BTN_2>;
                        gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <2 IRQ_TYPE_EDGE_BOTH>;
+               };
+
+               mic_mute {
+                       label = "MicMute";
+                       linux,code = <SW_MUTE_DEVICE>;
+                       linux,input-type = <EV_SW>;
+                       gpios = <&gpio_ao GPIOE_2 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <99 IRQ_TYPE_EDGE_BOTH>;
+               };
+
+               power_key {
+                       label = "PowerKey";
+                       linux,code = <KEY_POWER>;
+                       gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
+                       interrupt-parent = <&gpio_intc>;
+                       interrupts = <3 IRQ_TYPE_EDGE_BOTH>;
                };
        };
 
 
        bluetooth {
                compatible = "brcm,bcm43438-bt";
+               interrupt-parent = <&gpio_intc>;
+               interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
                shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
                max-speed = <2000000>;
                clocks = <&wifi32k>;
index 8e8a77eb596ae47afca5c62f1842b3a68c02d433..a6f9b7784e8fc8c98bcf90b152a136cc97691c7c 100644 (file)
@@ -88,7 +88,7 @@
 
        reboot {
                compatible ="syscon-reboot";
-               regmap = <&dcfg>;
+               regmap = <&rst>;
                offset = <0xb0>;
                mask = <0x02>;
        };
                dcfg: syscon@1e00000 {
                        compatible = "fsl,ls1028a-dcfg", "syscon";
                        reg = <0x0 0x1e00000 0x0 0x10000>;
-                       big-endian;
+                       little-endian;
+               };
+
+               rst: syscon@1e60000 {
+                       compatible = "syscon";
+                       reg = <0x0 0x1e60000 0x0 0x10000>;
+                       little-endian;
                };
 
                scfg: syscon@1fc0000 {
                                               0x00010004 0x0000003d
                                               0x00010005 0x00000045
                                               0x00010006 0x0000004d
-                                              0x00010007 0x00000045
+                                              0x00010007 0x00000055
                                               0x00010008 0x0000005e
                                               0x00010009 0x00000066
                                               0x0001000a 0x0000006e
index 6edbdfe2d0d7c44bec5ff1aeec193b667e2f1050..3d95b66a2d71c0fa97aba2e37d8855c44d295706 100644 (file)
                                reg = <0x30bd0000 0x10000>;
                                interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>,
-                                        <&clk IMX8MM_CLK_SDMA1_ROOT>;
+                                        <&clk IMX8MM_CLK_AHB>;
                                clock-names = "ipg", "ahb";
                                #dma-cells = <3>;
                                fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
index 2a759dff9f87168f00a8c68f920cb846cf9d8ba6..596bc65f475c21a291caa3f143a1ac1e27b91d2f 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_imu>;
                interrupt-parent = <&gpio3>;
-               interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+               interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
                vdd-supply = <&reg_3v3_p>;
                vddio-supply = <&reg_3v3_p>;
        };
index 94090c6fb946a6bee451056b935a00d80fbe0ec7..d43e1299c8ef618d65232a7dfcd7fdc9150ebf5b 100644 (file)
 
        pmu {
                compatible = "arm,armv8-pmuv3";
-               interrupts = <0 120 8>,
-                            <0 121 8>,
-                            <0 122 8>,
-                            <0 123 8>;
+               interrupts = <0 170 4>,
+                            <0 171 4>,
+                            <0 172 4>,
+                            <0 173 4>;
                interrupt-affinity = <&cpu0>,
                                     <&cpu1>,
                                     <&cpu2>,
index 76b49f57310152829a1705ae2b7be8e32ec016df..16f1656d5203bd95d9c0095bae10ebf0d17b46fe 100644 (file)
@@ -49,7 +49,8 @@
 
        ir-receiver {
                compatible = "gpio-ir-receiver";
-               gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_HIGH>;
+               gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_LOW>;
+               linux,rc-map-name = "rc-beelink-gs1";
        };
 };
 
index e273faca924f9a9344e1ed14c3c23932df4133a1..999da59f03a9d092b5b7adf1d2308b13efffa91b 100644 (file)
@@ -97,7 +97,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
                 * input when running on a preemptible kernel, but process the
                 * data block by block instead.
                 */
-               if (IS_ENABLED(CONFIG_PREEMPT) &&
+               if (IS_ENABLED(CONFIG_PREEMPTION) &&
                    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
                        chunk = SHA256_BLOCK_SIZE -
                                sctx->count % SHA256_BLOCK_SIZE;
index b9f8d787eea9fc57b724d7a87dff61172551b156..324e7d5ab37edca2f17334b9734bcffc5d813da8 100644 (file)
@@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length);
 static inline void apply_alternatives_module(void *start, size_t length) { }
 #endif
 
-#define ALTINSTR_ENTRY(feature,cb)                                           \
+#define ALTINSTR_ENTRY(feature)                                                      \
        " .word 661b - .\n"                             /* label           */ \
-       " .if " __stringify(cb) " == 0\n"                                     \
        " .word 663f - .\n"                             /* new instruction */ \
-       " .else\n"                                                            \
+       " .hword " __stringify(feature) "\n"            /* feature bit     */ \
+       " .byte 662b-661b\n"                            /* source len      */ \
+       " .byte 664f-663f\n"                            /* replacement len */
+
+#define ALTINSTR_ENTRY_CB(feature, cb)                                       \
+       " .word 661b - .\n"                             /* label           */ \
        " .word " __stringify(cb) "- .\n"               /* callback */        \
-       " .endif\n"                                                           \
        " .hword " __stringify(feature) "\n"            /* feature bit     */ \
        " .byte 662b-661b\n"                            /* source len      */ \
        " .byte 664f-663f\n"                            /* replacement len */
@@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
  *
  * Alternatives with callbacks do not generate replacement instructions.
  */
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb)        \
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)    \
        ".if "__stringify(cfg_enabled)" == 1\n"                         \
        "661:\n\t"                                                      \
        oldinstr "\n"                                                   \
        "662:\n"                                                        \
        ".pushsection .altinstructions,\"a\"\n"                         \
-       ALTINSTR_ENTRY(feature,cb)                                      \
+       ALTINSTR_ENTRY(feature)                                         \
        ".popsection\n"                                                 \
-       " .if " __stringify(cb) " == 0\n"                               \
        ".pushsection .altinstr_replacement, \"a\"\n"                   \
        "663:\n\t"                                                      \
        newinstr "\n"                                                   \
@@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        ".popsection\n\t"                                               \
        ".org   . - (664b-663b) + (662b-661b)\n\t"                      \
        ".org   . - (662b-661b) + (664b-663b)\n"                        \
-       ".else\n\t"                                                     \
+       ".endif\n"
+
+#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb)       \
+       ".if "__stringify(cfg_enabled)" == 1\n"                         \
+       "661:\n\t"                                                      \
+       oldinstr "\n"                                                   \
+       "662:\n"                                                        \
+       ".pushsection .altinstructions,\"a\"\n"                         \
+       ALTINSTR_ENTRY_CB(feature, cb)                                  \
+       ".popsection\n"                                                 \
        "663:\n\t"                                                      \
        "664:\n\t"                                                      \
-       ".endif\n"                                                      \
        ".endif\n"
 
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)        \
-       __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
+       __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
 #define ALTERNATIVE_CB(oldinstr, cb) \
-       __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
+       __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
 #else
 
 #include <asm/assembler.h>
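
[Illustration, not part of the patch: both entry flavours are consumed from inline asm. A plain alternative swaps in the replacement instruction at boot when the capability is detected; the _CB variant instead records a C callback that generates the replacement at patch time. A hedged sketch of the plain form, where ARM64_HAS_EXAMPLE_FEATURE is a made-up capability bit.]

#include <asm/alternative.h>

/* Sketch only: read one of two counters depending on a (made-up) cap. */
static inline u64 read_counter_sketch(void)
{
	u64 val;

	asm volatile(ALTERNATIVE("mrs %0, cntvct_el0",	/* default */
				 "mrs %0, cntpct_el0",	/* patched in */
				 ARM64_HAS_EXAMPLE_FEATURE)
		     : "=r" (val));
	return val;
}
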
index 89e4c8b7934905657bd6b4c21eb780540bf80bc0..4750fc8030c3266ec1ad017177f9ecb5a903b641 100644 (file)
@@ -141,6 +141,7 @@ static inline u32 gic_read_rpr(void)
 #define gicr_read_pendbaser(c)         readq_relaxed(c)
 
 #define gits_write_vpropbaser(v, c)    writeq_relaxed(v, c)
+#define gits_read_vpropbaser(c)                readq_relaxed(c)
 
 #define gits_write_vpendbaser(v, c)    writeq_relaxed(v, c)
 #define gits_read_vpendbaser(c)                readq_relaxed(c)
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
new file mode 100644 (file)
index 0000000..3fe02da
--- /dev/null
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <linux/random.h>
+#include <asm/cpufeature.h>
+
+static inline bool __arm64_rndr(unsigned long *v)
+{
+       bool ok;
+
+       /*
+        * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
+        * and set PSTATE.NZCV to 0b0100 otherwise.
+        */
+       asm volatile(
+               __mrs_s("%0", SYS_RNDR_EL0) "\n"
+       "       cset %w1, ne\n"
+       : "=r" (*v), "=r" (ok)
+       :
+       : "cc");
+
+       return ok;
+}
+
+static inline bool __must_check arch_get_random_long(unsigned long *v)
+{
+       return false;
+}
+
+static inline bool __must_check arch_get_random_int(unsigned int *v)
+{
+       return false;
+}
+
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+{
+       /*
+        * Only support the generic interface after we have detected
+        * the system wide capability, avoiding complexity with the
+        * cpufeature code and with potential scheduling between CPUs
+        * with and without the feature.
+        */
+       if (!cpus_have_const_cap(ARM64_HAS_RNG))
+               return false;
+
+       return __arm64_rndr(v);
+}
+
+
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+{
+       unsigned long val;
+       bool ok = arch_get_random_seed_long(&val);
+
+       *v = val;
+       return ok;
+}
+
+static inline bool __init __early_cpu_has_rndr(void)
+{
+       /* Open code as we run prior to the first call to cpufeature. */
+       unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
+       return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
+}
+
+#else
+
+static inline bool __arm64_rndr(unsigned long *v) { return false; }
+static inline bool __init __early_cpu_has_rndr(void) { return false; }
+
+#endif /* CONFIG_ARCH_RANDOM */
+#endif /* _ASM_ARCHRANDOM_H */
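
The seed variants are gated on the system-wide ARM64_HAS_RNG capability (registered further down in this merge), so a caller can simply loop until the instruction reports failure. A hedged sketch (fill_seed is illustrative, not part of the patch):

static int fill_seed(unsigned long *buf, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (!arch_get_random_seed_long(&buf[i]))
                        return -EAGAIN; /* no RNG, or RNDR reported failure */
        return 0;
}
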
index b8cf7c85ffa2a2e7c5cacd15910185d44fe751a8..aca337d79d12ab288cf186fa619e608847e224f7 100644 (file)
        msr     daif, \flags
        .endm
 
-       /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
-       .macro  inherit_daif, pstate:req, tmp:req
-       and     \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
-       msr     daif, \tmp
-       .endm
-
        /* IRQ is the lowest priority flag, unconditionally unmask the rest. */
        .macro enable_da_f
        msr     daifclr, #(8 | 4 | 1)
 9990:
        .endm
 
-/*
- * SMP data memory barrier
- */
-       .macro  smp_dmb, opt
-       dmb     \opt
-       .endm
-
 /*
  * RAS Error Synchronization barrier
  */
@@ -461,17 +448,6 @@ USER(\label, ic    ivau, \tmp2)                    // invalidate I line PoU
        b.ne    9998b
        .endm
 
-/*
- * Annotate a function as position independent, i.e., safe to be called before
- * the kernel virtual mapping is activated.
- */
-#define ENDPIPROC(x)                   \
-       .globl  __pi_##x;               \
-       .type   __pi_##x, %function;    \
-       .set    __pi_##x, x;            \
-       .size   __pi_##x, . - x;        \
-       ENDPROC(x)
-
 /*
  * Annotate a function as being unsuitable for kprobes.
  */
@@ -699,8 +675,8 @@ USER(\label, ic     ivau, \tmp2)                    // invalidate I line PoU
  * where <label> is optional, and marks the point where execution will resume
  * after a yield has been performed. If omitted, execution resumes right after
  * the endif_yield_neon invocation. Note that the entire sequence, including
- * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
- * is not defined.
+ * the provided patchup code, will be omitted from the image if
+ * CONFIG_PREEMPTION is not defined.
  *
  * As a convenience, in the case where no patchup code is required, the above
  * sequence may be abbreviated to
@@ -728,7 +704,7 @@ USER(\label, ic     ivau, \tmp2)                    // invalidate I line PoU
        .endm
 
        .macro          if_will_cond_yield_neon
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        get_current_task        x0
        ldr             x0, [x0, #TSK_TI_PREEMPT]
        sub             x0, x0, #PREEMPT_DISABLE_OFFSET
index 7b012148bfd6c1ccbd6f47a437ca1323d92428ed..13869b76b58cdce787247bd1e8cbfc6b95ea99b8 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/stringify.h>
 
-#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
 #define __LL_SC_FALLBACK(asm_ops)                                      \
 "      b       3f\n"                                                   \
 "      .subsection     1\n"                                            \
index 574808b9df4c8976db12fce41519da3365c33ead..da3280f639cd7e8b9058c0125e6d0095622c7acd 100644 (file)
@@ -14,6 +14,7 @@
 static inline void __lse_atomic_##op(int i, atomic_t *v)                       \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op "     %w[i], %[v]\n"                                  \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
@@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
 static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)    \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op #mb " %w[i], %w[i], %[v]"                             \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
@@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v)   \
        u32 tmp;                                                        \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
        "       add     %w[i], %w[i], %w[tmp]"                          \
        : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
@@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN(        , al, "memory")
 static inline void __lse_atomic_and(int i, atomic_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       mvn     %w[i], %w[i]\n"
        "       stclr   %w[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
 static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)     \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mvn     %w[i], %w[i]\n"                                 \
        "       ldclr" #mb "    %w[i], %w[i], %[v]"                     \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 static inline void __lse_atomic_sub(int i, atomic_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       neg     %w[i], %w[i]\n"
        "       stadd   %w[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
        u32 tmp;                                                        \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
        "       add     %w[i], %w[i], %w[tmp]"                          \
@@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
 static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)     \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[i], %[v]"                     \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
 static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)           \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op "     %[i], %[v]\n"                                   \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
@@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
 static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op #mb " %[i], %[i], %[v]"                               \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
@@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
        "       add     %[i], %[i], %x[tmp]"                            \
        : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
@@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       mvn     %[i], %[i]\n"
        "       stclr   %[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)        \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mvn     %[i], %[i]\n"                                   \
        "       ldclr" #mb "    %[i], %[i], %[v]"                       \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       neg     %[i], %[i]\n"
        "       stadd   %[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
        "       add     %[i], %[i], %x[tmp]"                            \
@@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)        \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %[i], %[v]"                       \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
        unsigned long tmp;
 
        asm volatile(
+       __LSE_PREAMBLE
        "1:     ldr     %x[tmp], %[v]\n"
        "       subs    %[ret], %x[tmp], #1\n"
        "       b.lt    2f\n"
@@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr,                  \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mov     %" #w "[tmp], %" #w "[old]\n"                   \
        "       cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"    \
        "       mov     %" #w "[ret], %" #w "[tmp]"                     \
@@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1,                             \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
        "       eor     %[old1], %[old1], %[oldval1]\n"                 \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
index d064a50deb5fbf5989c7d8fceefc99647ef7a191..8d2a7de39744236771557118e8189eb6f67441a9 100644 (file)
@@ -35,6 +35,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 }
 #define ip_fast_csum ip_fast_csum
 
+extern unsigned int do_csum(const unsigned char *buff, int len);
+#define do_csum do_csum
+
 #include <asm-generic/checksum.h>
 
 #endif /* __ASM_CHECKSUM_H */
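
Defining do_csum here lets asm-generic/checksum.h pick up an arm64-optimised byte-buffer sum. A sketch of the contract it must honour (example_csum is illustrative; csum_fold is the generic folding helper):

static __sum16 example_csum(const unsigned char *buf, int len)
{
        /* 32-bit partial sum over buf[0..len), folded to 16 bits */
        return csum_fold((__force __wsum)do_csum(buf, len));
}
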
index d72d995b7e2585baeb1748463c87e978fbcb0850..b4a40535a3d8281a1154e7bd3c08199504157ce1 100644 (file)
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
        u32             reg_id_isar3;
        u32             reg_id_isar4;
        u32             reg_id_isar5;
+       u32             reg_id_isar6;
        u32             reg_id_mmfr0;
        u32             reg_id_mmfr1;
        u32             reg_id_mmfr2;
index b926838711194b005060815503955b0199078b34..865e0253fc1ef3c12e496ef8724aececd7677632 100644 (file)
@@ -44,7 +44,7 @@
 #define ARM64_SSBS                             34
 #define ARM64_WORKAROUND_1418040               35
 #define ARM64_HAS_SB                           36
-#define ARM64_WORKAROUND_1165522               37
+#define ARM64_WORKAROUND_SPECULATIVE_AT_VHE    37
 #define ARM64_HAS_ADDRESS_AUTH_ARCH            38
 #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF         39
 #define ARM64_HAS_GENERIC_AUTH_ARCH            40
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM    45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM   46
 #define ARM64_WORKAROUND_1542419               47
-#define ARM64_WORKAROUND_1319367               48
+#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE   48
+#define ARM64_HAS_E0PD                         49
+#define ARM64_HAS_RNG                          50
 
-#define ARM64_NCAPS                            49
+#define ARM64_NCAPS                            51
 
 #endif /* __ASM_CPUCAPS_H */
index 4261d55e85069117a0cf33e434709e1a94f3f50f..92ef9539874a663b3bd693c4e834a5cf134a21d6 100644 (file)
@@ -613,6 +613,11 @@ static inline bool system_has_prio_mask_debugging(void)
               system_uses_irq_prio_masking();
 }
 
+static inline bool system_capabilities_finalized(void)
+{
+       return static_branch_likely(&arm64_const_caps_ready);
+}
+
 #define ARM64_BP_HARDEN_UNKNOWN                -1
 #define ARM64_BP_HARDEN_WA_NEEDED      0
 #define ARM64_BP_HARDEN_NOT_REQUIRED   1
index aca07c2f6e6e364e84efb9708f8a743cf24e6627..a87a93f67671d9d017afd936003ab4caf4f14bc3 100644 (file)
@@ -85,6 +85,8 @@
 #define QCOM_CPU_PART_FALKOR_V1                0x800
 #define QCOM_CPU_PART_FALKOR           0xC00
 #define QCOM_CPU_PART_KRYO             0x200
+#define QCOM_CPU_PART_KRYO_3XX_SILVER  0x803
+#define QCOM_CPU_PART_KRYO_4XX_SILVER  0x805
 
 #define NVIDIA_CPU_PART_DENVER         0x003
 #define NVIDIA_CPU_PART_CARMEL         0x004
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
 #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
index 72acd2db167f03a72c5bdfeccb9cbe58a33e9c82..ec213b4a165021be50b64f47d97b7018fb0326c7 100644 (file)
@@ -38,7 +38,7 @@ static inline void local_daif_mask(void)
        trace_hardirqs_off();
 }
 
-static inline unsigned long local_daif_save(void)
+static inline unsigned long local_daif_save_flags(void)
 {
        unsigned long flags;
 
@@ -50,6 +50,15 @@ static inline unsigned long local_daif_save(void)
                        flags |= PSR_I_BIT;
        }
 
+       return flags;
+}
+
+static inline unsigned long local_daif_save(void)
+{
+       unsigned long flags;
+
+       flags = local_daif_save_flags();
+
        local_daif_mask();
 
        return flags;
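
Splitting local_daif_save_flags() out of local_daif_save() gives callers a way to read the effective flags without the masking side effect, which the APEI hunk later in this diff relies on. A sketch of the distinction (daif_demo is illustrative):

static unsigned long daif_demo(bool mask)
{
        if (!mask)
                return local_daif_save_flags(); /* read only, no masking */

        return local_daif_save();               /* read, then mask DAIF */
}
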
index b54d3a86c44446735bfbfe4e49ab217460fc5b27..44531a69d32b9fd1772cb131d5696decca6e08c8 100644 (file)
@@ -93,21 +93,17 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
        return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
 }
 
-#define efi_call_early(f, ...)         sys_table_arg->boottime->f(__VA_ARGS__)
-#define __efi_call_early(f, ...)       f(__VA_ARGS__)
-#define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
-#define efi_is_64bit()                 (true)
+#define efi_bs_call(func, ...) efi_system_table()->boottime->func(__VA_ARGS__)
+#define efi_rt_call(func, ...) efi_system_table()->runtime->func(__VA_ARGS__)
+#define efi_is_native()                (true)
 
-#define efi_table_attr(table, attr, instance)                          \
-       ((table##_t *)instance)->attr
+#define efi_table_attr(inst, attr)     (inst->attr)
 
-#define efi_call_proto(protocol, f, instance, ...)                     \
-       ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+#define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
 
 #define alloc_screen_info(x...)                &screen_info
 
-static inline void free_screen_info(efi_system_table_t *sys_table_arg,
-                                   struct screen_info *si)
+static inline void free_screen_info(struct screen_info *si)
 {
 }
 
index 4d5f3b5f50cdd2e1b8964e89d7b03e6828706134..b87c6e276ab194e4d80cf85150c844c94ee2d5b5 100644 (file)
@@ -45,8 +45,8 @@ void do_sysinstr(unsigned int esr, struct pt_regs *regs);
 void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
 void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
 void do_cp15instr(unsigned int esr, struct pt_regs *regs);
-void el0_svc_handler(struct pt_regs *regs);
-void el0_svc_compat_handler(struct pt_regs *regs);
+void do_el0_svc(struct pt_regs *regs);
+void do_el0_svc_compat(struct pt_regs *regs);
 void do_el0_ia_bp_hardening(unsigned long addr,  unsigned int esr,
                            struct pt_regs *regs);
 
index 3d2f2472a36cfdebe9b174ab13d0d752e526d2e7..0f00265248b5c2d67fab6b9bae98f47dfab8d665 100644 (file)
 #define KERNEL_HWCAP_SVESM4            __khwcap2_feature(SVESM4)
 #define KERNEL_HWCAP_FLAGM2            __khwcap2_feature(FLAGM2)
 #define KERNEL_HWCAP_FRINT             __khwcap2_feature(FRINT)
+#define KERNEL_HWCAP_SVEI8MM           __khwcap2_feature(SVEI8MM)
+#define KERNEL_HWCAP_SVEF32MM          __khwcap2_feature(SVEF32MM)
+#define KERNEL_HWCAP_SVEF64MM          __khwcap2_feature(SVEF64MM)
+#define KERNEL_HWCAP_SVEBF16           __khwcap2_feature(SVEBF16)
+#define KERNEL_HWCAP_I8MM              __khwcap2_feature(I8MM)
+#define KERNEL_HWCAP_BF16              __khwcap2_feature(BF16)
+#define KERNEL_HWCAP_DGH               __khwcap2_feature(DGH)
+#define KERNEL_HWCAP_RNG               __khwcap2_feature(RNG)
 
 /*
  * This yields a mask that user programs can use to figure out what
index 12a561a54128ced49c7c8686c4d6c045fa684c13..d24b527e8c00948afe2806f74c770ad40402c110 100644 (file)
@@ -96,6 +96,10 @@ static inline void crash_post_resume(void) {}
 struct kimage_arch {
        void *dtb;
        unsigned long dtb_mem;
+       /* Core ELF header buffer */
+       void *elf_headers;
+       unsigned long elf_headers_mem;
+       unsigned long elf_headers_sz;
 };
 
 extern const struct kexec_file_ops kexec_image_ops;
index c61260cf63c5889d933ff4ee6fa68c33fae7dbed..f5acdde17f3b1ca860ce51c8f6a71d22bbfa6e1e 100644 (file)
@@ -547,7 +547,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
         * wrong, and hyp will crash and burn when it uses any
         * cpus_have_const_cap() wrapper.
         */
-       BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+       BUG_ON(!system_capabilities_finalized());
        __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
 
        /*
@@ -571,7 +571,7 @@ static inline bool kvm_arch_requires_vhe(void)
                return true;
 
        /* Some implementations have defects that confine them to VHE */
-       if (cpus_have_cap(ARM64_WORKAROUND_1165522))
+       if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE))
                return true;
 
        return false;
index 97f21cc666579c350ca625f26e4e84012bd77096..a3a6a2ba9a635efd7feec4542f4c0b281052521c 100644 (file)
@@ -91,11 +91,11 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
        write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
 
        /*
-        * ARM erratum 1165522 requires the actual execution of the above
-        * before we can switch to the EL1/EL0 translation regime used by
+        * ARM errata 1165522 and 1530923 require the actual execution of the
+        * above before we can switch to the EL1/EL0 translation regime used by
         * the guest.
         */
-       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
 }
 
 #endif /* __ARM64_KVM_HYP_H__ */
index 1b266292f0bee00ce1952005479053e820b5ca0e..ebee3113a62ff7bbc17e12f8de318e9db37caac3 100644 (file)
@@ -4,4 +4,20 @@
 #define __ALIGN                .align 2
 #define __ALIGN_STR    ".align 2"
 
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define SYM_FUNC_START_PI(x)                   \
+               SYM_FUNC_START_ALIAS(__pi_##x); \
+               SYM_FUNC_START(x)
+
+#define SYM_FUNC_START_WEAK_PI(x)              \
+               SYM_FUNC_START_ALIAS(__pi_##x); \
+               SYM_FUNC_START_WEAK(x)
+
+#define SYM_FUNC_END_PI(x)                     \
+               SYM_FUNC_END(x);                \
+               SYM_FUNC_END_ALIAS(__pi_##x)
+
 #endif
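
These SYM_FUNC_*_PI() helpers supersede the ENDPIPROC() macro removed from assembler.h earlier in this diff, emitting a __pi_ alias alongside the normal symbol. Assuming a routine such as memcpy is annotated this way elsewhere in the series, early boot code can reference the alias directly:

/* Position-independent entry point, callable before the kernel
 * virtual mapping is activated (declaration sketch only). */
extern void *__pi_memcpy(void *dst, const void *src, size_t n);
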
index 80b388278149681cbf968449bd3681d7080a8f52..d429f7701c3670101b62540f79c65ec711cba4d1 100644 (file)
@@ -4,7 +4,9 @@
 
 #include <asm/atomic_ll_sc.h>
 
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+
+#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
 
 #include <linux/compiler_types.h>
 #include <linux/export.h>
@@ -14,8 +16,6 @@
 #include <asm/atomic_lse.h>
 #include <asm/cpucaps.h>
 
-__asm__(".arch_extension       lse");
-
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
 
@@ -34,9 +34,9 @@ static inline bool system_uses_lse_atomics(void)
 
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)                               \
-       ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+       ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
 
-#else  /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#else  /* CONFIG_ARM64_LSE_ATOMICS */
 
 static inline bool system_uses_lse_atomics(void) { return false; }
 
@@ -44,5 +44,5 @@ static inline bool system_uses_lse_atomics(void) { return false; }
 
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)       llsc
 
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
 #endif /* __ASM_LSE_H */
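
With the file-wide ".arch_extension lse" gone, every LSE asm block now carries __LSE_PREAMBLE, so its mnemonics assemble even with a toolchain that defaults to plain ARMv8.0. A minimal sketch mirroring the atomic_lse.h hunks above (sketch_stadd is illustrative):

static inline void sketch_stadd(int i, atomic_t *v)
{
        asm volatile(
        __LSE_PREAMBLE
        "       stadd   %w[i], %[v]\n"
        : [v] "+Q" (v->counter)
        : [i] "r" (i));
}
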
index f217e32929193114d0cf8e5f85fd0bff35ba9cfe..e4d862420bb4051714f8052b01d84e817678f857 100644 (file)
@@ -29,52 +29,11 @@ typedef struct {
  */
 #define ASID(mm)       ((mm)->context.id.counter & 0xffff)
 
-static inline bool arm64_kernel_unmapped_at_el0(void)
-{
-       return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
-              cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
-}
+extern bool arm64_use_ng_mappings;
 
-static inline bool arm64_kernel_use_ng_mappings(void)
+static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-       bool tx1_bug;
-
-       /* What's a kpti? Use global mappings if we don't know. */
-       if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
-               return false;
-
-       /*
-        * Note: this function is called before the CPU capabilities have
-        * been configured, so our early mappings will be global. If we
-        * later determine that kpti is required, then
-        * kpti_install_ng_mappings() will make them non-global.
-        */
-       if (arm64_kernel_unmapped_at_el0())
-               return true;
-
-       if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-               return false;
-
-       /*
-        * KASLR is enabled so we're going to be enabling kpti on non-broken
-        * CPUs regardless of their susceptibility to Meltdown. Rather
-        * than force everybody to go through the G -> nG dance later on,
-        * just put down non-global mappings from the beginning.
-        */
-       if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
-               tx1_bug = false;
-#ifndef MODULE
-       } else if (!static_branch_likely(&arm64_const_caps_ready)) {
-               extern const struct midr_range cavium_erratum_27456_cpus[];
-
-               tx1_bug = is_midr_in_range_list(read_cpuid_id(),
-                                               cavium_erratum_27456_cpus);
-#endif
-       } else {
-               tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
-       }
-
-       return !tx1_bug && kaslr_offset() > 0;
+       return arm64_use_ng_mappings;
 }
 
 typedef void (*bp_hardening_cb_t)(void);
@@ -128,6 +87,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
+extern bool kaslr_requires_kpti(void);
 
 #define INIT_MM_CONTEXT(name)  \
        .pgd = init_pg_dir,
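
The refactoring collapses the old multi-branch arm64_kernel_use_ng_mappings() logic into a single boot-time boolean, so even modules can test it cheaply. A sketch of the resulting pattern (sketch_maybe_ng is illustrative; the real PTE_MAYBE_NG lives in pgtable-prot.h below):

static inline pteval_t sketch_maybe_ng(void)
{
        return arm64_use_ng_mappings ? PTE_NG : 0;
}
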
index d9fbd433cc1753258c28176592e5383565fd3c7f..6bf5e650da7883ba560f2f43a037d4f69facc3e4 100644 (file)
 #define PUD_TABLE_BIT          (_AT(pudval_t, 1) << 1)
 #define PUD_TYPE_MASK          (_AT(pudval_t, 3) << 0)
 #define PUD_TYPE_SECT          (_AT(pudval_t, 1) << 0)
+#define PUD_SECT_RDONLY                (_AT(pudval_t, 1) << 7)         /* AP[2] */
 
 /*
  * Level 2 descriptor (PMD).
 #define TCR_HD                 (UL(1) << 40)
 #define TCR_NFD0               (UL(1) << 53)
 #define TCR_NFD1               (UL(1) << 54)
+#define TCR_E0PD0              (UL(1) << 55)
+#define TCR_E0PD1              (UL(1) << 56)
 
 /*
  * TTBR.
index 8dc6c5cdabe62e2f63065abd525ff516fe685387..6f87839f02491b5161452ea76b05eeef47821ed6 100644 (file)
@@ -26,8 +26,8 @@
 #define _PROT_DEFAULT          (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT     (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PTE_MAYBE_NG           (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
-#define PMD_MAYBE_NG           (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG           (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+#define PMD_MAYBE_NG           (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
 
 #define PROT_DEFAULT           (_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT      (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
 #define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY          __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
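
Since generic mm resolves mmap() PROT_* bits through these __P/__S tables, a private execute-only mapping now also gains read permission. A simplified sketch of the lookup, assuming the generic protection_map[] indexing used by vm_get_page_prot():

static pgprot_t sketch_exec_only_prot(void)
{
        /* index 0b100 == VM_EXEC only: formerly PAGE_EXECONLY,
         * now PAGE_READONLY_EXEC */
        return protection_map[VM_EXEC];
}
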
index 5d15b4735a0eba76569cae8598aa49468c702bad..cd5de0e40bfa06cf6e6fa5e094a80965b6796599 100644 (file)
@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)         (pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)         (!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
 #define pte_valid_not_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \
@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 /*
  * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
  */
 #define pte_access_permitted(pte, write) \
        (pte_valid_user(pte) && (!(write) || pte_write(pte)))
index d4995164701494e3c8241c91b8b1eff3545f870e..80e946b2abee276479fde238d3f01e5d7eecacb3 100644 (file)
@@ -79,11 +79,11 @@ static inline bool should_resched(int preempt_offset)
        return pc == preempt_offset;
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
index 25a73aab438f947524a6b10993f5d3eaee4dbaee..3994169985efc62b2c84556934d31bd5c8c1cadd 100644 (file)
@@ -8,7 +8,6 @@
 #include <asm-generic/sections.h>
 
 extern char __alt_instructions[], __alt_instructions_end[];
-extern char __exception_text_start[], __exception_text_end[];
 extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 extern char __hyp_text_start[], __hyp_text_end[];
index 7434844036d39fb356ed901995d8e1edd54a7003..89cba2622b79bab10c03737b414a522de6a29292 100644 (file)
@@ -26,6 +26,8 @@ DECLARE_PER_CPU(bool, fpsimd_context_busy);
 static __must_check inline bool may_use_simd(void)
 {
        /*
+        * We must make sure that SVE has been properly initialized
+        * before using SIMD in kernel mode.
         * fpsimd_context_busy is only set while preemption is disabled,
         * and is clear whenever preemption is enabled. Since
         * this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy
@@ -33,8 +35,10 @@ static __must_check inline bool may_use_simd(void)
         * migrated, and if it's clear we cannot be migrated to a CPU
         * where it is set.
         */
-       return !in_irq() && !irqs_disabled() && !in_nmi() &&
-               !this_cpu_read(fpsimd_context_busy);
+       return !WARN_ON(!system_capabilities_finalized()) &&
+              system_supports_fpsimd() &&
+              !in_irq() && !irqs_disabled() && !in_nmi() &&
+              !this_cpu_read(fpsimd_context_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
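
The strengthened predicate keeps kernel-mode NEON users from touching FP/SIMD state before the capabilities are finalized, or on systems without FP/SIMD at all. The canonical caller pattern it guards looks roughly like this (sketch_accelerated_op is illustrative):

static void sketch_accelerated_op(void)
{
        if (may_use_simd()) {
                kernel_neon_begin();
                /* NEON/SVE-accelerated body */
                kernel_neon_end();
        } else {
                /* scalar fallback */
        }
}
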
index b093b287babf0afbc773914992752d4977d28b61..102404dc1e135d3e533cbf4549cb8db4a858b379 100644 (file)
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()       smp_mb()
 
+/*
+ * Changing this will break osq_lock() thanks to the call inside
+ * smp_cond_load_relaxed().
+ *
+ * See:
+ * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
+ */
+#define vcpu_is_preempted(cpu) false
+
 #endif /* __ASM_SPINLOCK_H */
index 6e919fafb43dd65b407a3ec537408b240319ce7d..b91570ff9db14c199cf9d6ea4fdbc6cde4f8ca12 100644 (file)
 #define SYS_ID_ISAR4_EL1               sys_reg(3, 0, 0, 2, 4)
 #define SYS_ID_ISAR5_EL1               sys_reg(3, 0, 0, 2, 5)
 #define SYS_ID_MMFR4_EL1               sys_reg(3, 0, 0, 2, 6)
+#define SYS_ID_ISAR6_EL1               sys_reg(3, 0, 0, 2, 7)
 
 #define SYS_MVFR0_EL1                  sys_reg(3, 0, 0, 3, 0)
 #define SYS_MVFR1_EL1                  sys_reg(3, 0, 0, 3, 1)
 #define SYS_CTR_EL0                    sys_reg(3, 3, 0, 0, 1)
 #define SYS_DCZID_EL0                  sys_reg(3, 3, 0, 0, 7)
 
+#define SYS_RNDR_EL0                   sys_reg(3, 3, 2, 4, 0)
+#define SYS_RNDRRS_EL0                 sys_reg(3, 3, 2, 4, 1)
+
 #define SYS_PMCR_EL0                   sys_reg(3, 3, 9, 12, 0)
 #define SYS_PMCNTENSET_EL0             sys_reg(3, 3, 9, 12, 1)
 #define SYS_PMCNTENCLR_EL0             sys_reg(3, 3, 9, 12, 2)
                         SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
                         ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
 
+/* MAIR_ELx memory attributes (used by Linux) */
+#define MAIR_ATTR_DEVICE_nGnRnE                UL(0x00)
+#define MAIR_ATTR_DEVICE_nGnRE         UL(0x04)
+#define MAIR_ATTR_DEVICE_GRE           UL(0x0c)
+#define MAIR_ATTR_NORMAL_NC            UL(0x44)
+#define MAIR_ATTR_NORMAL_WT            UL(0xbb)
+#define MAIR_ATTR_NORMAL               UL(0xff)
+#define MAIR_ATTR_MASK                 UL(0xff)
+
+/* Position the attr at the correct index */
+#define MAIR_ATTRIDX(attr, idx)                ((attr) << ((idx) * 8))
+
 /* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT                60
 #define ID_AA64ISAR0_TS_SHIFT          52
 #define ID_AA64ISAR0_FHM_SHIFT         48
 #define ID_AA64ISAR0_DP_SHIFT          44
 #define ID_AA64ISAR0_AES_SHIFT         4
 
 /* id_aa64isar1 */
+#define ID_AA64ISAR1_I8MM_SHIFT                52
+#define ID_AA64ISAR1_DGH_SHIFT         48
+#define ID_AA64ISAR1_BF16_SHIFT                44
+#define ID_AA64ISAR1_SPECRES_SHIFT     40
 #define ID_AA64ISAR1_SB_SHIFT          36
 #define ID_AA64ISAR1_FRINTTS_SHIFT     32
 #define ID_AA64ISAR1_GPI_SHIFT         28
 #define ID_AA64PFR1_SSBS_PSTATE_INSNS  2
 
 /* id_aa64zfr0 */
+#define ID_AA64ZFR0_F64MM_SHIFT                56
+#define ID_AA64ZFR0_F32MM_SHIFT                52
+#define ID_AA64ZFR0_I8MM_SHIFT         44
 #define ID_AA64ZFR0_SM4_SHIFT          40
 #define ID_AA64ZFR0_SHA3_SHIFT         32
+#define ID_AA64ZFR0_BF16_SHIFT         20
 #define ID_AA64ZFR0_BITPERM_SHIFT      16
 #define ID_AA64ZFR0_AES_SHIFT          4
 #define ID_AA64ZFR0_SVEVER_SHIFT       0
 
+#define ID_AA64ZFR0_F64MM              0x1
+#define ID_AA64ZFR0_F32MM              0x1
+#define ID_AA64ZFR0_I8MM               0x1
+#define ID_AA64ZFR0_BF16               0x1
 #define ID_AA64ZFR0_SM4                        0x1
 #define ID_AA64ZFR0_SHA3               0x1
 #define ID_AA64ZFR0_BITPERM            0x1
 #define ID_AA64MMFR1_VMIDBITS_16       2
 
 /* id_aa64mmfr2 */
+#define ID_AA64MMFR2_E0PD_SHIFT                60
 #define ID_AA64MMFR2_FWB_SHIFT         40
 #define ID_AA64MMFR2_AT_SHIFT          32
 #define ID_AA64MMFR2_LVA_SHIFT         16
 #define ID_ISAR5_AES_SHIFT             4
 #define ID_ISAR5_SEVL_SHIFT            0
 
+#define ID_ISAR6_I8MM_SHIFT            24
+#define ID_ISAR6_BF16_SHIFT            20
+#define ID_ISAR6_SPECRES_SHIFT         16
+#define ID_ISAR6_SB_SHIFT              12
+#define ID_ISAR6_FHM_SHIFT             8
+#define ID_ISAR6_DP_SHIFT              4
+#define ID_ISAR6_JSCVT_SHIFT           0
+
 #define MVFR0_FPROUND_SHIFT            28
 #define MVFR0_FPSHVEC_SHIFT            24
 #define MVFR0_FPSQRT_SHIFT             20
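
Each MAIR attribute is an 8-bit field, so MAIR_ATTRIDX() shifts it by idx * 8. A worked sketch of composing a MAIR_EL1 value (the index assignments here are illustrative, not the kernel's actual MT_* layout):

u64 sketch_mair = MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, 0) | /* 0x00 << 0  */
                  MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, 1) |     /* 0x44 << 8  */
                  MAIR_ATTRIDX(MAIR_ATTR_NORMAL, 2);         /* 0xff << 16 */
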
index 2629a68b87244facd78a6ad5e9d4b18ca6ac18ad..5af82587909ef9931d4201f2a2850feb6d25bdcf 100644 (file)
@@ -42,7 +42,6 @@
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
 
 #ifndef __COMPAT_SYSCALL_NR
 #include <uapi/asm/unistd.h>
index c50ee1b7d5cd61d2146253f4325dc97c1ad8cfdf..537b1e6953659836a25758aa8aeb5c1441286d6d 100644 (file)
@@ -16,7 +16,7 @@
 
 #define VDSO_HAS_CLOCK_GETRES          1
 
-#define VDSO_HAS_32BIT_FALLBACK                1
+#define BUILD_VDSO32                   1
 
 static __always_inline
 int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..2ca708a
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARM64_VMALLOC_H
+#define _ASM_ARM64_VMALLOC_H
+
+#endif /* _ASM_ARM64_VMALLOC_H */
index a1e72886b30c8467851edbcdfa39a3fc96473520..7752d93bb50fa486d8e6df27049775e1d61572e8 100644 (file)
 #define HWCAP2_SVESM4          (1 << 6)
 #define HWCAP2_FLAGM2          (1 << 7)
 #define HWCAP2_FRINT           (1 << 8)
+#define HWCAP2_SVEI8MM         (1 << 9)
+#define HWCAP2_SVEF32MM                (1 << 10)
+#define HWCAP2_SVEF64MM                (1 << 11)
+#define HWCAP2_SVEBF16         (1 << 12)
+#define HWCAP2_I8MM            (1 << 13)
+#define HWCAP2_BF16            (1 << 14)
+#define HWCAP2_DGH             (1 << 15)
+#define HWCAP2_RNG             (1 << 16)
 
 #endif /* _UAPI__ASM_HWCAP_H */
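
Userspace discovers the new bits through the standard AT_HWCAP2 mechanism described in Documentation/arm64/elf_hwcaps.rst. A sketch (cpu_has_rng is illustrative; assumes the uapi header is in the include path):

#include <sys/auxv.h>
#include <asm/hwcap.h>  /* HWCAP2_RNG and friends */

static int cpu_has_rng(void)
{
        return !!(getauxval(AT_HWCAP2) & HWCAP2_RNG);
}
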
index 4703d218663a2ad81e7c8d4fd0749bed8199ef4f..f83a70e07df85ca5029a1e91cde93b8e0dd9fb7e 100644 (file)
@@ -19,5 +19,6 @@
 #define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SET_GET_RLIMIT
 #define __ARCH_WANT_TIME32_SYSCALLS
+#define __ARCH_WANT_SYS_CLONE3
 
 #include <asm-generic/unistd.h>
index 3a58e9db5cfe79e17354f1950b5f07282f9e23e5..a100483b47c429f56fcc7e1dd951c4e16f7c312b 100644 (file)
@@ -274,7 +274,7 @@ int apei_claim_sea(struct pt_regs *regs)
        if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
                return err;
 
-       current_flags = arch_local_save_flags();
+       current_flags = local_daif_save_flags();
 
        /*
         * SEA can interrupt SError, mask it and describe this as an NMI so
index ca158be21f833bcaeac18641777dc0709083b624..7832b32163708574d80fe2681d5aa023b4c30605 100644 (file)
@@ -618,7 +618,8 @@ static struct insn_emulation_ops setend_ops = {
 };
 
 /*
- * Invoked as late_initcall, since not needed before init spawned.
+ * Invoked as core_initcall, which guarantees that the instruction
+ * emulation is ready for userspace.
  */
 static int __init armv8_deprecated_init(void)
 {
index 6ea337d464c414ed67cd139071022500220604d1..32c7bf858dd9af47ccc46bba98f10674ce84046a 100644 (file)
@@ -42,11 +42,11 @@ ENTRY(__cpu_soft_restart)
        mov     x0, #HVC_SOFT_RESTART
        hvc     #0                              // no return
 
-1:     mov     x18, x1                         // entry
+1:     mov     x8, x1                          // entry
        mov     x0, x2                          // arg0
        mov     x1, x3                          // arg1
        mov     x2, x4                          // arg2
-       br      x18
+       br      x8
 ENDPROC(__cpu_soft_restart)
 
 .popsection
index 6a09ca7644ea16faa70073625d05f19da0605bdd..703ad0a84f9918071e738658aa4c9b9e5ae5bbd2 100644 (file)
@@ -547,6 +547,9 @@ static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+       MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        { /* sentinel */ }
 };
 
@@ -756,6 +759,20 @@ static const struct arm64_cpu_capabilities erratum_843419_list[] = {
 };
 #endif
 
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+static const struct midr_range erratum_speculative_at_vhe_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_1165522
+       /* Cortex A76 r0p0 to r2p0 */
+       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1530923
+       /* Cortex A55 r0p0 to r2p0 */
+       MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+#endif
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -882,12 +899,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
        },
 #endif
-#ifdef CONFIG_ARM64_ERRATUM_1165522
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
        {
-               /* Cortex-A76 r0p0 to r2p0 */
-               .desc = "ARM erratum 1165522",
-               .capability = ARM64_WORKAROUND_1165522,
-               ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+               .desc = "ARM errata 1165522, 1530923",
+               .capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE,
+               ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list),
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1463225
@@ -924,7 +940,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1319367
        {
                .desc = "ARM erratum 1319367",
-               .capability = ARM64_WORKAROUND_1319367,
+               .capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
                ERRATA_MIDR_RANGE_LIST(ca57_a72),
        },
 #endif
index 04cf64e9f0c978a0e9578c6f8adca3570bbd5dd4..0b6715625cf6232ba15afb6eca44313c9ad33f41 100644 (file)
@@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
 #define COMPAT_ELF_HWCAP_DEFAULT       \
                                (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
                                 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
-                                COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
-                                COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
-                                COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+                                COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
                                 COMPAT_HWCAP_LPAE)
 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
 unsigned int compat_elf_hwcap2 __read_mostly;
@@ -47,19 +45,23 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6
 /* Need also bit for ARM64_CB_PATCH */
 DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
 
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
 /*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs. This
  * will be used to determine if a new booting CPU should
  * go through the verification process to make sure that it
  * supports the system capabilities, without using a hotplug
- * notifier.
+ * notifier. This is also used to decide whether we can use
+ * the fast path for checking constant CPU caps.
  */
-static bool sys_caps_initialised;
-
-static inline void set_sys_caps_initialised(void)
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+static inline void finalize_system_capabilities(void)
 {
-       sys_caps_initialised = true;
+       static_branch_enable(&arm64_const_caps_ready);
 }
 
 static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
@@ -119,6 +121,7 @@ static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
@@ -135,6 +138,10 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
@@ -176,10 +183,18 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
@@ -225,6 +240,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
@@ -313,6 +329,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
        ARM64_FTR_END,
 };
 
+static const struct arm64_ftr_bits ftr_id_isar6[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),               /* State3 */
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),                /* State2 */
@@ -396,6 +423,7 @@ static const struct __ftr_reg_entry {
        ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
        ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
        ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
+       ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),
 
        /* Op1 = 0, CRn = 0, CRm = 3 */
        ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
@@ -600,6 +628,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
                init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
                init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
                init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+               init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
                init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
                init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
                init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
@@ -753,6 +782,8 @@ void update_cpu_features(int cpu,
                                        info->reg_id_isar4, boot->reg_id_isar4);
                taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
                                        info->reg_id_isar5, boot->reg_id_isar5);
+               taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
+                                       info->reg_id_isar6, boot->reg_id_isar6);
 
                /*
                 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
@@ -785,7 +816,7 @@ void update_cpu_features(int cpu,
 
                /* Probe vector lengths, unless we already gave up on SVE */
                if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
-                   !sys_caps_initialised)
+                   !system_capabilities_finalized())
                        sve_update_vq_map();
        }
 
@@ -831,6 +862,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
        read_sysreg_case(SYS_ID_ISAR3_EL1);
        read_sysreg_case(SYS_ID_ISAR4_EL1);
        read_sysreg_case(SYS_ID_ISAR5_EL1);
+       read_sysreg_case(SYS_ID_ISAR6_EL1);
        read_sysreg_case(SYS_MVFR0_EL1);
        read_sysreg_case(SYS_MVFR1_EL1);
        read_sysreg_case(SYS_MVFR2_EL1);
@@ -965,6 +997,46 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
        return has_cpuid_feature(entry, scope);
 }
 
+/*
+ * This check is triggered during early boot, before the cpufeature
+ * code is initialised. Checking the status on the local CPU allows
+ * the boot CPU to detect the need for non-global mappings and thus
+ * avoid a pagetable rewrite after all the CPUs have booted. The check
+ * is run again on each individual CPU anyway, so we converge on a
+ * consistent state once the SMP CPUs are up and can then switch to
+ * non-global mappings if required.
+ */
+bool kaslr_requires_kpti(void)
+{
+       if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+               return false;
+
+       /*
+        * E0PD does a similar job to KPTI so can be used instead
+        * where available.
+        */
+       if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+               u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+               if (cpuid_feature_extract_unsigned_field(mmfr2,
+                                               ID_AA64MMFR2_E0PD_SHIFT))
+                       return false;
+       }
+
+       /*
+        * Systems affected by Cavium erratum 27456 are incompatible
+        * with KPTI.
+        */
+       if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+               extern const struct midr_range cavium_erratum_27456_cpus[];
+
+               if (is_midr_in_range_list(read_cpuid_id(),
+                                         cavium_erratum_27456_cpus))
+                       return false;
+       }
+
+       return kaslr_offset() > 0;
+}
+
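
A hypothetical sketch of the early consumer this enables: the boot CPU can settle the non-global-mappings question before writing any pagetables, rather than rewriting them later (the function name is illustrative; the actual wiring lives elsewhere in this series):

static void sketch_early_kpti_decision(void)
{
        if (kaslr_requires_kpti())
                arm64_use_ng_mappings = true;
}
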
 static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
@@ -975,6 +1047,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        static const struct midr_range kpti_safe_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+               MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
@@ -1008,7 +1081,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        }
 
        /* Useful for KASLR robustness */
-       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+       if (kaslr_requires_kpti()) {
                if (!__kpti_forced) {
                        str = "KASLR";
                        __kpti_forced = 1;
@@ -1043,7 +1116,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
        extern kpti_remap_fn idmap_kpti_install_ng_mappings;
        kpti_remap_fn *remap_fn;
 
-       static bool kpti_applied = false;
        int cpu = smp_processor_id();
 
        /*
@@ -1051,7 +1123,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
         * it already or we have KASLR enabled and therefore have not
         * created any global mappings at all.
         */
-       if (kpti_applied || kaslr_offset() > 0)
+       if (arm64_use_ng_mappings)
                return;
 
        remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -1061,7 +1133,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
        cpu_uninstall_idmap();
 
        if (!cpu)
-               kpti_applied = true;
+               arm64_use_ng_mappings = true;
 
        return;
 }
@@ -1251,6 +1323,14 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
+#ifdef CONFIG_ARM64_E0PD
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
+{
+       if (this_cpu_has_cap(ARM64_HAS_E0PD))
+               sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
+}
+#endif /* CONFIG_ARM64_E0PD */
+
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 static bool enable_pseudo_nmi;
 
@@ -1291,7 +1371,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .cpu_enable = cpu_enable_pan,
        },
 #endif /* CONFIG_ARM64_PAN */
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
        {
                .desc = "LSE atomic instructions",
                .capability = ARM64_HAS_LSE_ATOMICS,
@@ -1302,7 +1382,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .sign = FTR_UNSIGNED,
                .min_field_value = 2,
        },
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
        {
                .desc = "Software prefetching using PRFM",
                .capability = ARM64_HAS_NO_HW_PREFETCH,
@@ -1368,7 +1448,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                /* FP/SIMD is not implemented */
                .capability = ARM64_HAS_NO_FPSIMD,
-               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
                .min_field_value = 0,
                .matches = has_no_fpsimd,
        },
@@ -1566,6 +1646,31 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .sign = FTR_UNSIGNED,
                .min_field_value = 1,
        },
+#endif
+#ifdef CONFIG_ARM64_E0PD
+       {
+               .desc = "E0PD",
+               .capability = ARM64_HAS_E0PD,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .sys_reg = SYS_ID_AA64MMFR2_EL1,
+               .sign = FTR_UNSIGNED,
+               .field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+               .matches = has_cpuid_feature,
+               .min_field_value = 1,
+               .cpu_enable = cpu_enable_e0pd,
+       },
+#endif
+#ifdef CONFIG_ARCH_RANDOM
+       {
+               .desc = "Random Number Generator",
+               .capability = ARM64_HAS_RNG,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64ISAR0_EL1,
+               .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+               .sign = FTR_UNSIGNED,
+               .min_field_value = 1,
+       },
 #endif
        {},
 };
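Each entry in the table above is matched generically against a sanitised ID register. A minimal sketch of the comparison that has_cpuid_feature() ultimately performs (illustrative; the real path also selects which register to read based on the match scope):

    /* Sketch: generic ID-register field test for one capability entry */
    static bool matches_sketch(const struct arm64_cpu_capabilities *cap, u64 reg)
    {
            int val = cpuid_feature_extract_field(reg, cap->field_pos, cap->sign);

            return val >= cap->min_field_value;
    }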
@@ -1596,6 +1701,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .match_list = list,                                             \
        }
 
+#define HWCAP_CAP_MATCH(match, cap_type, cap)                                  \
+       {                                                                       \
+               __HWCAP_CAP(#cap, cap_type, cap)                                \
+               .matches = match,                                               \
+       }
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
        {
@@ -1638,6 +1749,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
@@ -1651,6 +1763,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
        HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
@@ -1658,8 +1773,12 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
        HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+       HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
 #endif
        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
 #ifdef CONFIG_ARM64_PTR_AUTH
@@ -1669,8 +1788,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        {},
 };
 
+#ifdef CONFIG_COMPAT
+static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
+{
+       /*
+        * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
+        * in line with the arm32 check in vfp_init(). We make the check
+        * future-proof by requiring each field value to be non-zero.
+        */
+       u32 mvfr1;
+
+       WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+       if (scope == SCOPE_SYSTEM)
+               mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
+       else
+               mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
+
+       return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
+               cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
+               cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
+}
+#endif
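For reference, the field extraction used above boils down to a shift pair that isolates a 4-bit unsigned ID-register field; a minimal sketch (the kernel's cpuid_feature_extract_unsigned_field() behaves like this for the default 4-bit width):

    /* Sketch: extract the 4-bit unsigned field at 'shift' from 'reg' */
    static inline unsigned int extract_id_field_sketch(u64 reg, int shift)
    {
            return (unsigned int)((reg << (64 - 4 - shift)) >> (64 - 4));
    }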
+
 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 #ifdef CONFIG_COMPAT
+       HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
+       HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+       /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggyback on this to detect the presence of VFP support */
+       HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+       HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
@@ -1974,7 +2120,7 @@ void check_local_cpu_capabilities(void)
         * Otherwise, this CPU should verify that it has all the system
         * advertised capabilities.
         */
-       if (!sys_caps_initialised)
+       if (!system_capabilities_finalized())
                update_cpu_capabilities(SCOPE_LOCAL_CPU);
        else
                verify_local_cpu_capabilities();
@@ -1988,14 +2134,6 @@ static void __init setup_boot_cpu_capabilities(void)
        enable_cpu_capabilities(SCOPE_BOOT_CPU);
 }
 
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-
-static void __init mark_const_caps_ready(void)
-{
-       static_branch_enable(&arm64_const_caps_ready);
-}
-
 bool this_cpu_has_cap(unsigned int n)
 {
        if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
@@ -2054,7 +2192,6 @@ void __init setup_cpu_features(void)
        u32 cwg;
 
        setup_system_capabilities();
-       mark_const_caps_ready();
        setup_elf_hwcaps(arm64_elf_hwcaps);
 
        if (system_supports_32bit_el0())
@@ -2067,7 +2204,7 @@ void __init setup_cpu_features(void)
        minsigstksz_setup();
 
        /* Advertise that we have computed the system capabilities */
-       set_sys_caps_initialised();
+       finalize_system_capabilities();
 
        /*
         * Check for sane CTR_EL0.CWG value.
index 56bba746da1c227f92c9922cce556dfd9802e038..86136075ae41042ec32bb0b755ef38a7ce06d051 100644 (file)
@@ -84,6 +84,14 @@ static const char *const hwcap_str[] = {
        "svesm4",
        "flagm2",
        "frint",
+       "svei8mm",
+       "svef32mm",
+       "svef64mm",
+       "svebf16",
+       "i8mm",
+       "bf16",
+       "dgh",
+       "rng",
        NULL
 };
 
@@ -360,6 +368,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
                info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
                info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
                info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+               info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
                info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
                info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
                info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
index 5dce5e56995aa72bfec378b15df65cb1fbe8917b..fde59981445ca9a0845102c905a8520773f618bf 100644 (file)
@@ -36,14 +36,14 @@ static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
 }
 NOKPROBE_SYMBOL(el1_pc);
 
-static void el1_undef(struct pt_regs *regs)
+static void notrace el1_undef(struct pt_regs *regs)
 {
        local_daif_inherit(regs);
        do_undefinstr(regs);
 }
 NOKPROBE_SYMBOL(el1_undef);
 
-static void el1_inv(struct pt_regs *regs, unsigned long esr)
+static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
 {
        local_daif_inherit(regs);
        bad_mode(regs, 0, esr);
@@ -215,7 +215,7 @@ static void notrace el0_svc(struct pt_regs *regs)
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
-       el0_svc_handler(regs);
+       do_el0_svc(regs);
 }
 NOKPROBE_SYMBOL(el0_svc);
 
@@ -281,7 +281,7 @@ static void notrace el0_svc_compat(struct pt_regs *regs)
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
-       el0_svc_compat_handler(regs);
+       do_el0_svc_compat(regs);
 }
 NOKPROBE_SYMBOL(el0_svc_compat);
 
index 7c6a0a41676f83ce0f40cfc1e3197a03e6ca3570..9461d812ae27ccfb52efa147d2edae9e51710980 100644 (file)
        .macro kernel_ventry, el, label, regsize = 64
        .align 7
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
        .if     \el == 0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
        .if     \regsize == 64
        mrs     x30, tpidrro_el0
        msr     tpidrro_el0, xzr
        .else
        mov     x30, xzr
        .endif
-       .endif
 alternative_else_nop_endif
+       .endif
 #endif
 
        sub     sp, sp, #S_FRAME_SIZE
@@ -167,9 +167,13 @@ alternative_cb_end
        .if     \el == 0
        clear_gp_regs
        mrs     x21, sp_el0
-       ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
-       ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
-       disable_step_tsk x19, x20               // exceptions when scheduling.
+       ldr_this_cpu    tsk, __entry_task, x20
+       msr     sp_el0, tsk
+
+       // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+       // when scheduling.
+       ldr     x19, [tsk, #TSK_TI_FLAGS]
+       disable_step_tsk x19, x20
 
        apply_ssbd 1, x22, x23
 
@@ -232,13 +236,6 @@ alternative_else_nop_endif
        str     w21, [sp, #S_SYSCALLNO]
        .endif
 
-       /*
-        * Set sp_el0 to current thread_info.
-        */
-       .if     \el == 0
-       msr     sp_el0, tsk
-       .endif
-
        /* Save pmr */
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        mrs_s   x20, SYS_ICC_PMR_EL1
@@ -605,7 +602,7 @@ el1_irq:
 
        irq_handler
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        /*
@@ -653,6 +650,7 @@ el0_sync:
        mov     x0, sp
        bl      el0_sync_handler
        b       ret_to_user
+ENDPROC(el0_sync)
 
 #ifdef CONFIG_COMPAT
        .align  6
@@ -661,16 +659,18 @@ el0_sync_compat:
        mov     x0, sp
        bl      el0_sync_compat_handler
        b       ret_to_user
-ENDPROC(el0_sync)
+ENDPROC(el0_sync_compat)
 
        .align  6
 el0_irq_compat:
        kernel_entry 0, 32
        b       el0_irq_naked
+ENDPROC(el0_irq_compat)
 
 el0_error_compat:
        kernel_entry 0, 32
        b       el0_error_naked
+ENDPROC(el0_error_compat)
 #endif
 
        .align  6
index 3eb338f143868ccb11c4a87b5e80f85edc0255c5..94289d12699330f9f058f61cc253b3d7046f0c71 100644 (file)
@@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
  */
 static void task_fpsimd_load(void)
 {
+       WARN_ON(!system_supports_fpsimd());
        WARN_ON(!have_cpu_fpsimd_context());
 
        if (system_supports_sve() && test_thread_flag(TIF_SVE))
@@ -289,6 +290,7 @@ static void fpsimd_save(void)
                this_cpu_ptr(&fpsimd_last_state);
        /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 
+       WARN_ON(!system_supports_fpsimd());
        WARN_ON(!have_cpu_fpsimd_context());
 
        if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
        struct fpsimd_last_state_struct *last =
                this_cpu_ptr(&fpsimd_last_state);
 
+       WARN_ON(!system_supports_fpsimd());
        last->st = &current->thread.uw.fpsimd_state;
        last->sve_state = current->thread.sve_state;
        last->sve_vl = current->thread.sve_vl;
@@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
        struct fpsimd_last_state_struct *last =
                this_cpu_ptr(&fpsimd_last_state);
 
+       WARN_ON(!system_supports_fpsimd());
        WARN_ON(!in_softirq() && !irqs_disabled());
 
        last->st = st;
@@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
  */
 void fpsimd_restore_current_state(void)
 {
-       if (!system_supports_fpsimd())
+       /*
+        * For tasks created before we detected the absence of FP/SIMD,
+        * TIF_FOREIGN_FPSTATE may have been set via fpsimd_thread_switch(),
+        * e.g. for init, and then inherited by its children. If we later
+        * detect that the system doesn't support FP/SIMD, we must clear
+        * the flag for all tasks to indicate that the FPSTATE is clean
+        * (as we cannot have one) to avoid looping forever in
+        * do_notify_resume().
+        */
+       if (!system_supports_fpsimd()) {
+               clear_thread_flag(TIF_FOREIGN_FPSTATE);
                return;
+       }
 
        get_cpu_fpsimd_context();
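The comment above guards against a livelock. A simplified sketch of the ret-to-user work-loop shape it refers to (an assumption about the loop's structure for illustration, not the exact do_notify_resume() code):

    /* Sketch: a stuck TIF_FOREIGN_FPSTATE would keep this loop spinning */
    do {
            if (thread_flags & _TIF_FOREIGN_FPSTATE)
                    fpsimd_restore_current_state();  /* must clear the flag
                                                        on !FPSIMD systems */
            local_daif_mask();
            thread_flags = READ_ONCE(current_thread_info()->flags);
    } while (thread_flags & _TIF_WORK_MASK);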
 
@@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
  */
 void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 {
-       if (!system_supports_fpsimd())
+       if (WARN_ON(!system_supports_fpsimd()))
                return;
 
        get_cpu_fpsimd_context();
@@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 void fpsimd_flush_task_state(struct task_struct *t)
 {
        t->thread.fpsimd_cpu = NR_CPUS;
-
+       /*
+        * If we don't support fpsimd, bail out after we have
+        * reset the fpsimd_cpu for this task and cleared the
+        * FPSTATE.
+        */
+       if (!system_supports_fpsimd())
+               return;
        barrier();
        set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
 
@@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
  */
 static void fpsimd_flush_cpu_state(void)
 {
+       WARN_ON(!system_supports_fpsimd());
        __this_cpu_write(fpsimd_last_state.st, NULL);
        set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
@@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
  */
 void fpsimd_save_and_flush_cpu_state(void)
 {
+       if (!system_supports_fpsimd())
+               return;
        WARN_ON(preemptible());
        __get_cpu_fpsimd_context();
        fpsimd_save();
index a96b2921d22c2f12b4fdbb1bb1a4b918d8bd66c5..590963c9c6094435c1bd5bbe599327f7f1264e94 100644 (file)
@@ -182,78 +182,79 @@ int arch_hibernation_header_restore(void *addr)
 }
 EXPORT_SYMBOL(arch_hibernation_header_restore);
 
-/*
- * Copies length bytes, starting at src_start into an new page,
- * perform cache maintentance, then maps it at the specified address low
- * address as executable.
- *
- * This is used by hibernate to copy the code it needs to execute when
- * overwriting the kernel text. This function generates a new set of page
- * tables, which it loads into ttbr0.
- *
- * Length is provided as we probably only want 4K of data, even on a 64K
- * page system.
- */
-static int create_safe_exec_page(void *src_start, size_t length,
-                                unsigned long dst_addr,
-                                phys_addr_t *phys_dst_addr,
-                                void *(*allocator)(gfp_t mask),
-                                gfp_t mask)
+static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
+                      unsigned long dst_addr,
+                      pgprot_t pgprot)
 {
-       int rc = 0;
-       pgd_t *trans_pgd;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
-       unsigned long dst = (unsigned long)allocator(mask);
-
-       if (!dst) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       memcpy((void *)dst, src_start, length);
-       __flush_icache_range(dst, dst + length);
-
-       trans_pgd = allocator(mask);
-       if (!trans_pgd) {
-               rc = -ENOMEM;
-               goto out;
-       }
 
        pgdp = pgd_offset_raw(trans_pgd, dst_addr);
        if (pgd_none(READ_ONCE(*pgdp))) {
-               pudp = allocator(mask);
-               if (!pudp) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
+               pudp = (void *)get_safe_page(GFP_ATOMIC);
+               if (!pudp)
+                       return -ENOMEM;
                pgd_populate(&init_mm, pgdp, pudp);
        }
 
        pudp = pud_offset(pgdp, dst_addr);
        if (pud_none(READ_ONCE(*pudp))) {
-               pmdp = allocator(mask);
-               if (!pmdp) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
+               pmdp = (void *)get_safe_page(GFP_ATOMIC);
+               if (!pmdp)
+                       return -ENOMEM;
                pud_populate(&init_mm, pudp, pmdp);
        }
 
        pmdp = pmd_offset(pudp, dst_addr);
        if (pmd_none(READ_ONCE(*pmdp))) {
-               ptep = allocator(mask);
-               if (!ptep) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
+               ptep = (void *)get_safe_page(GFP_ATOMIC);
+               if (!ptep)
+                       return -ENOMEM;
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }
 
        ptep = pte_offset_kernel(pmdp, dst_addr);
-       set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+       set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
+
+       return 0;
+}
+
+/*
+ * Copies length bytes, starting at src_start, into a new page,
+ * performs cache maintenance, then maps it at the specified low
+ * address as executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+                                unsigned long dst_addr,
+                                phys_addr_t *phys_dst_addr)
+{
+       void *page = (void *)get_safe_page(GFP_ATOMIC);
+       pgd_t *trans_pgd;
+       int rc;
+
+       if (!page)
+               return -ENOMEM;
+
+       memcpy(page, src_start, length);
+       __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+
+       trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
+       if (!trans_pgd)
+               return -ENOMEM;
+
+       rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
+                               PAGE_KERNEL_EXEC);
+       if (rc)
+               return rc;
 
        /*
         * Load our new page tables. A strict BBM approach requires that we
@@ -269,13 +270,12 @@ static int create_safe_exec_page(void *src_start, size_t length,
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
-       write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
+       write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
        isb();
 
-       *phys_dst_addr = virt_to_phys((void *)dst);
+       *phys_dst_addr = virt_to_phys(page);
 
-out:
-       return rc;
+       return 0;
 }
 
 #define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
@@ -450,7 +450,7 @@ static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
                                return -ENOMEM;
                } else {
                        set_pud(dst_pudp,
-                               __pud(pud_val(pud) & ~PMD_SECT_RDONLY));
+                               __pud(pud_val(pud) & ~PUD_SECT_RDONLY));
                }
        } while (dst_pudp++, src_pudp++, addr = next, addr != end);
 
@@ -476,6 +476,24 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
        return 0;
 }
 
+static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
+                         unsigned long end)
+{
+       int rc;
+       pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+
+       if (!trans_pgd) {
+               pr_err("Failed to allocate memory for temporary page tables.\n");
+               return -ENOMEM;
+       }
+
+       rc = copy_page_tables(trans_pgd, start, end);
+       if (!rc)
+               *dst_pgdp = trans_pgd;
+
+       return rc;
+}
+
 /*
  * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
  *
@@ -484,7 +502,7 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
  */
 int swsusp_arch_resume(void)
 {
-       int rc = 0;
+       int rc;
        void *zero_page;
        size_t exit_size;
        pgd_t *tmp_pg_dir;
@@ -497,15 +515,9 @@ int swsusp_arch_resume(void)
         * Create a second copy of just the linear map, and use this when
         * restoring.
         */
-       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-       if (!tmp_pg_dir) {
-               pr_err("Failed to allocate memory for temporary page tables.\n");
-               rc = -ENOMEM;
-               goto out;
-       }
-       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
+       rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
        if (rc)
-               goto out;
+               return rc;
 
        /*
         * We need a zero page that is zero before & after resume in order to
@@ -514,8 +526,7 @@ int swsusp_arch_resume(void)
        zero_page = (void *)get_safe_page(GFP_ATOMIC);
        if (!zero_page) {
                pr_err("Failed to allocate zero page.\n");
-               rc = -ENOMEM;
-               goto out;
+               return -ENOMEM;
        }
 
        /*
@@ -530,11 +541,10 @@ int swsusp_arch_resume(void)
         */
        rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
                                   (unsigned long)hibernate_exit,
-                                  &phys_hibernate_exit,
-                                  (void *)get_safe_page, GFP_ATOMIC);
+                                  &phys_hibernate_exit);
        if (rc) {
                pr_err("Failed to create safe executable page for hibernate_exit code.\n");
-               goto out;
+               return rc;
        }
 
        /*
@@ -561,8 +571,7 @@ int swsusp_arch_resume(void)
                       resume_hdr.reenter_kernel, restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
 
-out:
-       return rc;
+       return 0;
 }
 
 int hibernate_resume_nonboot_cpu_disable(void)
index 2a11a962e571da953e4af15e8d5b4d499ddfe4fd..53b8a4ee64ff0cb68f959b4562d0ae0b64c14ac5 100644 (file)
@@ -120,6 +120,17 @@ u64 __init kaslr_early_init(u64 dt_phys)
                return 0;
        }
 
+       /*
+        * Mix in any entropy obtainable architecturally, open coded
+        * since this runs extremely early.
+        */
+       if (__early_cpu_has_rndr()) {
+               unsigned long raw;
+
+               if (__arm64_rndr(&raw))
+                       seed ^= raw;
+       }
+
        if (!seed) {
                kaslr_status = KASLR_DISABLED_NO_SEED;
                return 0;
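A sketch of how an RNDR read helper such as __arm64_rndr() can be implemented (illustrative; assumes the Armv8.5 RNG behaviour where a failed read returns with the Z flag set, and uses the raw s3_3_c2_c4_0 encoding for RNDR):

    /* Sketch: read RNDR; returns false when no entropy is available */
    static inline bool rndr_read_sketch(unsigned long *v)
    {
            bool ok;

            asm volatile("mrs  %0, s3_3_c2_c4_0\n"   /* RNDR */
                         "cset %w1, ne\n"            /* ok = !Z */
                         : "=r" (*v), "=r" (ok) : : "cc");
            return ok;
    }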
index 29a9428486a5778e229d83aaa464644f33c29c09..af9987c154cabdf284330727e322959c03a30815 100644 (file)
@@ -47,10 +47,6 @@ static void *image_load(struct kimage *image,
        struct kexec_segment *kernel_segment;
        int ret;
 
-       /* We don't support crash kernels yet. */
-       if (image->type == KEXEC_TYPE_CRASH)
-               return ERR_PTR(-EOPNOTSUPP);
-
        /*
         * We require a kernel with an unambiguous Image header. Per
         * Documentation/arm64/booting.rst, this is the case when image_size
index 0df8493624e0036ceb3782f9fa7b5fe12ce64501..8e9c924423b4ead238f73c1db04fe81bf6979b15 100644 (file)
@@ -160,18 +160,6 @@ void machine_kexec(struct kimage *kimage)
 
        kexec_image_info(kimage);
 
-       pr_debug("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
-               kimage->control_code_page);
-       pr_debug("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
-               &reboot_code_buffer_phys);
-       pr_debug("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
-               reboot_code_buffer);
-       pr_debug("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
-               arm64_relocate_new_kernel);
-       pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
-               __func__, __LINE__, arm64_relocate_new_kernel_size,
-               arm64_relocate_new_kernel_size);
-
        /*
         * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
         * after the kernel is shut down.
index 7b08bf9499b6b5a338d68d3f8baa08741e720c53..dd3ae8081b38e9bb2a19eafa985a1c728abcdd49 100644 (file)
 #include <linux/memblock.h>
 #include <linux/of_fdt.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <asm/byteorder.h>
 
 /* relevant device tree properties */
+#define FDT_PROP_KEXEC_ELFHDR  "linux,elfcorehdr"
+#define FDT_PROP_MEM_RANGE     "linux,usable-memory-range"
 #define FDT_PROP_INITRD_START  "linux,initrd-start"
 #define FDT_PROP_INITRD_END    "linux,initrd-end"
 #define FDT_PROP_BOOTARGS      "bootargs"
@@ -40,6 +43,10 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
        vfree(image->arch.dtb);
        image->arch.dtb = NULL;
 
+       vfree(image->arch.elf_headers);
+       image->arch.elf_headers = NULL;
+       image->arch.elf_headers_sz = 0;
+
        return kexec_image_post_load_cleanup_default(image);
 }
 
@@ -55,6 +62,31 @@ static int setup_dtb(struct kimage *image,
 
        off = ret;
 
+       ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR);
+       if (ret && ret != -FDT_ERR_NOTFOUND)
+               goto out;
+       ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE);
+       if (ret && ret != -FDT_ERR_NOTFOUND)
+               goto out;
+
+       if (image->type == KEXEC_TYPE_CRASH) {
+               /* add linux,elfcorehdr */
+               ret = fdt_appendprop_addrrange(dtb, 0, off,
+                               FDT_PROP_KEXEC_ELFHDR,
+                               image->arch.elf_headers_mem,
+                               image->arch.elf_headers_sz);
+               if (ret)
+                       return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
+
+               /* add linux,usable-memory-range */
+               ret = fdt_appendprop_addrrange(dtb, 0, off,
+                               FDT_PROP_MEM_RANGE,
+                               crashk_res.start,
+                               crashk_res.end - crashk_res.start + 1);
+               if (ret)
+                       return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
+       }
+
        /* add bootargs */
        if (cmdline) {
                ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
@@ -125,8 +157,8 @@ out:
 }
 
 /*
- * More space needed so that we can add initrd, bootargs, kaslr-seed, and
- * rng-seed.
+ * More space needed so that we can add initrd, bootargs, kaslr-seed,
+ * rng-seed, usable-memory-range and elfcorehdr.
  */
 #define DTB_EXTRA_SPACE 0x1000
 
@@ -174,6 +206,43 @@ static int create_dtb(struct kimage *image,
        }
 }
 
+static int prepare_elf_headers(void **addr, unsigned long *sz)
+{
+       struct crash_mem *cmem;
+       unsigned int nr_ranges;
+       int ret;
+       u64 i;
+       phys_addr_t start, end;
+
+       nr_ranges = 1; /* for exclusion of crashkernel region */
+       for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+                                       MEMBLOCK_NONE, &start, &end, NULL)
+               nr_ranges++;
+
+       cmem = kmalloc(sizeof(struct crash_mem) +
+                       sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL);
+       if (!cmem)
+               return -ENOMEM;
+
+       cmem->max_nr_ranges = nr_ranges;
+       cmem->nr_ranges = 0;
+       for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+                                       MEMBLOCK_NONE, &start, &end, NULL) {
+               cmem->ranges[cmem->nr_ranges].start = start;
+               cmem->ranges[cmem->nr_ranges].end = end - 1;
+               cmem->nr_ranges++;
+       }
+
+       /* Exclude crashkernel region */
+       ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+
+       if (!ret)
+               ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+
+       kfree(cmem);
+       return ret;
+}
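The kmalloc() above sizes the header plus a trailing range array by hand. Assuming struct crash_mem ends in a flexible array member, the overflow-checked struct_size() helper from <linux/overflow.h> expresses the same allocation (a sketch of an equivalent call, not a required change):

    cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);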
+
 int load_other_segments(struct kimage *image,
                        unsigned long kernel_load_addr,
                        unsigned long kernel_size,
@@ -181,14 +250,43 @@ int load_other_segments(struct kimage *image,
                        char *cmdline)
 {
        struct kexec_buf kbuf;
-       void *dtb = NULL;
-       unsigned long initrd_load_addr = 0, dtb_len;
+       void *headers, *dtb = NULL;
+       unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
        int ret = 0;
 
        kbuf.image = image;
        /* do not allocate anything below the kernel */
        kbuf.buf_min = kernel_load_addr + kernel_size;
 
+       /* load elf core header */
+       if (image->type == KEXEC_TYPE_CRASH) {
+               ret = prepare_elf_headers(&headers, &headers_sz);
+               if (ret) {
+                       pr_err("Preparing elf core header failed\n");
+                       goto out_err;
+               }
+
+               kbuf.buffer = headers;
+               kbuf.bufsz = headers_sz;
+               kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+               kbuf.memsz = headers_sz;
+               kbuf.buf_align = SZ_64K; /* largest supported page size */
+               kbuf.buf_max = ULONG_MAX;
+               kbuf.top_down = true;
+
+               ret = kexec_add_buffer(&kbuf);
+               if (ret) {
+                       vfree(headers);
+                       goto out_err;
+               }
+               image->arch.elf_headers = headers;
+               image->arch.elf_headers_mem = kbuf.mem;
+               image->arch.elf_headers_sz = headers_sz;
+
+               pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+                        image->arch.elf_headers_mem, headers_sz, headers_sz);
+       }
+
        /* load initrd */
        if (initrd) {
                kbuf.buffer = initrd;
index 71f788cd2b18772629c4627be979f75753253d08..bbb0f0c145f6f5e4201ec728064fe992b9ee801c 100644 (file)
@@ -360,8 +360,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 asmlinkage void ret_from_fork(void) asm("ret_from_fork");
 
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
-               unsigned long stk_sz, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+               unsigned long stk_sz, struct task_struct *p, unsigned long tls)
 {
        struct pt_regs *childregs = task_pt_regs(p);
 
@@ -394,11 +394,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                }
 
                /*
-                * If a TLS pointer was passed to clone (4th argument), use it
-                * for the new thread.
+                * If a TLS pointer was passed to clone, use it for the new
+                * thread.
                 */
                if (clone_flags & CLONE_SETTLS)
-                       p->thread.uw.tp_value = childregs->regs[3];
+                       p->thread.uw.tp_value = tls;
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
@@ -646,6 +646,6 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void)
         * Only allow a task to be preempted once cpufeatures have been
         * enabled.
         */
-       if (static_branch_likely(&arm64_const_caps_ready))
+       if (system_capabilities_finalized())
                preempt_schedule_irq();
 }
index 6771c399d40ca3d6f68632f670f7d1b7c848d9d0..cd6e5fa48b9cd3a156ecdf18267fadeccdd08e37 100644 (file)
@@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
        return 0;
 }
 
+static int fpr_active(struct task_struct *target, const struct user_regset *regset)
+{
+       if (!system_supports_fpsimd())
+               return -ENODEV;
+       return regset->n;
+}
+
 /*
  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
  */
@@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
 {
+       if (!system_supports_fpsimd())
+               return -EINVAL;
+
        if (target == current)
                fpsimd_preserve_current_state();
 
@@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 {
        int ret;
 
+       if (!system_supports_fpsimd())
+               return -EINVAL;
+
        ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
        if (ret)
                return ret;
@@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = {
                 */
                .size = sizeof(u32),
                .align = sizeof(u32),
+               .active = fpr_active,
                .get = fpr_get,
                .set = fpr_set
        },
@@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target,
        compat_ulong_t fpscr;
        int ret, vregs_end_pos;
 
+       if (!system_supports_fpsimd())
+               return -EINVAL;
+
        uregs = &target->thread.uw.fpsimd_state;
 
        if (target == current)
@@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target,
        compat_ulong_t fpscr;
        int ret, vregs_end_pos;
 
+       if (!system_supports_fpsimd())
+               return -EINVAL;
+
        uregs = &target->thread.uw.fpsimd_state;
 
        vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
@@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = {
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
+               .active = fpr_active,
                .get = compat_vfp_get,
                .set = compat_vfp_set
        },
index 56f6645617548600de5c0fa25133753d357895e3..b6f9455d7ca3ad6da0a2785954f498c1e644b5c4 100644 (file)
@@ -285,6 +285,13 @@ void __init setup_arch(char **cmdline_p)
 
        *cmdline_p = boot_command_line;
 
+       /*
+        * If we know now that we are going to need KPTI then use non-global
+        * mappings from the start, avoiding the cost of rewriting
+        * everything later.
+        */
+       arm64_use_ng_mappings = kaslr_requires_kpti();
+
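A sketch of the shape such a predicate can take (assumptions for illustration: KPTI is needed for KASLR only when randomization is compiled in and the CPU cannot hide the kernel range with E0PD; cpu_has_e0pd() is a hypothetical early-probe helper, and the real kaslr_requires_kpti() also accounts for CPU errata):

    /* Sketch: decide early whether KASLR will force KPTI */
    static inline bool kaslr_requires_kpti_sketch(void)
    {
            if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                    return false;
            if (cpu_has_e0pd())             /* hypothetical helper */
                    return false;
            return kaslr_offset() > 0;
    }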
        early_fixmap_init();
        early_ioremap_init();
 
index dd2cdc0d5be20cd4d1da02413dd65e8eca008d38..339882db5a9159bfd888d933dc5fff235408e475 100644 (file)
@@ -371,6 +371,8 @@ static int parse_user_sigframe(struct user_ctxs *user,
                        goto done;
 
                case FPSIMD_MAGIC:
+                       if (!system_supports_fpsimd())
+                               goto invalid;
                        if (user->fpsimd)
                                goto invalid;
 
@@ -506,7 +508,7 @@ static int restore_sigframe(struct pt_regs *regs,
        if (err == 0)
                err = parse_user_sigframe(&user, sf);
 
-       if (err == 0) {
+       if (err == 0 && system_supports_fpsimd()) {
                if (!user.fpsimd)
                        return -EINVAL;
 
@@ -623,7 +625,7 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
 
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 
-       if (err == 0) {
+       if (err == 0 && system_supports_fpsimd()) {
                struct fpsimd_context __user *fpsimd_ctx =
                        apply_user_offset(user, user->fpsimd_offset);
                err |= preserve_fpsimd_context(fpsimd_ctx);
index 12a585386c2f2b17e10b1a6086eabedf2d203926..82feca6f70521c4f699a75badc92f535201d7671 100644 (file)
@@ -223,7 +223,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
        err |= !valid_user_regs(&regs->user_regs, current);
 
        aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
-       if (err == 0)
+       if (err == 0 && system_supports_fpsimd())
                err |= compat_restore_vfp_context(&aux->vfp);
 
        return err;
@@ -419,7 +419,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
 
        aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
 
-       if (err == 0)
+       if (err == 0 && system_supports_fpsimd())
                err |= compat_preserve_vfp_context(&aux->vfp);
        __put_user_error(0, &aux->end_magic, err);
 
index 52cfc6148355f8d129fd307010a1aabecf515631..b26955f567501cc7103435a459dde7700dffba18 100644 (file)
@@ -37,7 +37,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
 
        /* Unsupported */
        if (state == ARM64_SSBD_UNKNOWN)
-               return -EINVAL;
+               return -ENODEV;
 
        /* Treat the unaffected/mitigated state separately */
        if (state == ARM64_SSBD_MITIGATED) {
@@ -102,7 +102,7 @@ static int ssbd_prctl_get(struct task_struct *task)
 {
        switch (arm64_get_ssbd_state()) {
        case ARM64_SSBD_UNKNOWN:
-               return -EINVAL;
+               return -ENODEV;
        case ARM64_SSBD_FORCE_ENABLE:
                return PR_SPEC_DISABLE;
        case ARM64_SSBD_KERNEL:
index 9a9d98a443fc17d6a1a77822deab7a47d7e4254a..a12c0c88d3457357de64c32863690726639112f2 100644 (file)
@@ -154,14 +154,14 @@ static inline void sve_user_discard(void)
        sve_user_disable();
 }
 
-void el0_svc_handler(struct pt_regs *regs)
+void do_el0_svc(struct pt_regs *regs)
 {
        sve_user_discard();
        el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
 }
 
 #ifdef CONFIG_COMPAT
-void el0_svc_compat_handler(struct pt_regs *regs)
+void do_el0_svc_compat(struct pt_regs *regs)
 {
        el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
                       compat_sys_call_table);
index 73caf35c2262cc4d8f2dba7d8b619a1caef3dad1..cf402be5c573ff882797422ee02bc913d7c17f24 100644 (file)
@@ -144,9 +144,12 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 
 #ifdef CONFIG_PREEMPT
 #define S_PREEMPT " PREEMPT"
+#elif defined(CONFIG_PREEMPT_RT)
+#define S_PREEMPT " PREEMPT_RT"
 #else
 #define S_PREEMPT ""
 #endif
+
 #define S_SMP " SMP"
 
 static int __die(const char *str, int err, struct pt_regs *regs)
index e5cc8d66bf537e3c7719d96e7e75853b0f578e61..0c6832ec52b11540545ae24fa5dde7a1fa29a4e0 100644 (file)
        .text
        .pushsection    .hyp.text, "ax"
 
+/*
+ * We treat x18 as callee-saved as the host may use it as a platform
+ * register (e.g. for shadow call stack).
+ */
 .macro save_callee_saved_regs ctxt
+       str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
@@ -32,6 +37,8 @@
 .endm
 
 .macro restore_callee_saved_regs ctxt
+       // We require \ctxt is not x18-x28
+       ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
@@ -48,7 +55,7 @@ ENTRY(__guest_enter)
        // x0: vcpu
        // x1: host context
        // x2-x17: clobbered by macros
-       // x18: guest context
+       // x29: guest context
 
        // Store the host regs
        save_callee_saved_regs x1
@@ -67,31 +74,28 @@ alternative_else_nop_endif
        ret
 
 1:
-       add     x18, x0, #VCPU_CONTEXT
+       add     x29, x0, #VCPU_CONTEXT
 
        // Macro ptrauth_switch_to_guest format:
        //      ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
        // The below macro to restore guest keys is not implemented in C code
        // as it may cause Pointer Authentication key signing mismatch errors
        // when this feature is enabled for kernel code.
-       ptrauth_switch_to_guest x18, x0, x1, x2
+       ptrauth_switch_to_guest x29, x0, x1, x2
 
        // Restore guest regs x0-x17
-       ldp     x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
-       ldp     x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
-       ldp     x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
-       ldp     x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
-       ldp     x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
-       ldp     x10, x11, [x18, #CPU_XREG_OFFSET(10)]
-       ldp     x12, x13, [x18, #CPU_XREG_OFFSET(12)]
-       ldp     x14, x15, [x18, #CPU_XREG_OFFSET(14)]
-       ldp     x16, x17, [x18, #CPU_XREG_OFFSET(16)]
-
-       // Restore guest regs x19-x29, lr
-       restore_callee_saved_regs x18
-
-       // Restore guest reg x18
-       ldr     x18,      [x18, #CPU_XREG_OFFSET(18)]
+       ldp     x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
+       ldp     x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
+       ldp     x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
+       ldp     x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
+       ldp     x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
+       ldp     x10, x11, [x29, #CPU_XREG_OFFSET(10)]
+       ldp     x12, x13, [x29, #CPU_XREG_OFFSET(12)]
+       ldp     x14, x15, [x29, #CPU_XREG_OFFSET(14)]
+       ldp     x16, x17, [x29, #CPU_XREG_OFFSET(16)]
+
+       // Restore guest regs x18-x29, lr
+       restore_callee_saved_regs x29
 
        // Do not touch any register after this!
        eret
@@ -114,7 +118,7 @@ ENTRY(__guest_exit)
        // Retrieve the guest regs x0-x1 from the stack
        ldp     x2, x3, [sp], #16       // x0, x1
 
-       // Store the guest regs x0-x1 and x4-x18
+       // Store the guest regs x0-x1 and x4-x17
        stp     x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
        stp     x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
        stp     x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
@@ -123,9 +127,8 @@ ENTRY(__guest_exit)
        stp     x12, x13, [x1, #CPU_XREG_OFFSET(12)]
        stp     x14, x15, [x1, #CPU_XREG_OFFSET(14)]
        stp     x16, x17, [x1, #CPU_XREG_OFFSET(16)]
-       str     x18,      [x1, #CPU_XREG_OFFSET(18)]
 
-       // Store the guest regs x19-x29, lr
+       // Store the guest regs x18-x29, lr
        save_callee_saved_regs x1
 
        get_host_ctxt   x2, x3
index 72fbbd86eb5e8fd1bf79cc65e8950d2fb66e3453..dfe8dd1725128405a1661a946782d812695d2d15 100644 (file)
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+       /*
+        * When the system doesn't support FP/SIMD, we cannot rely on
+        * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
+        * abort on the very first access to FP and thus we should never
+        * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
+        * trap the accesses.
+        */
+       if (!system_supports_fpsimd() ||
+           vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
                vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
                                      KVM_ARM64_FP_HOST);
 
@@ -119,7 +127,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
        write_sysreg(val, cptr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
                isb();
@@ -158,11 +166,11 @@ static void deactivate_traps_vhe(void)
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 
        /*
-        * ARM erratum 1165522 requires the actual execution of the above
-        * before we can switch to the EL2/EL0 translation regime used by
+        * ARM errata 1165522 and 1530923 require the actual execution of the
+        * above before we can switch to the EL2/EL0 translation regime used by
         * the host.
         */
-       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
 
        write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
@@ -173,7 +181,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
        u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;
 
                /*
index 22b8128d19f62855d181d930ee2c18f4fde7845c..7672a978926cabc18abf239560becf70b60bce29 100644 (file)
@@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[MPIDR_EL1],         vmpidr_el2);
        write_sysreg(ctxt->sys_regs[CSSELR_EL1],        csselr_el1);
 
-       if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
                write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
        } else  if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg(ctxt->sys_regs[PAR_EL1],           par_el1);
        write_sysreg(ctxt->sys_regs[TPIDR_EL1],         tpidr_el1);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) &&
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
            ctxt->__hyp_running_vcpu) {
                /*
                 * Must only be done for host registers, hence the context
index c2bc17ca6430b635dc3d20c4efc941088c5ca1de..92f560e3e1aa134f00197019e75d0bf2c2ca0ee2 100644 (file)
@@ -23,10 +23,10 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 
        local_irq_save(cxt->flags);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
                /*
-                * For CPUs that are affected by ARM erratum 1165522, we
-                * cannot trust stage-1 to be in a correct state at that
+                * For CPUs that are affected by ARM errata 1165522 or 1530923,
+                * we cannot trust stage-1 to be in a correct state at that
                 * point. Since we do not want to force a full load of the
                 * vcpu state, we prevent the EL1 page-table walker to
                 * allocate new TLBs. This is done by setting the EPD bits
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
                                                  struct tlb_inv_context *cxt)
 {
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;
 
                /*
@@ -103,7 +103,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
                write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
        write_sysreg(0, vttbr_el2);
 
-       if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+       if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                /* Ensure write of the host VMID */
                isb();
                /* Restore the host's TCR_EL1 */
index 46822afc57e00461843b56f72cd403a939f17e69..3e909b117f0cd8e5bb704463c363e9ba23fb6041 100644 (file)
@@ -1424,7 +1424,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        ID_SANITISED(ID_ISAR4_EL1),
        ID_SANITISED(ID_ISAR5_EL1),
        ID_SANITISED(ID_MMFR4_EL1),
-       ID_UNALLOCATED(2,7),
+       ID_SANITISED(ID_ISAR6_EL1),
 
        /* CRm=3 */
        ID_SANITISED(MVFR0_EL1),
@@ -2098,9 +2098,9 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
                WARN_ON(1);
        }
 
-       kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
-               cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
-       print_sys_reg_instr(params);
+       print_sys_reg_msg(params,
+                         "Unsupported guest CP%d access at: %08lx [%08lx]\n",
+                         cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
        kvm_inject_undefined(vcpu);
 }
 
@@ -2233,6 +2233,12 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
                                NULL, 0);
 }
 
+static bool is_imp_def_sys_reg(struct sys_reg_params *params)
+{
+       // See ARM DDI 0487E.a, section D12.3.2
+       return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
+}
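As a worked check of the predicate above: (CRn & 0b1011) == 0b1011 holds exactly for CRn == 11 (0b1011) and CRn == 15 (0b1111), which together with Op0 == 3 covers the implementation-defined system-register encoding space reserved by the cited section.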
+
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *params)
 {
@@ -2248,10 +2254,12 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 
        if (likely(r)) {
                perform_access(vcpu, params, r);
+       } else if (is_imp_def_sys_reg(params)) {
+               kvm_inject_undefined(vcpu);
        } else {
-               kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
-                       *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
-               print_sys_reg_instr(params);
+               print_sys_reg_msg(params,
+                                 "Unsupported guest sys_reg access at: %lx [%08lx]\n",
+                                 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
                kvm_inject_undefined(vcpu);
        }
        return 1;
@@ -2360,8 +2368,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                return NULL;
 
+       if (!index_to_params(id, &params))
+               return NULL;
+
        table = get_target_table(vcpu->arch.target, true, &num);
-       r = find_reg_by_id(id, &params, table, num);
+       r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 
index 9bca0312d7982c4d1a8620ef907ea683abd6c598..5a6fc30f59894db482b667c20dd892baba0e6c7f 100644 (file)
@@ -62,11 +62,24 @@ struct sys_reg_desc {
 #define REG_HIDDEN_USER                (1 << 0) /* hidden from userspace ioctls */
 #define REG_HIDDEN_GUEST       (1 << 1) /* hidden from guest */
 
-static inline void print_sys_reg_instr(const struct sys_reg_params *p)
+static __printf(2, 3)
+inline void print_sys_reg_msg(const struct sys_reg_params *p,
+                                      char *fmt, ...)
 {
+       va_list va;
+
+       va_start(va, fmt);
        /* Look, we even formatted it for you to paste into the table! */
-       kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
+       kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
+                     &(struct va_format){ fmt, &va },
                      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
+       va_end(va);
+}
+
+static inline void print_sys_reg_instr(const struct sys_reg_params *p)
+{
+       /* GCC warns on an empty format string */
+       print_sys_reg_msg(p, "%s", "");
 }
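The %pV specifier used here nests one format/va_list pair inside another printk; a minimal sketch of the pattern (valid for the printk family only, and the va_list must not be reused afterwards):

    /* Sketch: forward caller varargs through a single printk via %pV */
    static __printf(1, 2) void say_sketch(const char *fmt, ...)
    {
            va_list va;

            va_start(va, fmt);
            pr_info("prefix: %pV\n", &(struct va_format){ fmt, &va });
            va_end(va);
    }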
 
 static inline bool ignore_write(struct kvm_vcpu *vcpu,
index c21b936dc01db20c086f816e0b71fa954319a768..2fc253466dbf8a657796bd1f8fa71a8508ddc23c 100644 (file)
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 lib-y          := clear_user.o delay.o copy_from_user.o                \
                   copy_to_user.o copy_in_user.o copy_page.o            \
-                  clear_page.o memchr.o memcpy.o memmove.o memset.o    \
-                  memcmp.o strcmp.o strncmp.o strlen.o strnlen.o       \
-                  strchr.o strrchr.o tishift.o
+                  clear_page.o csum.o memchr.o memcpy.o memmove.o      \
+                  memset.o memcmp.o strcmp.o strncmp.o strlen.o        \
+                  strnlen.o strchr.o strrchr.o tishift.o
 
 ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
 obj-$(CONFIG_XOR_BLOCKS)       += xor-neon.o
index 78a9ef66288ae6cf6195d40906c7df2cfead1474..073acbf02a7c842520eeb42df0df4f2dd3a15480 100644 (file)
@@ -14,7 +14,7 @@
  * Parameters:
  *     x0 - dest
  */
-ENTRY(clear_page)
+SYM_FUNC_START(clear_page)
        mrs     x1, dczid_el0
        and     w1, w1, #0xf
        mov     x2, #4
@@ -25,5 +25,5 @@ ENTRY(clear_page)
        tst     x0, #(PAGE_SIZE - 1)
        b.ne    1b
        ret
-ENDPROC(clear_page)
+SYM_FUNC_END(clear_page)
 EXPORT_SYMBOL(clear_page)
index aeafc03e961a822c82772a060350fd7728de1de4..48a3a26eff663589a18c3944eff17d1240f66822 100644 (file)
@@ -19,7 +19,7 @@
  *
  * Alignment fixed up by hardware.
  */
-ENTRY(__arch_clear_user)
+SYM_FUNC_START(__arch_clear_user)
        mov     x2, x1                  // save the size for fixup return
        subs    x1, x1, #8
        b.mi    2f
@@ -40,7 +40,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:     mov     x0, #0
        ret
-ENDPROC(__arch_clear_user)
+SYM_FUNC_END(__arch_clear_user)
 EXPORT_SYMBOL(__arch_clear_user)
 
        .section .fixup,"ax"
index ebb3c06cbb5d8d7645b98881bf0ff2db8d9d1aa6..8e25e89ad01fd7daa41065bae7e1c9745dce986b 100644 (file)
        .endm
 
 end    .req    x5
-ENTRY(__arch_copy_from_user)
+SYM_FUNC_START(__arch_copy_from_user)
        add     end, x0, x2
 #include "copy_template.S"
        mov     x0, #0                          // Nothing to copy
        ret
-ENDPROC(__arch_copy_from_user)
+SYM_FUNC_END(__arch_copy_from_user)
 EXPORT_SYMBOL(__arch_copy_from_user)
 
        .section .fixup,"ax"
index 3d8153a1ebce94800bfe25a50bf4a5e247b9cacf..667139013ed171ef4b5de1ba916941060858475c 100644 (file)
 
 end    .req    x5
 
-ENTRY(__arch_copy_in_user)
+SYM_FUNC_START(__arch_copy_in_user)
        add     end, x0, x2
 #include "copy_template.S"
        mov     x0, #0
        ret
-ENDPROC(__arch_copy_in_user)
+SYM_FUNC_END(__arch_copy_in_user)
 EXPORT_SYMBOL(__arch_copy_in_user)
 
        .section .fixup,"ax"
index bbb8562396afe011add6ad6b33788ae41e6eacd2..e7a793961408d09c5e36991d45e2156f85faaca5 100644 (file)
@@ -17,7 +17,7 @@
  *     x0 - dest
  *     x1 - src
  */
-ENTRY(copy_page)
+SYM_FUNC_START(copy_page)
 alternative_if ARM64_HAS_NO_HW_PREFETCH
        // Prefetch three cache lines ahead.
        prfm    pldl1strm, [x1, #128]
@@ -34,46 +34,46 @@ alternative_else_nop_endif
        ldp     x14, x15, [x1, #96]
        ldp     x16, x17, [x1, #112]
 
-       mov     x18, #(PAGE_SIZE - 128)
+       add     x0, x0, #256
        add     x1, x1, #128
 1:
-       subs    x18, x18, #128
+       tst     x0, #(PAGE_SIZE - 1)
 
 alternative_if ARM64_HAS_NO_HW_PREFETCH
        prfm    pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
-       stnp    x2, x3, [x0]
+       stnp    x2, x3, [x0, #-256]
        ldp     x2, x3, [x1]
-       stnp    x4, x5, [x0, #16]
+       stnp    x4, x5, [x0, #16 - 256]
        ldp     x4, x5, [x1, #16]
-       stnp    x6, x7, [x0, #32]
+       stnp    x6, x7, [x0, #32 - 256]
        ldp     x6, x7, [x1, #32]
-       stnp    x8, x9, [x0, #48]
+       stnp    x8, x9, [x0, #48 - 256]
        ldp     x8, x9, [x1, #48]
-       stnp    x10, x11, [x0, #64]
+       stnp    x10, x11, [x0, #64 - 256]
        ldp     x10, x11, [x1, #64]
-       stnp    x12, x13, [x0, #80]
+       stnp    x12, x13, [x0, #80 - 256]
        ldp     x12, x13, [x1, #80]
-       stnp    x14, x15, [x0, #96]
+       stnp    x14, x15, [x0, #96 - 256]
        ldp     x14, x15, [x1, #96]
-       stnp    x16, x17, [x0, #112]
+       stnp    x16, x17, [x0, #112 - 256]
        ldp     x16, x17, [x1, #112]
 
        add     x0, x0, #128
        add     x1, x1, #128
 
-       b.gt    1b
+       b.ne    1b
 
-       stnp    x2, x3, [x0]
-       stnp    x4, x5, [x0, #16]
-       stnp    x6, x7, [x0, #32]
-       stnp    x8, x9, [x0, #48]
-       stnp    x10, x11, [x0, #64]
-       stnp    x12, x13, [x0, #80]
-       stnp    x14, x15, [x0, #96]
-       stnp    x16, x17, [x0, #112]
+       stnp    x2, x3, [x0, #-256]
+       stnp    x4, x5, [x0, #16 - 256]
+       stnp    x6, x7, [x0, #32 - 256]
+       stnp    x8, x9, [x0, #48 - 256]
+       stnp    x10, x11, [x0, #64 - 256]
+       stnp    x12, x13, [x0, #80 - 256]
+       stnp    x14, x15, [x0, #96 - 256]
+       stnp    x16, x17, [x0, #112 - 256]
 
        ret
-ENDPROC(copy_page)
+SYM_FUNC_END(copy_page)
 EXPORT_SYMBOL(copy_page)
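
The rewrite above trades the x18 byte counter for pointer arithmetic: the destination register runs 256 bytes ahead of the stores, and the loop exits once it wraps onto the next page boundary, which frees x18 for other uses (e.g. the shadow call stack). A rough C model of the new exit condition only — not the kernel routine — assuming a page-aligned destination and 4 KiB pages:

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* Equivalent of "tst x0, #(PAGE_SIZE - 1); b.ne 1b": copy 128-byte
     * chunks until the advancing destination pointer is page-aligned. */
    static void copy_page_model(void *dest, const void *src)
    {
            unsigned char *d = dest;
            const unsigned char *s = src;

            do {
                    memcpy(d, s, 128);
                    d += 128;
                    s += 128;
            } while ((uintptr_t)d & (PAGE_SIZE - 1));
    }
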
index 357eae2c18ebb1c7a6485482bf409c9cb2b3ae8a..1a104d0089f3a4036574bb6d1c5ac9796740dc5b 100644 (file)
        .endm
 
 end    .req    x5
-ENTRY(__arch_copy_to_user)
+SYM_FUNC_START(__arch_copy_to_user)
        add     end, x0, x2
 #include "copy_template.S"
        mov     x0, #0
        ret
-ENDPROC(__arch_copy_to_user)
+SYM_FUNC_END(__arch_copy_to_user)
 EXPORT_SYMBOL(__arch_copy_to_user)
 
        .section .fixup,"ax"
index e6135f16649b17b256d287907f20894af919c947..243e107e98963b21552ea2cc2ee52e9fc24026e3 100644 (file)
@@ -85,17 +85,17 @@ CPU_BE(     rev16           w3, w3          )
        .endm
 
        .align          5
-ENTRY(crc32_le)
+SYM_FUNC_START(crc32_le)
 alternative_if_not ARM64_HAS_CRC32
        b               crc32_le_base
 alternative_else_nop_endif
        __crc32
-ENDPROC(crc32_le)
+SYM_FUNC_END(crc32_le)
 
        .align          5
-ENTRY(__crc32c_le)
+SYM_FUNC_START(__crc32c_le)
 alternative_if_not ARM64_HAS_CRC32
        b               __crc32c_le_base
 alternative_else_nop_endif
        __crc32         c
-ENDPROC(__crc32c_le)
+SYM_FUNC_END(__crc32c_le)
diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
new file mode 100644 (file)
index 0000000..1f82c66
--- /dev/null
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 Arm Ltd.
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <net/checksum.h>
+
+/* Looks dumb, but generates nice-ish code */
+static u64 accumulate(u64 sum, u64 data)
+{
+       __uint128_t tmp = (__uint128_t)sum + data;
+       return tmp + (tmp >> 64);
+}
+
+unsigned int do_csum(const unsigned char *buff, int len)
+{
+       unsigned int offset, shift, sum;
+       const u64 *ptr;
+       u64 data, sum64 = 0;
+
+       if (unlikely(len == 0))
+               return 0;
+
+       offset = (unsigned long)buff & 7;
+       /*
+        * This is to all intents and purposes safe, since rounding down cannot
+        * result in a different page or cache line being accessed, and @buff
+        * should absolutely not be pointing to anything read-sensitive. We do,
+        * however, have to be careful not to piss off KASAN, which means using
+        * unchecked reads to accommodate the head and tail, for which we'll
+        * compensate with an explicit check up-front.
+        */
+       kasan_check_read(buff, len);
+       ptr = (u64 *)(buff - offset);
+       len = len + offset - 8;
+
+       /*
+        * Head: zero out any excess leading bytes. Shifting back by the same
+        * amount should be at least as fast as any other way of handling the
+        * odd/even alignment, and means we can ignore it until the very end.
+        */
+       shift = offset * 8;
+       data = READ_ONCE_NOCHECK(*ptr++);
+#ifdef __LITTLE_ENDIAN
+       data = (data >> shift) << shift;
+#else
+       data = (data << shift) >> shift;
+#endif
+
+       /*
+        * Body: straightforward aligned loads from here on (the paired loads
+        * underlying the quadword type still only need dword alignment). The
+        * main loop strictly excludes the tail, so the second loop will always
+        * run at least once.
+        */
+       while (unlikely(len > 64)) {
+               __uint128_t tmp1, tmp2, tmp3, tmp4;
+
+               tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+               tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
+               tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
+               tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+
+               len -= 64;
+               ptr += 8;
+
+               /* This is the "don't dump the carry flag into a GPR" idiom */
+               tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+               tmp2 += (tmp2 >> 64) | (tmp2 << 64);
+               tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+               tmp4 += (tmp4 >> 64) | (tmp4 << 64);
+               tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
+               tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+               tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
+               tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+               tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
+               tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+               tmp1 = ((tmp1 >> 64) << 64) | sum64;
+               tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+               sum64 = tmp1 >> 64;
+       }
+       while (len > 8) {
+               __uint128_t tmp;
+
+               sum64 = accumulate(sum64, data);
+               tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+
+               len -= 16;
+               ptr += 2;
+
+#ifdef __LITTLE_ENDIAN
+               data = tmp >> 64;
+               sum64 = accumulate(sum64, tmp);
+#else
+               data = tmp;
+               sum64 = accumulate(sum64, tmp >> 64);
+#endif
+       }
+       if (len > 0) {
+               sum64 = accumulate(sum64, data);
+               data = READ_ONCE_NOCHECK(*ptr);
+               len -= 8;
+       }
+       /*
+        * Tail: zero any over-read bytes similarly to the head, again
+        * preserving odd/even alignment.
+        */
+       shift = len * -8;
+#ifdef __LITTLE_ENDIAN
+       data = (data << shift) >> shift;
+#else
+       data = (data >> shift) << shift;
+#endif
+       sum64 = accumulate(sum64, data);
+
+       /* Finally, folding */
+       sum64 += (sum64 >> 32) | (sum64 << 32);
+       sum = sum64 >> 32;
+       sum += (sum >> 16) | (sum << 16);
+       if (offset & 1)
+               return (u16)swab32(sum);
+
+       return sum >> 16;
+}
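
Two of the tricks in do_csum() above, restated as a small userspace sketch (little-endian assumed; mask_head() and fold64() are illustrative names): excess head and tail bytes are zeroed by shifting them out and back, and the final reduction adds a rotated copy of the accumulator so the ones'-complement end-around carry lands in the half that is kept — the same rotate-and-add the main loop performs at 128 bits:

    #include <stdint.h>
    #include <stdio.h>

    /* Zero the low 'offset' bytes of a little-endian load, as the head
     * handling does: shift the unwanted bytes out, then back in as zeros. */
    static uint64_t mask_head(uint64_t data, unsigned int offset)
    {
            unsigned int shift = offset * 8;

            return (data >> shift) << shift;
    }

    /* Fold a 64-bit ones'-complement accumulator to 16 bits: each
     * rotate-and-add keeps the carry-out in the half extracted next. */
    static uint16_t fold64(uint64_t sum64)
    {
            uint32_t sum;

            sum64 += (sum64 >> 32) | (sum64 << 32);
            sum = sum64 >> 32;
            sum += (sum >> 16) | (sum << 16);
            return sum >> 16;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)mask_head(0x1122334455667788ULL, 3));
            printf("%#x\n", fold64(0x0001fffe0001fffeULL));
            return 0;
    }
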
index 48a3ab636e4fb1b12cb0db91c74d55b6e0f10b79..edf6b970a2774374ad79a828eaefd645cf7d2f1e 100644 (file)
@@ -19,7 +19,7 @@
  * Returns:
  *     x0 - address of first occurrence of 'c' or 0
  */
-WEAK(memchr)
+SYM_FUNC_START_WEAK_PI(memchr)
        and     w1, w1, #0xff
 1:     subs    x2, x2, #1
        b.mi    2f
@@ -30,5 +30,5 @@ WEAK(memchr)
        ret
 2:     mov     x0, #0
        ret
-ENDPIPROC(memchr)
+SYM_FUNC_END_PI(memchr)
 EXPORT_SYMBOL_NOKASAN(memchr)
index b297bdaaf5498ba8a30f228b1f87a583e1a37b7f..c0671e793ea9183e5ddc63696d79c8fa2466c2a6 100644 (file)
@@ -46,7 +46,7 @@ pos           .req    x11
 limit_wd       .req    x12
 mask           .req    x13
 
-WEAK(memcmp)
+SYM_FUNC_START_WEAK_PI(memcmp)
        cbz     limit, .Lret0
        eor     tmp1, src1, src2
        tst     tmp1, #7
@@ -243,5 +243,5 @@ CPU_LE( rev data2, data2 )
 .Lret0:
        mov     result, #0
        ret
-ENDPIPROC(memcmp)
+SYM_FUNC_END_PI(memcmp)
 EXPORT_SYMBOL_NOKASAN(memcmp)
index d79f48994dbb2c5564b6387dad106839e9b24796..9f382adfa88a221b481c1ec4bc94ade038e754af 100644 (file)
        .endm
 
        .weak memcpy
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_PI(memcpy)
 #include "copy_template.S"
        ret
-ENDPIPROC(memcpy)
+SYM_FUNC_END_PI(memcpy)
 EXPORT_SYMBOL(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(__memcpy)
index 784775136480618f9c297116a30fc8bfb05806e3..02cda2e33bde292916830beca52c1d4ef271fd4a 100644 (file)
@@ -46,8 +46,8 @@ D_l   .req    x13
 D_h    .req    x14
 
        .weak memmove
-ENTRY(__memmove)
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_PI(memmove)
        cmp     dstin, src
        b.lo    __memcpy
        add     tmp1, src, count
@@ -184,7 +184,7 @@ ENTRY(memmove)
        tst     count, #0x3f
        b.ne    .Ltail63
        ret
-ENDPIPROC(memmove)
+SYM_FUNC_END_PI(memmove)
 EXPORT_SYMBOL(memmove)
-ENDPROC(__memmove)
+SYM_FUNC_END_ALIAS(__memmove)
 EXPORT_SYMBOL(__memmove)
index 9fb97e6bc56025122e2f0e036b5c957944611714..77c3c7ba008423389aff39923e81225fb4082426 100644 (file)
@@ -43,8 +43,8 @@ tmp3w         .req    w9
 tmp3           .req    x9
 
        .weak memset
-ENTRY(__memset)
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(__memset)
+SYM_FUNC_START_PI(memset)
        mov     dst, dstin      /* Preserve return value.  */
        and     A_lw, val, #255
        orr     A_lw, A_lw, A_lw, lsl #8
@@ -203,7 +203,7 @@ ENTRY(memset)
        ands    count, count, zva_bits_x
        b.ne    .Ltail_maybe_long
        ret
-ENDPIPROC(memset)
+SYM_FUNC_END_PI(memset)
 EXPORT_SYMBOL(memset)
-ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(__memset)
 EXPORT_SYMBOL(__memset)
index ca3ec18171a43a9650fc284a3d1336ad931531c0..1f47eae3b0d6d618d24c347db7c2da9ffce98068 100644 (file)
@@ -18,7 +18,7 @@
  * Returns:
  *     x0 - address of first occurrence of 'c' or 0
  */
-WEAK(strchr)
+SYM_FUNC_START_WEAK(strchr)
        and     w1, w1, #0xff
 1:     ldrb    w2, [x0], #1
        cmp     w2, w1
@@ -28,5 +28,5 @@ WEAK(strchr)
        cmp     w2, w1
        csel    x0, x0, xzr, eq
        ret
-ENDPROC(strchr)
+SYM_FUNC_END(strchr)
 EXPORT_SYMBOL_NOKASAN(strchr)
index e9aefbe0b7401091b63747a6d3f28acbcc48c14e..4767540d1b94ed4bacb2903cbf3904d376c4f5d9 100644 (file)
@@ -48,7 +48,7 @@ tmp3          .req    x9
 zeroones       .req    x10
 pos            .req    x11
 
-WEAK(strcmp)
+SYM_FUNC_START_WEAK_PI(strcmp)
        eor     tmp1, src1, src2
        mov     zeroones, #REP8_01
        tst     tmp1, #7
@@ -219,5 +219,5 @@ CPU_BE(     orr     syndrome, diff, has_nul )
        lsr     data1, data1, #56
        sub     result, data1, data2, lsr #56
        ret
-ENDPIPROC(strcmp)
+SYM_FUNC_END_PI(strcmp)
 EXPORT_SYMBOL_NOKASAN(strcmp)
index 87b0cb066915fadd7e6b419c01ac8d0a1ed7df82..ee3ed882dd79fbfd4aecdbd6c4b5e1948603011b 100644 (file)
@@ -44,7 +44,7 @@ pos           .req    x12
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
-WEAK(strlen)
+SYM_FUNC_START_WEAK_PI(strlen)
        mov     zeroones, #REP8_01
        bic     src, srcin, #15
        ands    tmp1, srcin, #15
@@ -111,5 +111,5 @@ CPU_LE( lsr tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
        csinv   data1, data1, xzr, le
        csel    data2, data2, data2a, le
        b       .Lrealigned
-ENDPIPROC(strlen)
+SYM_FUNC_END_PI(strlen)
 EXPORT_SYMBOL_NOKASAN(strlen)
index f571581888fa4ae654f669b9b8232cf78a78ad47..2a7ee949ed4714fd376a0913e6cc66bced6391e5 100644 (file)
@@ -52,7 +52,7 @@ limit_wd      .req    x13
 mask           .req    x14
 endloop                .req    x15
 
-WEAK(strncmp)
+SYM_FUNC_START_WEAK_PI(strncmp)
        cbz     limit, .Lret0
        eor     tmp1, src1, src2
        mov     zeroones, #REP8_01
@@ -295,5 +295,5 @@ CPU_BE( orr syndrome, diff, has_nul )
 .Lret0:
        mov     result, #0
        ret
-ENDPIPROC(strncmp)
+SYM_FUNC_END_PI(strncmp)
 EXPORT_SYMBOL_NOKASAN(strncmp)
index c0bac9493c683e50fcafb25b7c79c1337d731a7a..b72913a990389a22be61fc981a730816e9a427b6 100644 (file)
@@ -47,7 +47,7 @@ limit_wd      .req    x14
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
-WEAK(strnlen)
+SYM_FUNC_START_WEAK_PI(strnlen)
        cbz     limit, .Lhit_limit
        mov     zeroones, #REP8_01
        bic     src, srcin, #15
@@ -156,5 +156,5 @@ CPU_LE( lsr tmp2, tmp2, tmp4 )      /* Shift (tmp1 & 63).  */
 .Lhit_limit:
        mov     len, limit
        ret
-ENDPIPROC(strnlen)
+SYM_FUNC_END_PI(strnlen)
 EXPORT_SYMBOL_NOKASAN(strnlen)
index 794ac49ea43330a38bb5a3fd22958f63eb6df9fa..13132d1ed6d127913883f3215a3c0819cbb5598e 100644 (file)
@@ -18,7 +18,7 @@
  * Returns:
  *     x0 - address of last occurrence of 'c' or 0
  */
-WEAK(strrchr)
+SYM_FUNC_START_WEAK_PI(strrchr)
        mov     x3, #0
        and     w1, w1, #0xff
 1:     ldrb    w2, [x0], #1
@@ -29,5 +29,5 @@ WEAK(strrchr)
        b       1b
 2:     mov     x0, x3
        ret
-ENDPIPROC(strrchr)
+SYM_FUNC_END_PI(strrchr)
 EXPORT_SYMBOL_NOKASAN(strrchr)
index 047622536535d4d5698cc3e5cd6b16ad91ec749d..a88613834fb07b350031390cb3ecf4595bc877f5 100644 (file)
@@ -7,7 +7,7 @@
 
 #include <asm/assembler.h>
 
-ENTRY(__ashlti3)
+SYM_FUNC_START(__ashlti3)
        cbz     x2, 1f
        mov     x3, #64
        sub     x3, x3, x2
@@ -26,10 +26,10 @@ ENTRY(__ashlti3)
        lsl     x1, x0, x1
        mov     x0, x2
        ret
-ENDPROC(__ashlti3)
+SYM_FUNC_END(__ashlti3)
 EXPORT_SYMBOL(__ashlti3)
 
-ENTRY(__ashrti3)
+SYM_FUNC_START(__ashrti3)
        cbz     x2, 1f
        mov     x3, #64
        sub     x3, x3, x2
@@ -48,10 +48,10 @@ ENTRY(__ashrti3)
        asr     x0, x1, x0
        mov     x1, x2
        ret
-ENDPROC(__ashrti3)
+SYM_FUNC_END(__ashrti3)
 EXPORT_SYMBOL(__ashrti3)
 
-ENTRY(__lshrti3)
+SYM_FUNC_START(__lshrti3)
        cbz     x2, 1f
        mov     x3, #64
        sub     x3, x3, x2
@@ -70,5 +70,5 @@ ENTRY(__lshrti3)
        lsr     x0, x1, x0
        mov     x1, x2
        ret
-ENDPROC(__lshrti3)
+SYM_FUNC_END(__lshrti3)
 EXPORT_SYMBOL(__lshrti3)
index db767b072601e36fddb8ee7991d801d6f1f2f6d6..2d881f34dd9d54017c7565cc5749d88084ab4235 100644 (file)
@@ -24,7 +24,7 @@
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-ENTRY(__flush_icache_range)
+SYM_FUNC_START(__flush_icache_range)
        /* FALLTHROUGH */
 
 /*
@@ -37,7 +37,7 @@ ENTRY(__flush_icache_range)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-ENTRY(__flush_cache_user_range)
+SYM_FUNC_START(__flush_cache_user_range)
        uaccess_ttbr0_enable x2, x3, x4
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
@@ -66,8 +66,8 @@ alternative_else_nop_endif
 9:
        mov     x0, #-EFAULT
        b       1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+SYM_FUNC_END(__flush_icache_range)
+SYM_FUNC_END(__flush_cache_user_range)
 
 /*
  *     invalidate_icache_range(start,end)
@@ -77,7 +77,7 @@ ENDPROC(__flush_cache_user_range)
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-ENTRY(invalidate_icache_range)
+SYM_FUNC_START(invalidate_icache_range)
 alternative_if ARM64_HAS_CACHE_DIC
        mov     x0, xzr
        isb
@@ -94,7 +94,7 @@ alternative_else_nop_endif
 2:
        mov     x0, #-EFAULT
        b       1b
-ENDPROC(invalidate_icache_range)
+SYM_FUNC_END(invalidate_icache_range)
 
 /*
  *     __flush_dcache_area(kaddr, size)
@@ -105,10 +105,10 @@ ENDPROC(invalidate_icache_range)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__flush_dcache_area)
+SYM_FUNC_START_PI(__flush_dcache_area)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-ENDPIPROC(__flush_dcache_area)
+SYM_FUNC_END_PI(__flush_dcache_area)
 
 /*
  *     __clean_dcache_area_pou(kaddr, size)
@@ -119,14 +119,14 @@ ENDPIPROC(__flush_dcache_area)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__clean_dcache_area_pou)
+SYM_FUNC_START(__clean_dcache_area_pou)
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
        ret
 alternative_else_nop_endif
        dcache_by_line_op cvau, ish, x0, x1, x2, x3
        ret
-ENDPROC(__clean_dcache_area_pou)
+SYM_FUNC_END(__clean_dcache_area_pou)
 
 /*
  *     __inval_dcache_area(kaddr, size)
@@ -138,7 +138,8 @@ ENDPROC(__clean_dcache_area_pou)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__inval_dcache_area)
+SYM_FUNC_START_LOCAL(__dma_inv_area)
+SYM_FUNC_START_PI(__inval_dcache_area)
        /* FALLTHROUGH */
 
 /*
@@ -146,7 +147,6 @@ ENTRY(__inval_dcache_area)
  *     - start   - virtual start address of region
  *     - size    - size in question
  */
-__dma_inv_area:
        add     x1, x1, x0
        dcache_line_size x2, x3
        sub     x3, x2, #1
@@ -165,8 +165,8 @@ __dma_inv_area:
        b.lo    2b
        dsb     sy
        ret
-ENDPIPROC(__inval_dcache_area)
-ENDPROC(__dma_inv_area)
+SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END(__dma_inv_area)
 
 /*
  *     __clean_dcache_area_poc(kaddr, size)
@@ -177,7 +177,8 @@ ENDPROC(__dma_inv_area)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__clean_dcache_area_poc)
+SYM_FUNC_START_LOCAL(__dma_clean_area)
+SYM_FUNC_START_PI(__clean_dcache_area_poc)
        /* FALLTHROUGH */
 
 /*
@@ -185,11 +186,10 @@ ENTRY(__clean_dcache_area_poc)
  *     - start   - virtual start address of region
  *     - size    - size in question
  */
-__dma_clean_area:
        dcache_by_line_op cvac, sy, x0, x1, x2, x3
        ret
-ENDPIPROC(__clean_dcache_area_poc)
-ENDPROC(__dma_clean_area)
+SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END(__dma_clean_area)
 
 /*
  *     __clean_dcache_area_pop(kaddr, size)
@@ -200,13 +200,13 @@ ENDPROC(__dma_clean_area)
  *     - kaddr   - kernel address
  *     - size    - size in question
  */
-ENTRY(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(__clean_dcache_area_pop)
        alternative_if_not ARM64_HAS_DCPOP
        b       __clean_dcache_area_poc
        alternative_else_nop_endif
        dcache_by_line_op cvap, sy, x0, x1, x2, x3
        ret
-ENDPIPROC(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(__clean_dcache_area_pop)
 
 /*
  *     __dma_flush_area(start, size)
@@ -216,10 +216,10 @@ ENDPIPROC(__clean_dcache_area_pop)
  *     - start   - virtual start address of region
  *     - size    - size in question
  */
-ENTRY(__dma_flush_area)
+SYM_FUNC_START_PI(__dma_flush_area)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-ENDPIPROC(__dma_flush_area)
+SYM_FUNC_END_PI(__dma_flush_area)
 
 /*
  *     __dma_map_area(start, size, dir)
@@ -227,11 +227,11 @@ ENDPIPROC(__dma_flush_area)
  *     - size  - size of region
  *     - dir   - DMA direction
  */
-ENTRY(__dma_map_area)
+SYM_FUNC_START_PI(__dma_map_area)
        cmp     w2, #DMA_FROM_DEVICE
        b.eq    __dma_inv_area
        b       __dma_clean_area
-ENDPIPROC(__dma_map_area)
+SYM_FUNC_END_PI(__dma_map_area)
 
 /*
  *     __dma_unmap_area(start, size, dir)
@@ -239,8 +239,8 @@ ENDPIPROC(__dma_map_area)
  *     - size  - size of region
  *     - dir   - DMA direction
  */
-ENTRY(__dma_unmap_area)
+SYM_FUNC_START_PI(__dma_unmap_area)
        cmp     w2, #DMA_TO_DEVICE
        b.ne    __dma_inv_area
        ret
-ENDPIPROC(__dma_unmap_area)
+SYM_FUNC_END_PI(__dma_unmap_area)
index b5e329fde2dd31ea5d7591deb499a4ac901c24c6..8ef73e89d51485950b0bc05689c64120b8fedebc 100644 (file)
@@ -29,15 +29,9 @@ static cpumask_t tlb_flush_pending;
 #define ASID_MASK              (~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION     (1UL << asid_bits)
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS         (ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid)         (((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx)          (((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS         (ASID_FIRST_VERSION)
+#define NUM_USER_ASIDS         ASID_FIRST_VERSION
 #define asid2idx(asid)         ((asid) & ~ASID_MASK)
 #define idx2asid(idx)          asid2idx(idx)
-#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -77,13 +71,33 @@ void verify_cpu_asid_bits(void)
        }
 }
 
+static void set_kpti_asid_bits(void)
+{
+       unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+       /*
+        * With KPTI, kernel/user ASIDs are allocated in
+        * pairs; the bottom bit distinguishes the two: if it
+        * is set, then the ASID will map only userspace. Thus
+        * mark the even ASIDs as reserved for the kernel.
+        */
+       memset(asid_map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+       if (arm64_kernel_unmapped_at_el0())
+               set_kpti_asid_bits();
+       else
+               bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
 static void flush_context(void)
 {
        int i;
        u64 asid;
 
        /* Update the list of reserved ASIDs and the ASID bitmap. */
-       bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+       set_reserved_asid_bits();
 
        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
@@ -261,6 +275,14 @@ static int asids_init(void)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
                      NUM_USER_ASIDS);
 
+       /*
+        * We cannot call set_reserved_asid_bits() here because CPU
+        * caps are not finalized yet, so it is safer to assume KPTI
+        * and reserve kernel ASIDs from the beginning.
+        */
+       if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+               set_kpti_asid_bits();
+
        pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
        return 0;
 }
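
A hedged userspace illustration (a toy map, not the kernel's bitmap API) of what the 0xaa fill above does: every second bit starts out set, so the allocator's search for a clear bit can only ever return one ASID of each kernel/user pair, leaving its partner permanently reserved:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char map[2];   /* toy bitmap covering 16 ASIDs */
            int i;

            memset(map, 0xaa, sizeof(map)); /* 0xaa = 0b10101010 */
            for (i = 0; i < 16; i++)
                    printf("asid %2d: %s\n", i,
                           (map[i / 8] >> (i % 8)) & 1 ? "reserved" : "allocatable");
            return 0;
    }
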
index 077b02a2d4d3333e9af010f6c92a011b60f0bd1a..85566d32958f5b8fcc5bdc35ebc2d9d2b2d0469a 100644 (file)
@@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        const struct fault_info *inf;
        struct mm_struct *mm = current->mm;
        vm_fault_t fault, major = 0;
-       unsigned long vm_flags = VM_READ | VM_WRITE;
+       unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
        unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        if (kprobe_page_fault(regs, esr))
index 5a3b15a14a7f116bc6b61a9279ed5ce84723702e..40797cbfba2d67412eee7864325e9f89adeb08d2 100644 (file)
@@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
        /*
         * FIXME: Cleanup page tables (also in arch_add_memory() in case
@@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
         * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
         * unlocked yet.
         */
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
index 9ce7bd9d4d9ceaf3de79ce4ae67318f2988abdb8..250c49008d73c1fefdd4aa9dfdf7797426d96fde 100644 (file)
@@ -54,7 +54,7 @@ static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
 {
        unsigned long start = addr;
-       unsigned long size = PAGE_SIZE*numpages;
+       unsigned long size = PAGE_SIZE * numpages;
        unsigned long end = start + size;
        struct vm_struct *area;
        int i;
index a1e0592d1fbcd8833e41b2486869d7ae9898981b..aafed690241147f661b000532d4d0b788b37b04d 100644 (file)
 #define TCR_KASAN_FLAGS 0
 #endif
 
-#define MAIR(attr, mt) ((attr) << ((mt) * 8))
+/* Default MAIR_EL1 */
+#define MAIR_EL1_SET                                                   \
+       (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |      \
+        MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |        \
+        MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |            \
+        MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |              \
+        MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |                    \
+        MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
 
 #ifdef CONFIG_CPU_PM
 /**
@@ -50,7 +57,7 @@
  *
  * x0: virtual address of context pointer
  */
-ENTRY(cpu_do_suspend)
+SYM_FUNC_START(cpu_do_suspend)
        mrs     x2, tpidr_el0
        mrs     x3, tpidrro_el0
        mrs     x4, contextidr_el1
@@ -74,7 +81,7 @@ alternative_endif
        stp     x10, x11, [x0, #64]
        stp     x12, x13, [x0, #80]
        ret
-ENDPROC(cpu_do_suspend)
+SYM_FUNC_END(cpu_do_suspend)
 
 /**
  * cpu_do_resume - restore CPU register context
@@ -82,7 +89,7 @@ ENDPROC(cpu_do_suspend)
  * x0: Address of context pointer
  */
        .pushsection ".idmap.text", "awx"
-ENTRY(cpu_do_resume)
+SYM_FUNC_START(cpu_do_resume)
        ldp     x2, x3, [x0]
        ldp     x4, x5, [x0, #16]
        ldp     x6, x8, [x0, #32]
@@ -131,7 +138,7 @@ alternative_else_nop_endif
 
        isb
        ret
-ENDPROC(cpu_do_resume)
+SYM_FUNC_END(cpu_do_resume)
        .popsection
 #endif
 
@@ -142,7 +149,7 @@ ENDPROC(cpu_do_resume)
  *
  *     - pgd_phys - physical address of new TTB
  */
-ENTRY(cpu_do_switch_mm)
+SYM_FUNC_START(cpu_do_switch_mm)
        mrs     x2, ttbr1_el1
        mmid    x1, x1                          // get mm->context.id
        phys_to_ttbr x3, x0
@@ -161,7 +168,7 @@ alternative_else_nop_endif
        msr     ttbr0_el1, x3                   // now update TTBR0
        isb
        b       post_ttbr_update_workaround     // Back to C code...
-ENDPROC(cpu_do_switch_mm)
+SYM_FUNC_END(cpu_do_switch_mm)
 
        .pushsection ".idmap.text", "awx"
 
@@ -182,7 +189,7 @@ ENDPROC(cpu_do_switch_mm)
  * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
  * called by anything else. It can only be executed from a TTBR0 mapping.
  */
-ENTRY(idmap_cpu_replace_ttbr1)
+SYM_FUNC_START(idmap_cpu_replace_ttbr1)
        save_and_disable_daif flags=x2
 
        __idmap_cpu_set_reserved_ttbr1 x1, x3
@@ -194,7 +201,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
        restore_daif x2
 
        ret
-ENDPROC(idmap_cpu_replace_ttbr1)
+SYM_FUNC_END(idmap_cpu_replace_ttbr1)
        .popsection
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
@@ -222,7 +229,7 @@ ENDPROC(idmap_cpu_replace_ttbr1)
  */
 __idmap_kpti_flag:
        .long   1
-ENTRY(idmap_kpti_install_ng_mappings)
+SYM_FUNC_START(idmap_kpti_install_ng_mappings)
        cpu             .req    w0
        num_cpus        .req    w1
        swapper_pa      .req    x2
@@ -250,15 +257,15 @@ ENTRY(idmap_kpti_install_ng_mappings)
        /* We're the boot CPU. Wait for the others to catch up */
        sevl
 1:     wfe
-       ldaxr   w18, [flag_ptr]
-       eor     w18, w18, num_cpus
-       cbnz    w18, 1b
+       ldaxr   w17, [flag_ptr]
+       eor     w17, w17, num_cpus
+       cbnz    w17, 1b
 
        /* We need to walk swapper, so turn off the MMU. */
        pre_disable_mmu_workaround
-       mrs     x18, sctlr_el1
-       bic     x18, x18, #SCTLR_ELx_M
-       msr     sctlr_el1, x18
+       mrs     x17, sctlr_el1
+       bic     x17, x17, #SCTLR_ELx_M
+       msr     sctlr_el1, x17
        isb
 
        /* Everybody is enjoying the idmap, so we can rewrite swapper. */
@@ -281,9 +288,9 @@ skip_pgd:
        isb
 
        /* We're done: fire up the MMU again */
-       mrs     x18, sctlr_el1
-       orr     x18, x18, #SCTLR_ELx_M
-       msr     sctlr_el1, x18
+       mrs     x17, sctlr_el1
+       orr     x17, x17, #SCTLR_ELx_M
+       msr     sctlr_el1, x17
        isb
 
        /*
@@ -353,47 +360,48 @@ skip_pte:
        b.ne    do_pte
        b       next_pmd
 
+       .unreq  cpu
+       .unreq  num_cpus
+       .unreq  swapper_pa
+       .unreq  cur_pgdp
+       .unreq  end_pgdp
+       .unreq  pgd
+       .unreq  cur_pudp
+       .unreq  end_pudp
+       .unreq  pud
+       .unreq  cur_pmdp
+       .unreq  end_pmdp
+       .unreq  pmd
+       .unreq  cur_ptep
+       .unreq  end_ptep
+       .unreq  pte
+
        /* Secondary CPUs end up here */
 __idmap_kpti_secondary:
        /* Uninstall swapper before surgery begins */
-       __idmap_cpu_set_reserved_ttbr1 x18, x17
+       __idmap_cpu_set_reserved_ttbr1 x16, x17
 
        /* Increment the flag to let the boot CPU know we're ready */
-1:     ldxr    w18, [flag_ptr]
-       add     w18, w18, #1
-       stxr    w17, w18, [flag_ptr]
+1:     ldxr    w16, [flag_ptr]
+       add     w16, w16, #1
+       stxr    w17, w16, [flag_ptr]
        cbnz    w17, 1b
 
        /* Wait for the boot CPU to finish messing around with swapper */
        sevl
 1:     wfe
-       ldxr    w18, [flag_ptr]
-       cbnz    w18, 1b
+       ldxr    w16, [flag_ptr]
+       cbnz    w16, 1b
 
        /* All done, act like nothing happened */
-       offset_ttbr1 swapper_ttb, x18
+       offset_ttbr1 swapper_ttb, x16
        msr     ttbr1_el1, swapper_ttb
        isb
        ret
 
-       .unreq  cpu
-       .unreq  num_cpus
-       .unreq  swapper_pa
        .unreq  swapper_ttb
        .unreq  flag_ptr
-       .unreq  cur_pgdp
-       .unreq  end_pgdp
-       .unreq  pgd
-       .unreq  cur_pudp
-       .unreq  end_pudp
-       .unreq  pud
-       .unreq  cur_pmdp
-       .unreq  end_pmdp
-       .unreq  pmd
-       .unreq  cur_ptep
-       .unreq  end_ptep
-       .unreq  pte
-ENDPROC(idmap_kpti_install_ng_mappings)
+SYM_FUNC_END(idmap_kpti_install_ng_mappings)
        .popsection
 #endif
 
@@ -404,7 +412,7 @@ ENDPROC(idmap_kpti_install_ng_mappings)
  *     value of the SCTLR_EL1 register.
  */
        .pushsection ".idmap.text", "awx"
-ENTRY(__cpu_setup)
+SYM_FUNC_START(__cpu_setup)
        tlbi    vmalle1                         // Invalidate local TLB
        dsb     nsh
 
@@ -416,23 +424,9 @@ ENTRY(__cpu_setup)
        enable_dbg                              // since this is per-cpu
        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
        /*
-        * Memory region attributes for LPAE:
-        *
-        *   n = AttrIndx[2:0]
-        *                      n       MAIR
-        *   DEVICE_nGnRnE      000     00000000
-        *   DEVICE_nGnRE       001     00000100
-        *   DEVICE_GRE         010     00001100
-        *   NORMAL_NC          011     01000100
-        *   NORMAL             100     11111111
-        *   NORMAL_WT          101     10111011
+        * Memory region attributes
         */
-       ldr     x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
-                    MAIR(0x04, MT_DEVICE_nGnRE) | \
-                    MAIR(0x0c, MT_DEVICE_GRE) | \
-                    MAIR(0x44, MT_NORMAL_NC) | \
-                    MAIR(0xff, MT_NORMAL) | \
-                    MAIR(0xbb, MT_NORMAL_WT)
+       mov_q   x5, MAIR_EL1_SET
        msr     mair_el1, x5
        /*
         * Prepare SCTLR
@@ -475,4 +469,4 @@ ENTRY(__cpu_setup)
 #endif /* CONFIG_ARM64_HW_AFDBM */
        msr     tcr_el1, x10
        ret                                     // return to head.S
-ENDPROC(__cpu_setup)
+SYM_FUNC_END(__cpu_setup)
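
The mov_q MAIR_EL1_SET change above replaces the literal ldr; assuming the usual definition MAIR_ATTRIDX(attr, idx) == (attr) << ((idx) * 8), the named attribute macros reproduce the deleted comment table bit for bit. A quick userspace check using the attribute values from that table:

    #include <stdint.h>
    #include <stdio.h>

    /* Index n of MAIR_EL1 selects byte n; MAIR_ATTRIDX definition assumed. */
    #define MAIR_ATTRIDX(attr, idx) ((uint64_t)(attr) << ((idx) * 8))

    int main(void)
    {
            uint64_t mair = MAIR_ATTRIDX(0x00, 0)   /* DEVICE_nGnRnE */
                          | MAIR_ATTRIDX(0x04, 1)   /* DEVICE_nGnRE  */
                          | MAIR_ATTRIDX(0x0c, 2)   /* DEVICE_GRE    */
                          | MAIR_ATTRIDX(0x44, 3)   /* NORMAL_NC     */
                          | MAIR_ATTRIDX(0xff, 4)   /* NORMAL        */
                          | MAIR_ATTRIDX(0xbb, 5);  /* NORMAL_WT     */

            printf("MAIR_EL1 = %#018llx\n", (unsigned long long)mair);
            return 0;
    }
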
index c5f05c4a4d00883422ed6e211135302cff3be14f..5b09aca551085ee1889cce9f1af80962abaab84d 100644 (file)
 #define XEN_IMM 0xEA1
 
 #define HYPERCALL_SIMPLE(hypercall)            \
-ENTRY(HYPERVISOR_##hypercall)                  \
+SYM_FUNC_START(HYPERVISOR_##hypercall)         \
        mov x16, #__HYPERVISOR_##hypercall;     \
        hvc XEN_IMM;                            \
        ret;                                    \
-ENDPROC(HYPERVISOR_##hypercall)
+SYM_FUNC_END(HYPERVISOR_##hypercall)
 
 #define HYPERCALL0 HYPERCALL_SIMPLE
 #define HYPERCALL1 HYPERCALL_SIMPLE
@@ -86,7 +86,7 @@ HYPERCALL2(multicall);
 HYPERCALL2(vm_assist);
 HYPERCALL3(dm_op);
 
-ENTRY(privcmd_call)
+SYM_FUNC_START(privcmd_call)
        mov x16, x0
        mov x0, x1
        mov x1, x2
@@ -109,4 +109,4 @@ ENTRY(privcmd_call)
         */
        uaccess_ttbr0_disable x6, x7
        ret
-ENDPROC(privcmd_call);
+SYM_FUNC_END(privcmd_call);
diff --git a/arch/c6x/include/asm/vmalloc.h b/arch/c6x/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..26c6c66
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_C6X_VMALLOC_H
+#define _ASM_C6X_VMALLOC_H
+
+#endif /* _ASM_C6X_VMALLOC_H */
index 4332a10aec6c79e137d8d4166328978ee6598366..fb154d19625bc0be53c4345e14bedfbaf8f9e6d2 100644 (file)
@@ -18,7 +18,7 @@
 #define DP     B14
 #define SP     B15
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 #define resume_kernel restore_all
 #endif
 
@@ -287,7 +287,7 @@ work_notifysig:
        ;; is a little bit different
        ;;
 ENTRY(ret_from_exception)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        MASK_INT B2
 #endif
 
@@ -557,7 +557,7 @@ ENDPROC(_nmi_handler)
        ;;
        ;; Jump to schedule() then return to ret_from_isr
        ;;
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 resume_kernel:
        GET_THREAD_INFO A12
        LDW     .D1T1   *+A12(THREAD_INFO_PREEMPT_COUNT),A1
@@ -582,7 +582,7 @@ preempt_schedule:
        B       .S2     preempt_schedule_irq
 #endif
        ADDKPC  .S2     preempt_schedule,B3,4
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 ENTRY(enable_exception)
        DINT
diff --git a/arch/csky/include/asm/vmalloc.h b/arch/csky/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..43dca63
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_CSKY_VMALLOC_H
+#define _ASM_CSKY_VMALLOC_H
+
+#endif /* _ASM_CSKY_VMALLOC_H */
index a7a5b67df8989b96254f49110235176222b3fb7c..00770632800005baebc0525a0eb887f48e118de6 100644 (file)
@@ -277,7 +277,7 @@ ENTRY(csky_irq)
        zero_fp
        psrset  ee
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        mov     r9, sp                  /* Get current stack  pointer */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10                 /* Get thread_info */
@@ -294,7 +294,7 @@ ENTRY(csky_irq)
        mov     a0, sp
        jbsr    csky_do_IRQ
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        subi    r12, 1
        stw     r12, (r9, TINFO_PREEMPT)
        cmpnei  r12, 0
diff --git a/arch/h8300/include/asm/vmalloc.h b/arch/h8300/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..08a55c1
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_H8300_VMALLOC_H
+#define _ASM_H8300_VMALLOC_H
+
+#endif /* _ASM_H8300_VMALLOC_H */
index 4ade5f8299baed742c24b839d5bb3bec867b3d78..c6e289b5f1f284f4a5b3c11b8acba29bab3fa4c6 100644 (file)
@@ -284,12 +284,12 @@ badsys:
        mov.l   er0,@(LER0:16,sp)
        bra     resume_userspace
 
-#if !defined(CONFIG_PREEMPT)
+#if !defined(CONFIG_PREEMPTION)
 #define resume_kernel restore_all
 #endif
 
 ret_from_exception:
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPTION)
        orc     #0xc0,ccr
 #endif
 ret_from_interrupt:
@@ -319,7 +319,7 @@ work_resched:
 restore_all:
        RESTORE_ALL                     /* Does RTE */
 
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPTION)
 resume_kernel:
        mov.l   @(TI_PRE_COUNT:16,er4),er0
        bne     restore_all:8
index 12cd9231c4b8fb33d7687a68fd593271a632c946..0231d69c8bf2bf35285a21a3117c5a8f2ab26f1a 100644 (file)
@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v)                    \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
                "1:     %0 = memw_locked(%1);\n"                        \
                "       %0 = "#op "(%0,%2);\n"                          \
                "       memw_locked(%1,P3)=%0;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                     \
                "1:     %0 = memw_locked(%2);\n"                        \
                "       %1 = "#op "(%0,%3);\n"                          \
                "       memw_locked(%2,P3)=%1;\n"                       \
-               "       if !P3 jump 1b;\n"                              \
+               "       if (!P3) jump 1b;\n"                            \
                : "=&r" (output), "=&r" (val)                           \
                : "r" (&v->counter), "r" (i)                            \
                : "memory", "p3"                                        \
@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
                "       }"
                "       memw_locked(%2, p3) = %1;"
                "       {"
-               "               if !p3 jump 1b;"
+               "               if (!p3) jump 1b;"
                "       }"
                "2:"
                : "=&r" (__oldval), "=&r" (tmp)
index 47384b094b9445598bb2f859f26bba81e444a396..71429f756af0f45a846598529055b34b1d70f269 100644 (file)
@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
        "1:     R12 = memw_locked(R10);\n"
        "       { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
        "       memw_locked(R10,P1) = R12;\n"
-       "       {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+       "       {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
        : "=&r" (oldval)
        : "r" (addr), "r" (nr)
        : "r10", "r11", "r12", "p0", "p1", "memory"
@@ -223,7 +223,7 @@ static inline int ffs(int x)
        int r;
 
        asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
-               "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+               "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
                : "=&r" (r)
                : "r" (x)
                : "p0");
index 6091322c3af9639f2eaa144c5842f41e5a08e123..92b8a02e588ac256341a82053df70340089fd34b 100644 (file)
@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
        __asm__ __volatile__ (
        "1:     %0 = memw_locked(%1);\n"    /*  load into retval */
        "       memw_locked(%1,P0) = %2;\n" /*  store into memory */
-       "       if !P0 jump 1b;\n"
+       "       if (!P0) jump 1b;\n"
        : "=&r" (retval)
        : "r" (ptr), "r" (x)
        : "memory", "p0"
index cb635216a732c98c2d05e965762696543d7ec3e4..0191f7c7193e6bd2a8700d2813627b60a701a419 100644 (file)
@@ -16,7 +16,7 @@
            /* For example: %1 = %4 */ \
            insn \
        "2: memw_locked(%3,p2) = %1;\n" \
-       "   if !p2 jump 1b;\n" \
+       "   if (!p2) jump 1b;\n" \
        "   %1 = #0;\n" \
        "3:\n" \
        ".section .fixup,\"ax\"\n" \
@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
        "1: %1 = memw_locked(%3)\n"
        "   {\n"
        "      p2 = cmp.eq(%1,%4)\n"
-       "      if !p2.new jump:NT 3f\n"
+       "      if (!p2.new) jump:NT 3f\n"
        "   }\n"
        "2: memw_locked(%3,p2) = %5\n"
-       "   if !p2 jump 1b\n"
+       "   if (!p2) jump 1b\n"
        "3:\n"
        ".section .fixup,\"ax\"\n"
        "4: %0 = #%6\n"
index 539e3efcf39c6ed595fea142a39384340544e917..bda2a9c2df78894f52d60e505fea72403816b126 100644 (file)
@@ -172,7 +172,7 @@ static inline void writel(u32 data, volatile void __iomem *addr)
 #define writel_relaxed __raw_writel
 
 void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
-#define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))
 
 
 #define __raw_writel writel
index bfe07d842ff35c4ac70b9e9d96e2b3ffe9789a8d..ef103b73bec8388df8258e4ac132cbc1db4ebfbc 100644 (file)
@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
                "1:     R6 = memw_locked(%0);\n"
                "       R6 = add(R6,#-1);\n"
                "       memw_locked(%0,P3) = R6\n"
-               "       if !P3 jump 1b;\n"
+               "       if (!P3) jump 1b;\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-               "       { if !P3 jump 1f; }\n"
+               "       { if (!P3) jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       { %0 = P3 }\n"
                "1:\n"
@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0)\n"
                "       { P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1)\n"
                "       { %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-               "       { if !P3 jump 1f; }\n"
+               "       { if (!P3) jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       P3 = cmp.eq(R6,#0);\n"
-               "       { if !P3 jump 1b; R6 = #1; }\n"
+               "       { if (!P3) jump 1b; R6 = #1; }\n"
                "       memw_locked(%0,P3) = R6;\n"
-               "       { if !P3 jump 1b; }\n"
+               "       { if (!P3) jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       P3 = cmp.eq(R6,#0);\n"
-               "       { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+               "       { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
diff --git a/arch/hexagon/include/asm/vmalloc.h b/arch/hexagon/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..7b04609
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_HEXAGON_VMALLOC_H
+#define _ASM_HEXAGON_VMALLOC_H
+
+#endif /* _ASM_HEXAGON_VMALLOC_H */
index 35f29423fda808f6edd8202da932ce89ae728eae..5ed02f699479ad68c516444dceae7223452c1e0c 100644 (file)
@@ -11,8 +11,6 @@
 #include <linux/thread_info.h>
 #include <linux/module.h>
 
-register unsigned long current_frame_pointer asm("r30");
-
 struct stackframe {
        unsigned long fp;
        unsigned long rets;
@@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)
 
        low = (unsigned long)task_stack_page(current);
        high = low + THREAD_SIZE;
-       fp = current_frame_pointer;
+       fp = (unsigned long)__builtin_frame_address(0);
 
        while (fp >= low && fp <= (high - sizeof(*frame))) {
                frame = (struct stackframe *)fp;
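
Swapping the file-scope register variable for __builtin_frame_address(0) asks the compiler for the current frame pointer in a supported way; a global register declaration can silently break under inlining or register-allocation changes. A tiny userspace demonstration (only meaningful when frame pointers are kept, e.g. with -fno-omit-frame-pointer):

    #include <stdio.h>

    int main(void)
    {
            void *fp = __builtin_frame_address(0);

            printf("current frame pointer: %p\n", fp);
            return 0;
    }
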
index 12242c27e2df59257cd60fadbd24480e81d365d8..554371d92bed690aa127016db8469e9752f295b4 100644 (file)
@@ -265,12 +265,12 @@ event_dispatch:
         * should be in the designated register (usually R19)
         *
         * If we were in kernel mode, we don't need to check scheduler
-        * or signals if CONFIG_PREEMPT is not set.  If set, then it has
+        * or signals if CONFIG_PREEMPTION is not set.  If set, then it has
         * to jump to a need_resched kind of block.
-        * BTW, CONFIG_PREEMPT is not supported yet.
+        * BTW, CONFIG_PREEMPTION is not supported yet.
         */
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        R0 = #VM_INT_DISABLE
        trap1(#HVM_TRAP1_VMSETIE)
 #endif
@@ -369,7 +369,7 @@ ret_from_fork:
                R26.L = #LO(do_work_pending);
                R0 = #VM_INT_DISABLE;
        }
-       if P0 jump check_work_pending
+       if (P0) jump check_work_pending
        {
                R0 = R25;
                callr R24
index f886d4dc9d55297f6548ae45f53dc4d48c42ef01..b66ba907019ce11d6304b041650a968bc5c8d69d 100644 (file)
@@ -38,7 +38,10 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
 /* Low-level suspend routine. */
 extern int acpi_suspend_lowlevel(void);
 
-extern unsigned long acpi_wakeup_address;
+static inline unsigned long acpi_get_wakeup_address(void)
+{
+       return 0;
+}
 
 /*
  * Record the cpei override flag and current logical cpu. This is
index 30cb373f3de8a3c3602c6a9495421edb2e639303..64ce0b971a0a1df07417a514e8031e62522ae67a 100644 (file)
@@ -18,7 +18,7 @@
 extern unsigned long vga_console_iobase;
 extern unsigned long vga_console_membase;
 
-#define VGA_MAP_MEM(x,s)       ((unsigned long) ioremap_nocache(vga_console_membase + (x), s))
+#define VGA_MAP_MEM(x,s)       ((unsigned long) ioremap(vga_console_membase + (x), s))
 
 #define vga_readb(x)   (*(x))
 #define vga_writeb(x,y)        (*(y) = (x))
diff --git a/arch/ia64/include/asm/vmalloc.h b/arch/ia64/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..a2b5114
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_IA64_VMALLOC_H
+#define _ASM_IA64_VMALLOC_H
+
+#endif /* _ASM_IA64_VMALLOC_H */
index 70d1587ddcd4676030bc3ce8d5cf51cc01a2cc71..a5636524af7693a5f7900c3e92f87a31d9e0323c 100644 (file)
@@ -42,8 +42,6 @@ int acpi_lapic;
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
-unsigned long acpi_wakeup_address = 0;
-
 #define ACPI_MAX_PLATFORM_INTERRUPTS   256
 
 /* Array to record platform interrupt vectors for generic interrupt routing. */
index f80eb7fb544d71f80ff852c6333d6cdd7fb22a5c..258d7b70c0f38534cb9f8ff8ccaa0e1e8827fa53 100644 (file)
@@ -50,7 +50,7 @@ int __init init_cyclone_clock(void)
 
        /* find base address */
        offset = (CYCLONE_CBAR_ADDR);
-       reg = ioremap_nocache(offset, sizeof(u64));
+       reg = ioremap(offset, sizeof(u64));
        if(!reg){
                printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
                                " register.\n");
@@ -68,7 +68,7 @@ int __init init_cyclone_clock(void)
 
        /* setup PMCC */
        offset = (base + CYCLONE_PMCC_OFFSET);
-       reg = ioremap_nocache(offset, sizeof(u64));
+       reg = ioremap(offset, sizeof(u64));
        if(!reg){
                printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
                                " register.\n");
@@ -80,7 +80,7 @@ int __init init_cyclone_clock(void)
 
        /* setup MPCS */
        offset = (base + CYCLONE_MPCS_OFFSET);
-       reg = ioremap_nocache(offset, sizeof(u64));
+       reg = ioremap(offset, sizeof(u64));
        if(!reg){
                printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
                                " register.\n");
@@ -92,7 +92,7 @@ int __init init_cyclone_clock(void)
 
        /* map in cyclone_timer */
        offset = (base + CYCLONE_MPMC_OFFSET);
-       cyclone_timer = ioremap_nocache(offset, sizeof(u32));
+       cyclone_timer = ioremap(offset, sizeof(u32));
        if(!cyclone_timer){
                printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
                                " register.\n");
index a9992be5718b88a9e7e31e00db188fb03baeadfe..2ac92633150009fc283747c010e1745fcb7b6b9e 100644 (file)
@@ -670,12 +670,12 @@ GLOBAL_ENTRY(ia64_leave_syscall)
         *
         * p6 controls whether current_thread_info()->flags needs to be checked for
         * extra work.  We always check for extra work when returning to user-level.
-        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
+        * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
         * is 0.  After extra work processing has been completed, execution
         * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
         * needs to be redone.
         */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        RSM_PSR_I(p0, r2, r18)                  // disable interrupts
        cmp.eq pLvSys,p0=r0,r0                  // pLvSys=1: leave from syscall
 (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
@@ -685,7 +685,7 @@ GLOBAL_ENTRY(ia64_leave_syscall)
 (pUStk)        mov r21=0                       // r21 <- 0
        ;;
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
        RSM_PSR_I(pUStk, r2, r18)
        cmp.eq pLvSys,p0=r0,r0          // pLvSys=1: leave from syscall
 (pUStk)        cmp.eq.unc p6,p0=r0,r0          // p6 <- pUStk
@@ -814,12 +814,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
         *
         * p6 controls whether current_thread_info()->flags needs to be checked for
         * extra work.  We always check for extra work when returning to user-level.
-        * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
+        * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
         * is 0.  After extra work processing has been completed, execution
         * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
         * needs to be redone.
         */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        RSM_PSR_I(p0, r17, r31)                 // disable interrupts
        cmp.eq p0,pLvSys=r0,r0                  // pLvSys=0: leave from kernel
 (pKStk)        adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
@@ -1120,7 +1120,7 @@ skip_rbs_switch:
 
        /*
         * On entry:
-        *      r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
+        *      r20 = &current->thread_info->pre_count (if CONFIG_PREEMPTION)
         *      r31 = current->thread_info->flags
         * On exit:
         *      p6 = TRUE if work-pending-check needs to be redone
index b8356edbde659077cb0f56fd495c69d6874eae5b..a6d6a0556f089a7189b7ee8518f8cf8240c4212c 100644 (file)
@@ -841,7 +841,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
                return 1;
        }
 
-#if !defined(CONFIG_PREEMPT)
+#if !defined(CONFIG_PREEMPTION)
        if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
                /* Boost up -- we can execute copied instructions directly */
                ia64_psr(regs)->ri = p->ainsn.slot;
index 58fd67068bac5036e96514039688fc3844a635aa..b01d68a2d5d97360d0b612e465fb70635aa80ef6 100644 (file)
@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
index 6663f1741798e83f53c58641f13ca2f9ec8b544e..6ad6cdac74b3dc420b4126c194c58a20d6d2d601 100644 (file)
@@ -14,6 +14,7 @@ config M68K
        select HAVE_AOUT if MMU
        select HAVE_ASM_MODVERSIONS
        select HAVE_DEBUG_BUGVERBOSE
+       select HAVE_COPY_THREAD_TLS
        select GENERIC_IRQ_SHOW
        select GENERIC_ATOMIC64
        select HAVE_UID16
index 619d30d663a2f515aacf4b0e7a18044800227b7b..e1134c3e0b69d5d328872f24b2e5eff809b7b739 100644 (file)
@@ -562,6 +562,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -574,7 +575,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -612,6 +613,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -620,6 +624,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -651,4 +656,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index caa0558abcdbdeb3aa07180ce699396b9cb700a4..484cb1643df1719d26b6170d1d31b4d6bd941c6f 100644 (file)
@@ -518,6 +518,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -530,7 +531,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -568,6 +569,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -576,6 +580,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -607,4 +612,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 2551c7e9ac54e8df32f0f69ee2c39ba1abeffba6..eb6a46b6d135536d504b9541969f63d8b20ecefa 100644 (file)
@@ -540,6 +540,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -552,7 +553,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -590,6 +591,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -598,6 +602,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -629,4 +634,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 4ffc1e5646d5139de004faf2e1f85ab513b58cb6..bee9263a409c4e18641cfcb307c1ff783aa7c6e1 100644 (file)
@@ -511,6 +511,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -523,7 +524,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -561,6 +562,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -569,6 +573,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -600,4 +605,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 806da3d97ca4e93955434efa9fd8a2781ee83f51..c8847a8bcbd6d23f41648138d567852e12e08a48 100644 (file)
@@ -520,6 +520,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -532,7 +533,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -570,6 +571,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -578,6 +582,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -609,4 +614,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 250da20e291c83e066f3dded977d3d98a35a9d5b..303ffafd9cad0d603868c37b22be2f327b6bfd38 100644 (file)
@@ -542,6 +542,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -554,7 +555,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -592,6 +593,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -600,6 +604,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -631,4 +636,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index b764a0368a568be595b7de157aabb0400d431015..89a704226cd9e4e51845de0c2d2cac3829ac0cee 100644 (file)
@@ -628,6 +628,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -640,7 +641,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -678,6 +679,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -686,6 +690,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -717,4 +722,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 7800d3a8d46e3e598da133368a7eedf05409ad0d..f62c1f4d03a0330e4cbae106609c292d040e968d 100644 (file)
@@ -510,6 +510,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -522,7 +523,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -560,6 +561,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -568,6 +572,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -599,4 +604,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index c32dc2d2058d32bf09ac1d0c3fbf131dcc27bbde..58dcad26a7513222c6776e4d1f7d351d71331651 100644 (file)
@@ -511,6 +511,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -523,7 +524,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -561,6 +562,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -569,6 +573,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -600,4 +605,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index bf0a65ce57e0606dab7d850977b6e1cb93f5a3fa..5d3c28d1d545b4a0a900b771edc6baf7d7b6f193 100644 (file)
@@ -529,6 +529,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -541,7 +542,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -579,6 +580,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -587,6 +591,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -618,4 +623,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 5f3cfa2926d22960d4cfb9e769d272d132ae2bc6..5ef9e17dcd51788a0076843338586d736f70dea5 100644 (file)
@@ -513,6 +513,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -525,7 +526,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -563,6 +564,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
index 58354d2018d5151ea451f58ed2231b78e4cbadc2..22e1accc60a3e110c5089df3a8b4d246bbc84298 100644 (file)
@@ -512,6 +512,7 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
 CONFIG_CRYPTO_CFB=m
@@ -524,7 +525,7 @@ CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -562,6 +563,9 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
@@ -570,6 +574,7 @@ CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_SORT=m
 CONFIG_REED_SOLOMON_TEST=m
@@ -601,4 +606,3 @@ CONFIG_TEST_KMOD=m
 CONFIG_TEST_MEMCAT_P=m
 CONFIG_TEST_STACKINIT=m
 CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
index 559cb91bede11c8ce1b88dc5a1d41f188550765e..dec05743d426513901fe509c6c94a05c0fb45b01 100644 (file)
@@ -27,7 +27,6 @@ static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
        return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
 
-#define ioremap_nocache ioremap
 #define ioremap_uc ioremap
 #define ioremap_wt ioremap_wt
 static inline void __iomem *ioremap_wt(unsigned long physaddr,
index 2e0047cf86f838487ceb84a5e78e80f3bc2316c6..4ae52414cd9d4392e509d0301161ac28b4d44956 100644 (file)
@@ -30,5 +30,6 @@
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE3
 
 #endif /* _ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/include/asm/vmalloc.h b/arch/m68k/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..bc1dca6
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_M68K_VMALLOC_H
+#define _ASM_M68K_VMALLOC_H
+
+#endif /* _ASM_M68K_VMALLOC_H */
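
Empty <asm/vmalloc.h> stubs like this one recur below for microblaze, MIPS, nds32, nios2 and openrisc. The pattern suggests (an assumption, not visible in this excerpt) that <linux/vmalloc.h> gained an unconditional per-arch include, giving each architecture a sanctioned home for vmalloc-related declarations; the empty header is the no-op case:

	/* assumed layering in include/linux/vmalloc.h: */
	#include <asm/vmalloc.h>	/* arch hook; an empty header on most arches */
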
index 97cd3ea5f10b837949b124024951912d6973e8f6..9dd76fbb7c6b2752b0a8eefb847a65e2f26251b1 100644 (file)
@@ -69,6 +69,13 @@ ENTRY(__sys_vfork)
        lea     %sp@(24),%sp
        rts
 
+ENTRY(__sys_clone3)
+       SAVE_SWITCH_STACK
+       pea     %sp@(SWITCH_STACK_SIZE)
+       jbsr    m68k_clone3
+       lea     %sp@(28),%sp
+       rts
+
 ENTRY(sys_sigreturn)
        SAVE_SWITCH_STACK
        movel   %sp,%sp@-                 | switch_stack pointer
index 4e77a06735c1b8a8d591bee7bde4677495307ef3..8f0d9140700f09ad2f1135cfd078052d159e973b 100644 (file)
@@ -30,8 +30,9 @@
 #include <linux/init_task.h>
 #include <linux/mqueue.h>
 #include <linux/rcupdate.h>
-
+#include <linux/syscalls.h>
 #include <linux/uaccess.h>
+
 #include <asm/traps.h>
 #include <asm/machdep.h>
 #include <asm/setup.h>
@@ -107,20 +108,43 @@ void flush_thread(void)
  * on top of pt_regs, which means that sys_clone() arguments would be
  * buried.  We could, of course, copy them, but it's too costly for no
  * good reason - generic clone() would have to copy them *again* for
- * do_fork() anyway.  So in this case it's actually better to pass pt_regs *
- * and extract arguments for do_fork() from there.  Eventually we might
- * go for calling do_fork() directly from the wrapper, but only after we
- * are finished with do_fork() prototype conversion.
+ * _do_fork() anyway.  So in this case it's actually better to pass pt_regs *
+ * and extract arguments for _do_fork() from there.  Eventually we might
+ * go for calling _do_fork() directly from the wrapper, but only after we
+ * are finished with _do_fork() prototype conversion.
  */
 asmlinkage int m68k_clone(struct pt_regs *regs)
 {
        /* regs will be equal to current_pt_regs() */
-       return do_fork(regs->d1, regs->d2, 0,
-                      (int __user *)regs->d3, (int __user *)regs->d4);
+       struct kernel_clone_args args = {
+               .flags          = regs->d1 & ~CSIGNAL,
+               .pidfd          = (int __user *)regs->d3,
+               .child_tid      = (int __user *)regs->d4,
+               .parent_tid     = (int __user *)regs->d3,
+               .exit_signal    = regs->d1 & CSIGNAL,
+               .stack          = regs->d2,
+               .tls            = regs->d5,
+       };
+
+       if (!legacy_clone_args_valid(&args))
+               return -EINVAL;
+
+       return _do_fork(&args);
+}
+
+/*
+ * Because extra registers are saved on the stack after the sys_clone3()
+ * arguments, this C wrapper extracts them from pt_regs * and then calls the
+ * generic sys_clone3() implementation.
+ */
+asmlinkage int m68k_clone3(struct pt_regs *regs)
+{
+       return sys_clone3((struct clone_args __user *)regs->d1, regs->d2);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-                unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+                   unsigned long arg, struct task_struct *p,
+                   unsigned long tls)
 {
        struct fork_frame {
                struct switch_stack sw;
@@ -155,7 +179,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        p->thread.usp = usp ?: rdusp();
 
        if (clone_flags & CLONE_SETTLS)
-               task_thread_info(p)->tp_value = frame->regs.d5;
+               task_thread_info(p)->tp_value = tls;
 
 #ifdef CONFIG_FPU
        if (!FPU_IS_EMU) {
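
This conversion is what the HAVE_COPY_THREAD_TLS select in the m68k Kconfig hunk earlier enables: copy_thread_tls() receives the TLS pointer as an explicit argument instead of digging it out of the clone() register convention, a prerequisite for clone3(), whose arguments arrive in a struct rather than in fixed registers. The essence of the change:

	/* before: TLS bound to clone()'s m68k register convention (d5) */
	if (clone_flags & CLONE_SETTLS)
		task_thread_info(p)->tp_value = frame->regs.d5;

	/* after: TLS is explicit, so clone() and clone3() share one path */
	if (clone_flags & CLONE_SETTLS)
		task_thread_info(p)->tp_value = tls;
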
index a88a285a0e5f6c5ca724ad27c36b59b362f0b988..a00a5d0db602fba71725ecb897538dacfe48fbfb 100644 (file)
 432    common  fsmount                         sys_fsmount
 433    common  fspick                          sys_fspick
 434    common  pidfd_open                      sys_pidfd_open
-# 435 reserved for clone3
+435    common  clone3                          __sys_clone3
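
With entry 435 wired up, clone3() becomes callable on m68k. A minimal userspace smoke test, sketched (error handling trimmed; assumes kernel headers new enough to provide __NR_clone3 and struct clone_args, and note that glibc has no wrapper, hence the raw syscall):

	#define _GNU_SOURCE
	#include <linux/sched.h>	/* struct clone_args (UAPI, v5.3+) */
	#include <sys/syscall.h>	/* __NR_clone3 */
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <signal.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	static pid_t raw_clone3(struct clone_args *args)
	{
		return syscall(__NR_clone3, args, sizeof(*args));
	}

	int main(void)
	{
		struct clone_args args;
		pid_t pid;

		memset(&args, 0, sizeof(args));
		args.exit_signal = SIGCHLD;	/* fork()-like child */

		pid = raw_clone3(&args);
		if (pid < 0) {
			perror("clone3");	/* ENOSYS before entry 435 exists */
			return 1;
		}
		if (pid == 0)
			_exit(0);		/* child: exit immediately */
		waitpid(pid, NULL, 0);		/* parent: reap the child */
		return 0;
	}
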
index 5f46ebe7bfe3c641512e560c18f1cf240799c894..a105f113fd67c690ffd387dd5c377af44c31b9ed 100644 (file)
@@ -11,7 +11,7 @@ config MICROBLAZE
        select ARCH_HAS_UNCACHED_SEGMENT if !MMU
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_WANT_IPC_PARSE_VERSION
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select TIMER_OF
        select CLONE_BACKWARDS3
        select COMMON_CLK
diff --git a/arch/microblaze/include/asm/vmalloc.h b/arch/microblaze/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..04013a4
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_MICROBLAZE_VMALLOC_H
+#define _ASM_MICROBLAZE_VMALLOC_H
+
+#endif /* _ASM_MICROBLAZE_VMALLOC_H */
index de7083bd1d2427e2bd5935dbd592303e957f8314..f6ded356394afc718b761e250df029600c2f6107 100644 (file)
@@ -728,7 +728,7 @@ no_intr_resched:
        bri     6f;
 /* MS: Return to kernel state. */
 2:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        lwi     r11, CURRENT_TASK, TS_THREAD_INFO;
        /* MS: get preempt_count from thread info */
        lwi     r5, r11, TI_PREEMPT_COUNT;
index add388236f4ef2cdf2558b0d282605fadce6912a..a2739a34bb1221beefdcbc8f45c9280989a5b40e 100644 (file)
@@ -15,7 +15,7 @@ config MIPS
        select ARCH_USE_QUEUED_SPINLOCKS
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_IPC_PARSE_VERSION
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select CLONE_BACKWARDS
        select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
        select CPU_PM if CPU_IDLE
@@ -47,7 +47,7 @@ config MIPS
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
        select HAVE_ASM_MODVERSIONS
-       select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
+       select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
        select HAVE_CONTEXT_TRACKING
        select HAVE_COPY_THREAD_TLS
        select HAVE_C_RECORDMCOUNT
index 7de162432d7f670d63647038d013a545fb0a64a3..95def949c9714597fe9d6330d46e81c33b37807d 100644 (file)
@@ -236,9 +236,9 @@ static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock,
 
 static void __init tnetd7300_init_clocks(void)
 {
-       u32 *bootcr = (u32 *)ioremap_nocache(AR7_REGS_DCL, 4);
+       u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
        struct tnetd7300_clocks *clocks =
-                                       ioremap_nocache(UR8_REGS_CLOCKS,
+                                       ioremap(UR8_REGS_CLOCKS,
                                        sizeof(struct tnetd7300_clocks));
 
        bus_clk.rate = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT,
@@ -320,9 +320,9 @@ static int tnetd7200_get_clock_base(int clock_id, u32 *bootcr)
 
 static void __init tnetd7200_init_clocks(void)
 {
-       u32 *bootcr = (u32 *)ioremap_nocache(AR7_REGS_DCL, 4);
+       u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
        struct tnetd7200_clocks *clocks =
-                                       ioremap_nocache(AR7_REGS_CLOCKS,
+                                       ioremap(AR7_REGS_CLOCKS,
                                        sizeof(struct tnetd7200_clocks));
        int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
        int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv;
index 2292e55c12e23c00435271934caf67e7da7045b1..8b006addd6ba524a3bc33e641669fe6b5ced8b62 100644 (file)
@@ -308,7 +308,7 @@ int __init ar7_gpio_init(void)
                size = 0x1f;
        }
 
-       gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size);
+       gpch->regs = ioremap(AR7_REGS_GPIO, size);
        if (!gpch->regs) {
                printk(KERN_ERR "%s: failed to ioremap regs\n",
                                        gpch->chip.label);
index 1f2028266493c3768735193b94b4b516884317e8..215149a85d83ba44b2538d77909108956a4e6475 100644 (file)
@@ -702,7 +702,7 @@ static int __init ar7_register_devices(void)
                pr_warn("unable to register usb slave: %d\n", res);
 
        /* Register watchdog only if enabled in hardware */
-       bootcr = ioremap_nocache(AR7_REGS_DCL, 4);
+       bootcr = ioremap(AR7_REGS_DCL, 4);
        val = readl(bootcr);
        iounmap(bootcr);
        if (val & AR7_WDT_HW_ENA) {
index 8da996142d6a04c5336549c65a8b578142dbd783..24f619199ee76447859683e2857d0c20f59c062f 100644 (file)
@@ -262,7 +262,7 @@ void __init ar2315_plat_mem_setup(void)
        u32 config;
 
        /* Detect memory size */
-       sdram_base = ioremap_nocache(AR2315_SDRAMCTL_BASE,
+       sdram_base = ioremap(AR2315_SDRAMCTL_BASE,
                                     AR2315_SDRAMCTL_SIZE);
        memcfg = __raw_readl(sdram_base + AR2315_MEM_CFG);
        memsize   = 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_DATA_WIDTH);
@@ -272,7 +272,7 @@ void __init ar2315_plat_mem_setup(void)
        add_memory_region(0, memsize, BOOT_MEM_RAM);
        iounmap(sdram_base);
 
-       ar2315_rst_base = ioremap_nocache(AR2315_RST_BASE, AR2315_RST_SIZE);
+       ar2315_rst_base = ioremap(AR2315_RST_BASE, AR2315_RST_SIZE);
 
        /* Detect the hardware based on the device ID */
        devid = ar2315_rst_reg_read(AR2315_SREV) & AR2315_REV_CHIP;
index acd55a9cffe3ecce5a7c6b03dab67c8adcc8b8d1..47f3e98974fc63f85bcccc1228a76ef8d6501449 100644 (file)
@@ -185,7 +185,7 @@ static void __init ar5312_flash_init(void)
        void __iomem *flashctl_base;
        u32 ctl;
 
-       flashctl_base = ioremap_nocache(AR5312_FLASHCTL_BASE,
+       flashctl_base = ioremap(AR5312_FLASHCTL_BASE,
                                        AR5312_FLASHCTL_SIZE);
 
        ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL0);
@@ -358,7 +358,7 @@ void __init ar5312_plat_mem_setup(void)
        u32 devid;
 
        /* Detect memory size */
-       sdram_base = ioremap_nocache(AR5312_SDRAMCTL_BASE,
+       sdram_base = ioremap(AR5312_SDRAMCTL_BASE,
                                     AR5312_SDRAMCTL_SIZE);
        memcfg = __raw_readl(sdram_base + AR5312_MEM_CFG1);
        bank0_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0);
@@ -369,7 +369,7 @@ void __init ar5312_plat_mem_setup(void)
        add_memory_region(0, memsize, BOOT_MEM_RAM);
        iounmap(sdram_base);
 
-       ar5312_rst_base = ioremap_nocache(AR5312_RST_BASE, AR5312_RST_SIZE);
+       ar5312_rst_base = ioremap(AR5312_RST_BASE, AR5312_RST_SIZE);
 
        devid = ar5312_rst_reg_read(AR5312_REV);
        devid >>= AR5312_REV_WMAC_MIN_S;
index 989e71015ee67ce30eafe26c6f76ab8955c42ebb..cb99f97399102fa77a22aa877bae2ed7c6f70760 100644 (file)
@@ -111,7 +111,7 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
        u8 *mac_addr;
        u32 offset;
 
-       flash_base = ioremap_nocache(base, size);
+       flash_base = ioremap(base, size);
        flash_limit = flash_base + size;
 
        ath25_board.config = NULL;
index 63eacb8b0eb5b77a3f6a8d1e8cd7979e185d69a8..137abbc65c60d1c98ca8f63d6f7544877cc66cb7 100644 (file)
@@ -41,7 +41,7 @@ static void __iomem *ath79_ddr_pci_win_base;
 
 void ath79_ddr_ctrl_init(void)
 {
-       ath79_ddr_base = ioremap_nocache(AR71XX_DDR_CTRL_BASE,
+       ath79_ddr_base = ioremap(AR71XX_DDR_CTRL_BASE,
                                         AR71XX_DDR_CTRL_SIZE);
        if (soc_is_ar913x() || soc_is_ar724x() || soc_is_ar933x()) {
                ath79_ddr_wb_flush_base = ath79_ddr_base + 0x7c;
index ea385a865781fce391e332fb865ba2509265539b..484ee28922a9ac3f2509a3a958a23cb02008128b 100644 (file)
@@ -226,9 +226,9 @@ void __init plat_mem_setup(void)
        else if (fw_passed_dtb)
                __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
 
-       ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
+       ath79_reset_base = ioremap(AR71XX_RESET_BASE,
                                           AR71XX_RESET_SIZE);
-       ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
+       ath79_pll_base = ioremap(AR71XX_PLL_BASE,
                                         AR71XX_PLL_SIZE);
        ath79_detect_sys_type();
        ath79_ddr_ctrl_init();
index 172801ed35b89994f6a52e492a7c486cb517d821..d859f079b771a158208b0cef571a22a22c3f67ac 100644 (file)
@@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
        -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
        -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
 
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT                := n
+
 # decompressor objects (linked with vmlinuz)
 vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
 
index ba8f82a29a816a0bc7b5a6793403ccfa09f5c7d9..e794b2d53adfcf9d9c0a94d93f7e3109c725ae40 100644 (file)
@@ -44,13 +44,6 @@ static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
 
 /* See header file for descriptions of functions */
 
-/**
- * This macro returns the size of a member of a structure.
- * Logically it is the same as "sizeof(s::field)" in C++, but
- * C lacks the "::" operator.
- */
-#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
-
 /**
  * This macro returns a member of the
  * cvmx_bootmem_named_block_desc_t structure. These members can't
@@ -65,7 +58,7 @@ static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
 #define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field)                      \
        __cvmx_bootmem_desc_get(addr,                                   \
                offsetof(struct cvmx_bootmem_named_block_desc, field),  \
-               SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+               sizeof_field(struct cvmx_bootmem_named_block_desc, field))
 
 /**
  * This function is the implementation of the get macros defined
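
SIZEOF_FIELD here was a private copy of a helper the tree now provides centrally: sizeof_field() comes from <linux/stddef.h> and uses the same null-pointer trick, so the substitution above changes nothing at runtime:

	/* definition in <linux/stddef.h>, for reference: */
	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

	/* usage sketch (hypothetical struct): no object of the type is needed */
	struct named_block { unsigned long long base_addr; char name[128]; };
	_Static_assert(sizeof_field(struct named_block, name) == 128,
		       "field size is computed from the type alone");
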
index 1f742c32a883bbef7cfb2fd7ebb41c8fbac25860..4f34d92b52f9828f9cd5e5ba73bace306d8ff825 100644 (file)
@@ -357,7 +357,7 @@ static void octeon_write_lcd(const char *s)
 {
        if (octeon_bootinfo->led_display_base_addr) {
                void __iomem *lcd_address =
-                       ioremap_nocache(octeon_bootinfo->led_display_base_addr,
+                       ioremap(octeon_bootinfo->led_display_base_addr,
                                        8);
                int i;
                for (i = 0; i < 8; i++, s++) {
index 06d92fb377699a74487c3b60227916153890b487..c238e95190acf914d3fec973140c7a0e9b1764df 100644 (file)
@@ -51,7 +51,7 @@ static void __init ocelot_earlyprintk_init(void)
 {
        void __iomem *uart_base;
 
-       uart_base = ioremap_nocache(UART_UART, 0x20);
+       uart_base = ioremap(UART_UART, 0x20);
        setup_8250_early_printk_port((unsigned long)uart_base, 2, 50000);
 }
 
index feb069cbf44e8a691f50df5711af88195bfc5d6b..655f40ddb6d1e20df5768dba678dc96706bd0cc3 100644 (file)
@@ -63,7 +63,7 @@
        .endm
 
        .macro  local_irq_disable reg=t0
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        lw      \reg, TI_PRE_COUNT($28)
        addi    \reg, \reg, 1
        sw      \reg, TI_PRE_COUNT($28)
@@ -73,7 +73,7 @@
        xori    \reg, \reg, 1
        mtc0    \reg, CP0_STATUS
        irq_disable_hazard
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        lw      \reg, TI_PRE_COUNT($28)
        addi    \reg, \reg, -1
        sw      \reg, TI_PRE_COUNT($28)
index c46c59b0f1b40ce0b4b9abee2176056b1ba95253..49f0061a60514358358da251adc2a76634eb7969 100644 (file)
@@ -15,7 +15,8 @@
 static inline int __pure __get_cpu_type(const int cpu_type)
 {
        switch (cpu_type) {
-#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF)
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
+    defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
        case CPU_LOONGSON2EF:
 #endif
 
index 3f6ce74335b47982e8d1f8c4b16d9f4638227475..cf1f2a4a241898317d1c622a9af115e914bad4f4 100644 (file)
@@ -227,29 +227,7 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset,
  */
 #define ioremap(offset, size)                                          \
        __ioremap_mode((offset), (size), _CACHE_UNCACHED)
-
-/*
- * ioremap_nocache     -   map bus memory into CPU space
- * @offset:    bus address of the memory
- * @size:      size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- */
-#define ioremap_nocache(offset, size)                                  \
-       __ioremap_mode((offset), (size), _CACHE_UNCACHED)
-#define ioremap_uc ioremap_nocache
+#define ioremap_uc             ioremap
 
 /*
  * ioremap_cache -     map bus memory into CPU space
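
This deletion anchors the long run of ioremap_nocache() to ioremap() driver conversions that follows (ar7, ath25, ath79, lantiq, loongson, PCI host drivers, rb532, sni and others): as the removed comment block and the identical _CACHE_UNCACHED definition show, ioremap_nocache() was a pure alias of ioremap() on MIPS, so every caller converts by plain substitution, and devm_ioremap_nocache() likewise becomes devm_ioremap(). Sketch, with regs and res as placeholders:

	regs = ioremap_nocache(res->start, resource_size(res));	/* old */
	regs = ioremap(res->start, resource_size(res));			/* new: identical mapping attributes */
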
index 4993db40482c80fa17cacec1ef54f4e356b6d26d..ee26f9a4575dfc2b09568855015200043ca0ffbe 100644 (file)
@@ -49,8 +49,26 @@ struct thread_info {
        .addr_limit     = KERNEL_DS,            \
 }
 
-/* How to get the thread information struct from C.  */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
 register struct thread_info *__current_thread_info __asm__("$28");
+#endif
 
 static inline struct thread_info *current_thread_info(void)
 {
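
For contrast with the global register variable the comment above justifies, the local form that clang cannot honour would look like this (sketch; clang reportedly only binds local register variables when they appear as inline-asm operands, hence the global declaration used instead):

	static inline struct thread_info *current_thread_info(void)
	{
		register struct thread_info *ti __asm__("$28");

		return ti;
	}
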
index b08825531e9f9a3ce89e926ffc3e096fbfe14fc0..a58687e26c5db23cb588137c72a5f6bc016136c5 100644 (file)
@@ -26,8 +26,6 @@
 
 #define __VDSO_USE_SYSCALL             ULLONG_MAX
 
-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
 static __always_inline long gettimeofday_fallback(
                                struct __kernel_old_timeval *_tv,
                                struct timezone *_tz)
@@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback(
        return error ? -ret : ret;
 }
 
-#else
-
-static __always_inline long gettimeofday_fallback(
-                               struct __kernel_old_timeval *_tv,
-                               struct timezone *_tz)
-{
-       return -1;
-}
-
-#endif
-
 static __always_inline long clock_gettime_fallback(
                                        clockid_t _clkid,
                                        struct __kernel_timespec *_ts)
@@ -109,8 +96,6 @@ static __always_inline int clock_getres_fallback(
 
 #if _MIPS_SIM != _MIPS_SIM_ABI64
 
-#define VDSO_HAS_32BIT_FALLBACK        1
-
 static __always_inline long clock_gettime32_fallback(
                                        clockid_t _clkid,
                                        struct old_timespec32 *_ts)
diff --git a/arch/mips/include/asm/vmalloc.h b/arch/mips/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..25dc09b
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_MIPS_VMALLOC_H
+#define _ASM_MIPS_VMALLOC_H
+
+#endif /* _ASM_MIPS_VMALLOC_H */
index f777e44653d5767b953483f22158d24e84e6461f..47312c5294102264700c12b9955d39b9cd60ab79 100644 (file)
@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
        return 0;
 }
 
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+       int cpu1;
+
+       for_each_possible_cpu(cpu1)
+               if (cpus_are_siblings(cpu, cpu1))
+                       cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+       int cpu1;
+       int cluster = cpu_cluster(&cpu_data[cpu]);
+
+       for_each_possible_cpu(cpu1)
+               if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+                       cpumask_set_cpu(cpu1, cpu_map);
+}
+
 static int __populate_cache_leaves(unsigned int cpu)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
        if (c->icache.waysize) {
+               /* L1 caches are per core */
+               fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
                populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+               fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
                populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
        } else {
                populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
        }
 
-       if (c->scache.waysize)
+       if (c->scache.waysize) {
+               /* L2 cache is per cluster */
+               fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
                populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+       }
 
        if (c->tcache.waysize)
                populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
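
With the sibling and cluster masks now filled in, the sharing topology becomes visible to userspace through the generic cacheinfo sysfs nodes; previously the masks did not reflect per-core L1 or per-cluster L2 sharing on MIPS. A quick probe, sketched (index2 assumes the dcache/icache/scache leaf layout built above):

	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_map", "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("cpu0 L2 shared_cpu_map: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}
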
index 5469d43b696685d38025367eac7a3ba56bac2d0b..4849a48afc0f8a7e9e3834c5e9f2fbf6e368e66f 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/thread_info.h>
 #include <asm/war.h>
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 #define resume_kernel  restore_all
 #else
 #define __ret_from_irq ret_from_exception
@@ -27,7 +27,7 @@
 
        .text
        .align  5
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 FEXPORT(ret_from_exception)
        local_irq_disable                       # preempt stop
        b       __ret_from_irq
@@ -53,7 +53,7 @@ resume_userspace:
        bnez    t0, work_pending
        j       restore_all
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 resume_kernel:
        local_irq_disable
        lw      t0, TI_PRE_COUNT($28)
index e5ea3db23d6b4acd6cfcb672bdc70f33c2307d04..cdb93ed91cdec8f229ac60324e83a9345a1402aa 100644 (file)
@@ -194,7 +194,7 @@ static void mips_cm_probe_l2sync(void)
        write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN);
 
        /* Map the region */
-       mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
+       mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
 }
 
 int mips_cm_probe(void)
@@ -215,7 +215,7 @@ int mips_cm_probe(void)
        if (!addr)
                return -ENODEV;
 
-       mips_gcr_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
+       mips_gcr_base = ioremap(addr, MIPS_CM_GCR_SIZE);
        if (!mips_gcr_base)
                return -ENXIO;
 
index 69e3e0b556bf7fd9bcd3533c3e7c2502e8224b45..8d2535123f11c8f31bdb87c95e09dc165e18864b 100644 (file)
@@ -78,7 +78,7 @@ int mips_cpc_probe(void)
        if (!addr)
                return -ENODEV;
 
-       mips_cpc_base = ioremap_nocache(addr, 0x8000);
+       mips_cpc_base = ioremap(addr, 0x8000);
        if (!mips_cpc_base)
                return -ENXIO;
 
index 037b08f3257e0a4e93b2f33f22463919a2a566fc..42222f849bd25d2d63ee9d42413d3d94cd34f799 100644 (file)
@@ -221,16 +221,16 @@ void __init ltq_soc_init(void)
                                res_sys[2].name) < 0))
                pr_err("Failed to request core resources");
 
-       status_membase = ioremap_nocache(res_status.start,
+       status_membase = ioremap(res_status.start,
                                        resource_size(&res_status));
-       ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+       ltq_ebu_membase = ioremap(res_ebu.start,
                                        resource_size(&res_ebu));
 
        if (!status_membase || !ltq_ebu_membase)
                panic("Failed to remap core resources");
 
        for (i = 0; i < 3; i++) {
-               sysctl_membase[i] = ioremap_nocache(res_sys[i].start,
+               sysctl_membase[i] = ioremap(res_sys[i].start,
                                                resource_size(&res_sys[i]));
                if (!sysctl_membase[i])
                        panic("Failed to remap sysctrl resources");
index 115b417dfb8e3a5c1544db506024314619a1a607..df8eed3875f6d41992292e22381841dc4ca2cdad 100644 (file)
@@ -349,7 +349,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
                                        res.name))
                        pr_err("Failed to request icu%i memory\n", vpe);
 
-               ltq_icu_membase[vpe] = ioremap_nocache(res.start,
+               ltq_icu_membase[vpe] = ioremap(res.start,
                                        resource_size(&res));
 
                if (!ltq_icu_membase[vpe])
@@ -402,7 +402,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
                                                        res.name))
                        pr_err("Failed to request eiu memory");
 
-               ltq_eiu_membase = ioremap_nocache(res.start,
+               ltq_eiu_membase = ioremap(res.start,
                                                        resource_size(&res));
                if (!ltq_eiu_membase)
                        panic("Failed to remap eiu memory");
index 156a95ac5c7256e95febf6cf2f22f26fad267224..aa37545ebe8f72d73cd5e9281342695ef05ae553 100644 (file)
@@ -431,10 +431,10 @@ void __init ltq_soc_init(void)
                                res_ebu.name))
                pr_err("Failed to request core resources");
 
-       pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu));
-       ltq_cgu_membase = ioremap_nocache(res_cgu.start,
+       pmu_membase = ioremap(res_pmu.start, resource_size(&res_pmu));
+       ltq_cgu_membase = ioremap(res_cgu.start,
                                                resource_size(&res_cgu));
-       ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+       ltq_ebu_membase = ioremap(res_ebu.start,
                                                resource_size(&res_ebu));
        if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
                panic("Failed to remap core resources");
index e7c87161ce0033b2b77db8ad2a26d5ce863d6a44..e49c406469951cff56c45ec46108024d6aaa291f 100644 (file)
 static inline void loongson_reboot(void)
 {
 #ifndef CONFIG_CPU_JUMP_WORKAROUNDS
-       ((void (*)(void))ioremap_nocache(LOONGSON_BOOT_BASE, 4)) ();
+       ((void (*)(void))ioremap(LOONGSON_BOOT_BASE, 4)) ();
 #else
        void (*func)(void);
 
-       func = (void *)ioremap_nocache(LOONGSON_BOOT_BASE, 4);
+       func = (void *)ioremap(LOONGSON_BOOT_BASE, 4);
 
        __asm__ __volatile__(
        "       .set    noat                                            \n"
index 73dd251424843c1f343b6e609a9d72023ab8be09..fd76114fa3b0a526a55007dc2b32526abf45a227 100644 (file)
@@ -26,13 +26,13 @@ void __init prom_init(void)
                memsize = DEFAULT_MEMSIZE;
 
        if (strstr(arcs_cmdline, "console=ttyS3"))
-               uart_base = ioremap_nocache(LS1X_UART3_BASE, 0x0f);
+               uart_base = ioremap(LS1X_UART3_BASE, 0x0f);
        else if (strstr(arcs_cmdline, "console=ttyS2"))
-               uart_base = ioremap_nocache(LS1X_UART2_BASE, 0x0f);
+               uart_base = ioremap(LS1X_UART2_BASE, 0x0f);
        else if (strstr(arcs_cmdline, "console=ttyS1"))
-               uart_base = ioremap_nocache(LS1X_UART1_BASE, 0x0f);
+               uart_base = ioremap(LS1X_UART1_BASE, 0x0f);
        else
-               uart_base = ioremap_nocache(LS1X_UART0_BASE, 0x0f);
+               uart_base = ioremap(LS1X_UART0_BASE, 0x0f);
        setup_8250_early_printk_port((unsigned long)uart_base, 0, 0);
 }
 
index 6c36a414dde71a6bc12e4377226d9ad37115133a..0c7399b303fbe7b073929ec4f7f181f0ec4c5619 100644 (file)
@@ -37,7 +37,7 @@ static void ls1x_power_off(void)
 
 static int __init ls1x_reboot_setup(void)
 {
-       wdt_reg_base = ioremap_nocache(LS1X_WDT_BASE, (SZ_4 + SZ_8));
+       wdt_reg_base = ioremap(LS1X_WDT_BASE, (SZ_4 + SZ_8));
        if (!wdt_reg_base)
                panic("Failed to remap watchdog registers");
 
index f97662045c73c8c1d17b6d6f4bafe454cf13b6d3..4cc73f7ac0d406c08799ec1722a5522e55c4035e 100644 (file)
@@ -49,7 +49,7 @@ static inline void ls1x_pwmtimer_restart(void)
 
 void __init ls1x_pwmtimer_init(void)
 {
-       timer_reg_base = ioremap_nocache(LS1X_TIMER_BASE, SZ_16);
+       timer_reg_base = ioremap(LS1X_TIMER_BASE, SZ_16);
        if (!timer_reg_base)
                panic("Failed to remap timer registers");
 
index 88b3bd5fed257978f5d1162bb73c4854ab852e20..bc7671079f0c62f701bd0aa61a3c0aa91dc020e4 100644 (file)
@@ -17,7 +17,7 @@
 
 static inline void loongson_reboot(void)
 {
-       ((void (*)(void))ioremap_nocache(LOONGSON_BOOT_BASE, 4)) ();
+       ((void (*)(void))ioremap(LOONGSON_BOOT_BASE, 4)) ();
 }
 
 static void loongson_restart(char *command)
index 98a063093b69aac2d66f782736125d62cddf41b1..0ddf03df626881d34293e7aea7bd8cd73a32f646 100644 (file)
@@ -240,7 +240,7 @@ static void __init remove_gic(void *fdt)
                 * On systems using the RocIT system controller a GIC may be
                 * present without a CM. Detect whether that is the case.
                 */
-               biu_base = ioremap_nocache(MSC01_BIU_REG_BASE,
+               biu_base = ioremap(MSC01_BIU_REG_BASE,
                                MSC01_BIU_ADDRSPACE_SZ);
                sc_cfg = __raw_readl(biu_base + MSC01_SC_CFG_OFS);
                if (sc_cfg & MSC01_SC_CFG_GICPRES_MSK) {
index 46b76751f3a5fface8c7c2578487a3c043f9c258..561154cbcc401eb8e5eee381af4873f638583fc1 100644 (file)
@@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
 static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 {
        int off, b_off;
+       int tcc_reg;
 
        ctx->flags |= EBPF_SEEN_TC;
        /*
@@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
        b_off = b_imm(this_idx + 1, ctx);
        emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
        /*
-        * if (--TCC < 0)
+        * if (TCC-- < 0)
         *     goto out;
         */
        /* Delay slot */
-       emit_instr(ctx, daddiu, MIPS_R_T5,
-                  (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+       tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+       emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
        b_off = b_imm(this_idx + 1, ctx);
-       emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+       emit_instr(ctx, bltz, tcc_reg, b_off);
        /*
         * prog = array->ptrs[index];
         * if (prog == NULL)
@@ -1803,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        unsigned int image_size;
        u8 *image_ptr;
 
-       if (!prog->jit_requested || MIPS_ISA_REV < 2)
+       if (!prog->jit_requested)
                return prog;
 
        tmp = bpf_jit_blind_constants(prog);
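
Two independent changes land in this JIT: the Kconfig hunk earlier gates HAVE_EBPF_JIT on 64BIT and TARGET_ISA_REV >= 2, replacing the runtime MIPS_ISA_REV check deleted in the second hunk, and the tail-call bound switches from pre- to post-decrement semantics, branching on the old count in tcc_reg while t5 still carries the decremented count forward. In C terms:

	/* before: the freshly decremented value was tested,
	 * apparently cutting the permitted chain one call short */
	if (--tcc < 0)
		goto out;

	/* after: the old value is tested, matching the updated comment */
	if (tcc-- < 0)
		goto out;
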
index 4f2411f489afbc2bd7b481dae87bcc4e1a048d57..01a2af8215c8d1746748eff19703036ec8674d97 100644 (file)
@@ -409,7 +409,7 @@ static int alchemy_pci_probe(struct platform_device *pdev)
                goto out6;
        }
 
-       ctx->regs = ioremap_nocache(r->start, resource_size(r));
+       ctx->regs = ioremap(r->start, resource_size(r));
        if (!ctx->regs) {
                dev_err(&pdev->dev, "cannot map pci regs\n");
                ret = -ENODEV;
index 0fed6fc17fe4090e40b3422a1316d1e6c4ec8d70..490953f515282af4776efb371dedfc72667b3335 100644 (file)
@@ -441,7 +441,7 @@ static int ar2315_pci_probe(struct platform_device *pdev)
        apc->mem_res.flags = IORESOURCE_MEM;
 
        /* Remap PCI config space */
-       apc->cfg_mem = devm_ioremap_nocache(dev, res->start,
+       apc->cfg_mem = devm_ioremap(dev, res->start,
                                            AR2315_PCI_CFG_SIZE);
        if (!apc->cfg_mem) {
                dev_err(dev, "failed to remap PCI config space\n");
index 151d9b5870bb70a4553ed3a5fc2574fd59986473..5548365605c02faa5d82d2b506aedd72b08ff24b 100644 (file)
@@ -221,7 +221,7 @@ static int __init bcm63xx_register_pci(void)
         * a spinlock for each io access, so this is currently kind of
         * broken on SMP.
         */
-       pci_iospace_start = ioremap_nocache(BCM_PCI_IO_BASE_PA, 4);
+       pci_iospace_start = ioremap(BCM_PCI_IO_BASE_PA, 4);
        if (!pci_iospace_start)
                return -ENOMEM;
 
index c9f4d4ba058aa8632b8d7435a106c4d620b1a3f0..e1f12e39813634ae8f9ecff13c0aff0cad1d6c82 100644 (file)
@@ -218,7 +218,7 @@ static int rt288x_pci_probe(struct platform_device *pdev)
 {
        void __iomem *io_map_base;
 
-       rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE);
+       rt2880_pci_base = ioremap(RT2880_PCI_BASE, PAGE_SIZE);
 
        io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE);
        rt2880_pci_controller.io_map_base = (unsigned long) io_map_base;
index 8c236738b5ee300cc299f7bb848f5ea04501e030..25372e62783b584456162362d6765aca7a73b829 100644 (file)
@@ -135,7 +135,7 @@ void __init fw_init_early_console(char port)
        char *arch_cmdline = pic32_getcmdline();
        int baud = -1;
 
-       uart_base = ioremap_nocache(PIC32_BASE_UART, 0xc00);
+       uart_base = ioremap(PIC32_BASE_UART, 0xc00);
 
        baud = get_baud_from_cmdline(arch_cmdline);
        if (port == -1)
index 504e6ab399b5b63b636de37ac725638f424095dc..f2822632b017350ba067f9b833c13a0378bebf6d 100644 (file)
@@ -122,7 +122,7 @@ static const struct
 
 void pic32_pps_input(int function, int pin)
 {
-       void __iomem *pps_base = ioremap_nocache(PPS_BASE, 0xF4);
+       void __iomem *pps_base = ioremap(PPS_BASE, 0xF4);
        int i;
 
        for (i = 0; i < ARRAY_SIZE(input_pin_reg); i++) {
@@ -252,7 +252,7 @@ static const struct
 
 void pic32_pps_output(int function, int pin)
 {
-       void __iomem *pps_base = ioremap_nocache(PPS_BASE, 0x170);
+       void __iomem *pps_base = ioremap(PPS_BASE, 0x170);
        int i;
 
        for (i = 0; i < ARRAY_SIZE(output_pin_reg); i++) {
index 8e6e8db8dd5fa46e2749172c357d328968ddff09..940c684f692148290f8563d1363b66223fb8c446 100644 (file)
@@ -105,7 +105,7 @@ void __init msp_serial_setup(void)
 
        /* Initialize first serial port */
        up.mapbase      = MSP_UART0_BASE;
-       up.membase      = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
+       up.membase      = ioremap(up.mapbase, MSP_UART_REG_LEN);
        up.irq          = MSP_INT_UART0;
        up.uartclk      = uartclk;
        up.regshift     = 2;
@@ -143,7 +143,7 @@ void __init msp_serial_setup(void)
        }
 
        up.mapbase      = MSP_UART1_BASE;
-       up.membase      = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
+       up.membase      = ioremap(up.mapbase, MSP_UART_REG_LEN);
        up.irq          = MSP_INT_UART1;
        up.line         = 1;
        up.private_data         = (void*)UART1_STATUS_REG;
index c945d76cfce52dc81cc7083ae96aef9284dcdb51..220ca0cd7945e218942fe6d97c3972389d2a32b1 100644 (file)
@@ -165,7 +165,7 @@ static int __init intc_of_init(struct device_node *node,
                                res.name))
                pr_err("Failed to request intc memory");
 
-       rt_intc_membase = ioremap_nocache(res.start,
+       rt_intc_membase = ioremap(res.start,
                                        resource_size(&res));
        if (!rt_intc_membase)
                panic("Failed to remap intc memory");
index 59b23095bfbb422c97ac0b506576dd94ce0de769..90c6d4a11c5d1c027b35a62db5aaa202e456d8d8 100644 (file)
@@ -43,7 +43,7 @@ __iomem void *plat_of_remap_node(const char *node)
                                res.name))
                panic("Failed to request resources for %s", node);
 
-       return ioremap_nocache(res.start, resource_size(&res));
+       return ioremap(res.start, resource_size(&res));
 }
 
 void __init device_tree_init(void)
index c9ecf17f866055b3736d8d14e3c10cce4bd45749..dd34f1b32b7976ca316f50235448788d20b49160 100644 (file)
@@ -286,7 +286,7 @@ static int __init plat_setup_devices(void)
        nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000;
 
        /* Read and map device controller 3 */
-       dev3.base = ioremap_nocache(readl(IDT434_REG_BASE + DEV3BASE), 1);
+       dev3.base = ioremap(readl(IDT434_REG_BASE + DEV3BASE), 1);
 
        if (!dev3.base) {
                printk(KERN_ERR "rb532: cannot remap device controller 3\n");
index fdc704abc8d4f076215757c9c4122fa9c91e7a01..94f02ada4082622d72dac89e2d89463217be6f20 100644 (file)
@@ -192,7 +192,7 @@ int __init rb532_gpio_init(void)
        struct resource *r;
 
        r = rb532_gpio_reg0_res;
-       rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r));
+       rb532_gpio_chip->regbase = ioremap(r->start, resource_size(r));
 
        if (!rb532_gpio_chip->regbase) {
                printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
index 26e957b21fbf7709c35ac303843cffd669a52971..303cc3dc1749304461e11e6f8c89e6892dc5bb93 100644 (file)
@@ -110,7 +110,7 @@ void __init prom_init(void)
        phys_addr_t memsize;
        phys_addr_t ddrbase;
 
-       ddr = ioremap_nocache(ddr_reg[0].start,
+       ddr = ioremap(ddr_reg[0].start,
                        ddr_reg[0].end - ddr_reg[0].start);
 
        if (!ddr) {
index 1aa4df1385cbc674a966fcf560e9bf247b8e1360..51af9d374d66c45101e07c7265476f9902db5267 100644 (file)
@@ -49,7 +49,7 @@ void __init plat_mem_setup(void)
 
        set_io_port_base(KSEG1);
 
-       pci_reg = ioremap_nocache(pci0_res[0].start,
+       pci_reg = ioremap(pci0_res[0].start,
                                pci0_res[0].end - pci0_res[0].start);
        if (!pci_reg) {
                printk(KERN_ERR "Could not remap PCI registers\n");
index 160b88000b4b70815edeec254913bf834a347cdc..f6fa9afcbfd3462eb6913f3046acdb1ab06da26d 100644 (file)
@@ -399,10 +399,10 @@ void __init sni_rm200_i8259_irqs(void)
 {
        int i;
 
-       rm200_pic_master = ioremap_nocache(0x16000020, 4);
+       rm200_pic_master = ioremap(0x16000020, 4);
        if (!rm200_pic_master)
                return;
-       rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
+       rm200_pic_slave = ioremap(0x160000a0, 4);
        if (!rm200_pic_slave) {
                iounmap(rm200_pic_master);
                return;
index e05938997e696a99409215bc3ae7c44d7d8e58d4..b2a2e032dc991bb85738d776c5b0df7b30a558d2 100644 (file)
@@ -18,6 +18,10 @@ ccflags-vdso := \
        $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
        -D__VDSO__
 
+ifndef CONFIG_64BIT
+ccflags-vdso += -DBUILD_VDSO32
+endif
+
 ifdef CONFIG_CC_IS_CLANG
 ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
 endif
index 6ebdc37c89fc6a11a32314136c99a7f1a95bf7f3..6b83b6376a4b58d340f1c464cbbf402c588caf73 100644 (file)
@@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock,
        return __cvdso_clock_gettime32(clock, ts);
 }
 
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
                        struct timezone *tz)
 {
        return __cvdso_gettimeofday(tv, tz);
 }
 
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
 int __vdso_clock_getres(clockid_t clock_id,
                        struct old_timespec32 *res)
 {
@@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock,
        return __cvdso_clock_gettime(clock, ts);
 }
 
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
                        struct timezone *tz)
 {
        return __cvdso_gettimeofday(tv, tz);
 }
 
+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
 int __vdso_clock_getres(clockid_t clock_id,
                        struct __kernel_timespec *res)
 {
index 12c06a833b7cd8b869f260220b83c53186323b5e..e30298e99e1bdf00a1673eb124c5717523039c3e 100644 (file)
@@ -62,7 +62,7 @@ config GENERIC_HWEIGHT
 
 config GENERIC_LOCKBREAK
        def_bool y
-       depends on PREEMPT
+       depends on PREEMPTION
 
 config TRACE_IRQFLAGS_SUPPORT
        def_bool y
index d9ac7e6408ef3196253f3c6838d111a51a1f0973..caddded56e77f3603e93bbb0496b52a2dcb3a60a 100644 (file)
@@ -9,7 +9,11 @@
 #define PG_dcache_dirty PG_arch_1
 
 void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
+
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#define flush_icache_page flush_icache_page
+
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size);
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
-#include <asm-generic/cacheflush.h>
-#undef flush_icache_range
-#undef flush_icache_page
-#undef flush_icache_user_range
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len);
+#define flush_icache_user_range flush_icache_user_range
+
+#include <asm-generic/cacheflush.h>
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
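
The reshuffle above follows the contract of <asm-generic/cacheflush.h>: each flush hook the generic header would stub out is suppressed by defining its own name before the include, so the arch override wins and the old include-then-#undef dance goes away. The pattern, distilled:

	/* arch cacheflush.h (sketch): declare the override, then claim the name */
	void flush_icache_range(unsigned long start, unsigned long end);
	#define flush_icache_range flush_icache_range

	/* the generic header wraps each default in #ifndef <name> ... #endif,
	 * so the definition above keeps its no-op stub out; no #undef needed */
	#include <asm-generic/cacheflush.h>
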
index 0214e415053902c34d65ecf03d4dd581c49fb72b..6abc58ac406dcd542b3bc5497d80452cba52ff0b 100644 (file)
@@ -195,7 +195,7 @@ extern void paging_init(void);
 #define pte_unmap(pte)         do { } while (0)
 #define pte_unmap_nested(pte)  do { } while (0)
 
-#define pmd_off_k(address)     pmd_offset(pgd_offset_k(address), address)
+#define pmd_off_k(address)     pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address))
 
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 /*
diff --git a/arch/nds32/include/asm/vmalloc.h b/arch/nds32/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..caeed38
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_NDS32_VMALLOC_H
+#define _ASM_NDS32_VMALLOC_H
+
+#endif /* _ASM_NDS32_VMALLOC_H */
index 1df02a79336417fb7eef1ca34d199f378b514659..6a2966c2d8c8f4e2af2dad5ede211de8c8f046fd 100644 (file)
@@ -72,7 +72,7 @@
        restore_user_regs_last
        .endm
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        .macro  preempt_stop
        .endm
 #else
@@ -158,7 +158,7 @@ no_work_pending:
 /*
  * preemptive kernel
  */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 resume_kernel:
        gie_disable
        lwi     $t0, [tsk+#TSK_TI_PREEMPT]
index fd2a54b8cd5733f8ce64455bdf8762bd8a3a8251..22ab77ea27ad37114925f41a964af15a0d04ac0c 100644 (file)
@@ -89,18 +89,6 @@ int __init ftrace_dyn_arch_init(void)
        return 0;
 }
 
-int ftrace_arch_code_modify_prepare(void)
-{
-       set_all_modules_text_rw();
-       return 0;
-}
-
-int ftrace_arch_code_modify_post_process(void)
-{
-       set_all_modules_text_ro();
-       return 0;
-}
-
 static unsigned long gen_sethi_insn(unsigned long addr)
 {
        unsigned long opcode = 0x46000000;
diff --git a/arch/nios2/include/asm/vmalloc.h b/arch/nios2/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..ec7a926
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_NIOS2_VMALLOC_H
+#define _ASM_NIOS2_VMALLOC_H
+
+#endif /* _ASM_NIOS2_VMALLOC_H */
index 1e515ccd698e38f4a437b43ee6a97eb8c2aed987..3d8d1d0bcb64b2496dc5ea9ea0dbf2a37383539b 100644 (file)
@@ -365,7 +365,7 @@ ENTRY(ret_from_interrupt)
        ldw     r1, PT_ESTATUS(sp)      /* check if returning to kernel */
        TSTBNZ  r1, r1, ESTATUS_EU, Luser_return
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        GET_THREAD_INFO r1
        ldw     r4, TI_PREEMPT_COUNT(r1)
        bne     r4, r0, restore_all
index b56af759dcdfcc2b8c3d19ff7bfed99bda0ddde2..819bdfcc2e714d64ba1657de319dcc1ba6b6a5da 100644 (file)
@@ -138,6 +138,14 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
                                return NULL;
        }
 
+       /*
+        * Map uncached objects in the low part of address space to
+        * CONFIG_NIOS2_IO_REGION_BASE
+        */
+       if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
+           IS_MAPPABLE_UNCACHEABLE(last_addr))
+               return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
+
        /* Mappings have to be page-aligned */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
diff --git a/arch/openrisc/include/asm/vmalloc.h b/arch/openrisc/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..75435ec
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_OPENRISC_VMALLOC_H
+#define _ASM_OPENRISC_VMALLOC_H
+
+#endif /* _ASM_OPENRISC_VMALLOC_H */
index b16237c95ea33bd1fb75f50b91fbd6ae053b1687..71034b54d74e3e45c750ad2854621a3f65d0901b 100644 (file)
@@ -18,7 +18,7 @@ config PARISC
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
        select BUG
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select HAVE_PCI
        select HAVE_PERF_EVENTS
        select HAVE_KERNEL_BZIP2
@@ -62,6 +62,7 @@ config PARISC
        select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
        select HAVE_KPROBES_ON_FTRACE
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
+       select HAVE_COPY_THREAD_TLS
 
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
@@ -81,7 +82,7 @@ config STACK_GROWSUP
 config GENERIC_LOCKBREAK
        bool
        default y
-       depends on SMP && PREEMPT
+       depends on SMP && PREEMPTION
 
 config ARCH_HAS_ILOG2_U32
        bool
index f627c37dad9c92a7beeb277ead459726afcf5dad..ab5c215cf46c3d81a5ef840fe46ffe740174c7ec 100644 (file)
@@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
 **             if (((unsigned long)p & 0xf) == 0)
 **                     return __ldcw(p);
 */
-#define xchg(ptr, x) \
-       ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, x)                                                   \
+({                                                                     \
+       __typeof__(*(ptr)) __ret;                                       \
+       __typeof__(*(ptr)) _x_ = (x);                                   \
+       __ret = (__typeof__(*(ptr)))                                    \
+               __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));      \
+       __ret;                                                          \
+})
 
 /* bug catcher for when unsupported size is used - won't link */
 extern void __cmpxchg_called_with_bad_pointer(void);
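
(The xchg() rewrite above switches to a GCC statement expression so the
argument is evaluated exactly once into a local of the pointee's type; a
plausible motivation, assumed here, is that the bare cast form provokes
"value computed is not used" warnings when callers discard the result.
Usage sketch with hypothetical variables:)

    unsigned long slot = 1, old;

    old = xchg(&slot, 2UL); /* yields the previous value, typed like *ptr */
    xchg(&slot, 0UL);       /* result can now be dropped without a warning */
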
index 46212b52c23e312a483a3ada6a517a4d4f0e9f4e..cab8f64ca4a20f8882ba9f17b3373d6342759af0 100644 (file)
@@ -128,9 +128,8 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr)
  * The standard PCI ioremap interfaces
  */
 void __iomem *ioremap(unsigned long offset, unsigned long size);
-#define ioremap_nocache(off, sz)       ioremap((off), (sz))
-#define ioremap_wc                     ioremap_nocache
-#define ioremap_uc                     ioremap_nocache
+#define ioremap_wc                     ioremap
+#define ioremap_uc                     ioremap
 
 extern void iounmap(const volatile void __iomem *addr);
 
index a99ea747d7edefb681d7eca88917efaca7c245aa..87e17400699551f07af20cf152392bf76c26d9df 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef _ASM_PARISC_KEXEC_H
 #define _ASM_PARISC_KEXEC_H
 
-#ifdef CONFIG_KEXEC
-
 /* Maximum physical address we can use pages from */
 #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
 /* Maximum address we can reach in physical address mode */
@@ -32,6 +30,4 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* CONFIG_KEXEC */
-
 #endif /* _ASM_PARISC_KEXEC_H */
diff --git a/arch/parisc/include/asm/vmalloc.h b/arch/parisc/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..1088ae4
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_PARISC_VMALLOC_H
+#define _ASM_PARISC_VMALLOC_H
+
+#endif /* _ASM_PARISC_VMALLOC_H */
index 2663c8f8be115a0929b2e6f933f4b3e05ca9e98d..068d90950d9378e0a50e13d531c6c16cd85751d7 100644 (file)
@@ -37,5 +37,5 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER)   += ftrace.o
 obj-$(CONFIG_JUMP_LABEL)               += jump_label.o
 obj-$(CONFIG_KGDB)                     += kgdb.o
 obj-$(CONFIG_KPROBES)                  += kprobes.o
-obj-$(CONFIG_KEXEC)                    += kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE)               += kexec.o relocate_kernel.o
 obj-$(CONFIG_KEXEC_FILE)               += kexec_file.o
index 3b330e58a4f033e19ca59a711a0c8e3ddd6842c1..a5f3e50fe97619eff506b92fceb75699bb530548 100644 (file)
@@ -810,7 +810,7 @@ EXPORT_SYMBOL(device_to_hwpath);
 static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
                             struct device *parent);
 
-static void walk_lower_bus(struct parisc_device *dev)
+static void __init walk_lower_bus(struct parisc_device *dev)
 {
        unsigned long io_io_low, io_io_high;
 
@@ -889,8 +889,8 @@ static void print_parisc_device(struct parisc_device *dev)
        static int count;
 
        print_pa_hwpath(dev, hw_path);
-       pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
-               ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
+       pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+               ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
                dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
 
        if (dev->num_addrs) {
index b96d7449697791260fdf2a28985ba7174f412668..9a03e29c87330f3f00228ccdec62c40f9cbab2aa 100644 (file)
@@ -940,14 +940,14 @@ intr_restore:
        rfi
        nop
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 # define intr_do_preempt       intr_restore
-#endif /* !CONFIG_PREEMPT */
+#endif /* !CONFIG_PREEMPTION */
 
        .import schedule,code
 intr_do_resched:
        /* Only call schedule on return to userspace. If we're returning
-        * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+        * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
         * we jump back to intr_restore.
         */
        LDREG   PT_IASQ0(%r16), %r20
@@ -979,7 +979,7 @@ intr_do_resched:
         * and preempt_count is 0. otherwise, we continue on
         * our merry way back to the current running task.
         */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        .import preempt_schedule_irq,code
 intr_do_preempt:
        rsm     PSW_SM_I, %r0           /* disable interrupts */
@@ -999,7 +999,7 @@ intr_do_preempt:
        nop
 
        b,n     intr_restore            /* ssm PSW_SM_I done by intr_restore */
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
        /*
         * External interrupts.
index 36434d4da381a2691fea6918b2eda3203bc44dc9..749c4579db0d5542ac274a8198f9f3d825be4d3b 100644 (file)
@@ -327,8 +327,7 @@ static int pdt_mainloop(void *unused)
                            ((pde & PDT_ADDR_SINGLE_ERR) == 0))
                                memory_failure(pde >> PAGE_SHIFT, 0);
                        else
-                               soft_offline_page(
-                                       pfn_to_page(pde >> PAGE_SHIFT), 0);
+                               soft_offline_page(pde >> PAGE_SHIFT, 0);
 #else
                        pr_crit("PDT: memory error at 0x%lx ignored.\n"
                                "Rebuild kernel with CONFIG_MEMORY_FAILURE=y "
index 676683641d00c9a3e3798d8ce225de00b451eebd..e1a8fee3ad491f62424c02769769235d15a0297e 100644 (file)
@@ -792,7 +792,7 @@ static int perf_write_image(uint64_t *memaddr)
                return -1;
        }
 
-       runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+       runway = ioremap(cpu_device->hpa.start, 4096);
        if (!runway) {
                pr_err("perf_write_image: ioremap failed!\n");
                return -ENOMEM;
index ecc5c277120871efe35a6cebc6158397790d1c0e..230a6422b99f369c52a5d90d113a9a9ffd94b77c 100644 (file)
@@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init);
  * Copy architecture-specific thread state
  */
 int
-copy_thread(unsigned long clone_flags, unsigned long usp,
-           unsigned long kthread_arg, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+           unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
 {
        struct pt_regs *cregs = &(p->thread.regs);
        void *stack = task_stack_page(p);
@@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
                cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
                cregs->kpc = (unsigned long) &child_return;
 
-               /* Setup thread TLS area from the 4th parameter in clone */
+               /* Setup thread TLS area */
                if (clone_flags & CLONE_SETTLS)
-                       cregs->cr27 = cregs->gr[23];
+                       cregs->cr27 = tls;
        }
 
        return 0;
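
(Switching parisc to copy_thread_tls() means the TLS pointer arrives as an
explicit argument instead of being recovered from the clone() register
layout, gr[23] above; that is what lets clone3() work, since it passes its
arguments in a struct rather than in fixed registers. A hedged userspace
illustration, assuming current uapi headers; tls_block is hypothetical:)

    #include <linux/sched.h>        /* struct clone_args, CLONE_SETTLS */
    #include <sys/syscall.h>
    #include <unistd.h>

    static char tls_block[4096];

    static long spawn_with_tls(void)
    {
            struct clone_args args = {
                    .flags = CLONE_SETTLS,
                    .tls   = (__u64)(unsigned long)tls_block,
            };
            /* no register carries the TLS pointer; the kernel reads args.tls */
            return syscall(SYS_clone3, &args, sizeof(args));
    }
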
index ddca8287d43bac22e772854d7100975312146866..354cf060b67fc1d4964d0e24cb811b4a48c5cc32 100644 (file)
@@ -401,7 +401,7 @@ static void __init map_pages(unsigned long start_vaddr,
                        pmd = (pmd_t *) __pa(pmd);
                }
 
-               pgd_populate(NULL, pg_dir, __va(pmd));
+               pud_populate(NULL, (pud_t *)pg_dir, __va(pmd));
 #endif
                pg_dir++;
 
index 1ec34e16ed65d38c1e7a7d0c44e11c01a1fa7a71..e7c607059f546404a65e912b6f2d16c070ff27d8 100644 (file)
@@ -106,7 +106,7 @@ config LOCKDEP_SUPPORT
 config GENERIC_LOCKBREAK
        bool
        default y
-       depends on SMP && PREEMPT
+       depends on SMP && PREEMPTION
 
 config GENERIC_HWEIGHT
        bool
@@ -149,7 +149,7 @@ config PPC
        select ARCH_WANT_IPC_PARSE_VERSION
        select ARCH_WEAK_RELEASE_ACQUIRE
        select BINFMT_ELF
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select CLONE_BACKWARDS
        select DCACHE_WORD_ACCESS               if PPC64 && CPU_LITTLE_ENDIAN
        select DYNAMIC_FTRACE                   if FUNCTION_TRACER
@@ -455,11 +455,7 @@ config PPC_TRANSACTIONAL_MEM
 config PPC_UV
        bool "Ultravisor support"
        depends on KVM_BOOK3S_HV_POSSIBLE
-       select ZONE_DEVICE
-       select DEV_PAGEMAP_OPS
-       select DEVICE_PRIVATE
-       select MEMORY_HOTPLUG
-       select MEMORY_HOTREMOVE
+       depends on DEVICE_PRIVATE
        default n
        help
          This option paravirtualizes the kernel to run in POWER platforms that
index e1a961f05dcd5b04b640827810cd198ff6fcb753..baa0c503e741b87f89609ca66c907fd5b36ecea7 100644 (file)
@@ -63,6 +63,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy0: ethernet-phy@0 {
                        reg = <0x0>;
index c288f3c6c6378dc5fd3c20cf1605ecd031013c89..93095600e8086f617b515047a4c70275d1cf88f8 100644 (file)
@@ -60,6 +60,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy6: ethernet-phy@0 {
                        reg = <0x0>;
index 94f3e71750124b5c8a58861909d5d017b05262ca..ff4bd38f064599947096249e51149f8a0e43383f 100644 (file)
@@ -63,6 +63,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy1: ethernet-phy@0 {
                        reg = <0x0>;
index 94a76982d214b7b963d4b66a9f5161793a2b8b55..1fa38ed6f59e269c70a6de0e8ca41d40024bcb62 100644 (file)
@@ -60,6 +60,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy7: ethernet-phy@0 {
                        reg = <0x0>;
index b5ff5f71c6b8b01874dd1b436e416409c2ac0759..a8cc9780c0c4225ac48c3fba7ca096089bf1c2f3 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy0: ethernet-phy@0 {
                        reg = <0x0>;
index ee44182c634853566c9740b04631c3564e8bd020..8b8bd70c93823df4ca8fc71574f6e9417c7bf1cb 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy1: ethernet-phy@0 {
                        reg = <0x0>;
index f05f0d775039b53aab57cdcd28fa8ac3ebde3490..619c880b54d8d39202cb58579974992ef2bf919f 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe5000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy2: ethernet-phy@0 {
                        reg = <0x0>;
index a9114ec510759e74036766b1ec1dbcaf42bcfa07..d7ebb73a400d06978aaedadd2efd62770159393c 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe7000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy3: ethernet-phy@0 {
                        reg = <0x0>;
index 44dd00ac7367fd6a100131eec3b7388741062383..b151d696a0699b24e90a5c290dba6cc41e6acccf 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe9000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy4: ethernet-phy@0 {
                        reg = <0x0>;
index 5b1b84b58602fa46e06cebb7be6b390d59372434..adc0ae0013a3ce89f8d1e26bd14a6f6f15af4526 100644 (file)
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xeb000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy5: ethernet-phy@0 {
                        reg = <0x0>;
index 0e1daaef9e74b8fe2b6f51369a11c66b238e7dfd..435047e0e250e2e95d73d0e3de6b605060beb378 100644 (file)
@@ -60,6 +60,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy14: ethernet-phy@0 {
                        reg = <0x0>;
index 68c5ef779266a8692e712a0ddcfa51db0822d945..c098657cca0a7b8330590ef69b129af4b06b8e4d 100644 (file)
@@ -60,6 +60,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy15: ethernet-phy@0 {
                        reg = <0x0>;
index 605363cc1117fbf38b334e68a9b21815325cfe74..9d06824815f342ca7b1d30c5e94159da76e936e9 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy8: ethernet-phy@0 {
                        reg = <0x0>;
index 1955dfa136348f12ea29e0c4fdd3abb7438784fe..70e947730c4ba72b715763fb1fe2ddf6c6ef0fec 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy9: ethernet-phy@0 {
                        reg = <0x0>;
index 2c1476454ee01bb81069c325ed7c02c20f70963d..ad96e6529595960f14be688feee58d3e3a6b84e9 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe5000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy10: ethernet-phy@0 {
                        reg = <0x0>;
index b8b541ff5fb034e597c427431e15b4e01ed0de26..034bc4b71f7a506a064b37c320beea2bedd0f390 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe7000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy11: ethernet-phy@0 {
                        reg = <0x0>;
index 4b2cfddd1b1553bbb34d53a236cd399fb96441b1..93ca23d82b39ba9cfdf0558afe1da18d0518550d 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe9000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy12: ethernet-phy@0 {
                        reg = <0x0>;
index 0a52ddf7cc171e6b830bc2707bc0be9d8ac26ee0..23b3117a2fd2a3871627bc34a68e14d0159e52ca 100644 (file)
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xeb000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy13: ethernet-phy@0 {
                        reg = <0x0>;
index fbe8df433019007e4122d241cdfccfdae270e291..123adcefd40f691185112b0794130d2bb8abf961 100644 (file)
@@ -18,8 +18,6 @@
  * mb() prevents loads and stores being reordered across this point.
  * rmb() prevents loads being reordered across this point.
  * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- *     across this point (nop on PPC).
  *
  * *mb() variants without smp_ prefix must order all types of memory
  * operations with one another. sync is the only instruction sufficient
index 15b75005bc34e007f45e3566c8abe663284f4753..3fa1b962dc2799e451047e8f4ccc6e15a828a7f5 100644 (file)
@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
  *
  */
 #define MAX_USER_CONTEXT       ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
 #define MIN_USER_CONTEXT       (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
-                                MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
+                                MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
+
 /*
  * For platforms that support on 65bit VA we limit the context bits
  */
index a63ec938636de5107579dfc5db3af416d1393b3d..635969b5b58e529c875c10cdce5f112537b79728 100644 (file)
@@ -691,8 +691,6 @@ static inline void iosync(void)
  * * ioremap_prot allows to specify the page flags as an argument and can
  *   also be hooked by the platform via ppc_md.
  *
- * * ioremap_nocache is identical to ioremap
- *
  * * ioremap_wc enables write combining
  *
  * * ioremap_wt enables write through
@@ -715,7 +713,6 @@ extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
 extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
 void __iomem *ioremap_wt(phys_addr_t address, unsigned long size);
 void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
-#define ioremap_nocache(addr, size)    ioremap((addr), (size))
 #define ioremap_uc(addr, size)         ioremap((addr), (size))
 #define ioremap_cache(addr, size) \
        ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
index e9a960e28f3c8468af557533ce3f9b71154d7cac..860228e917dce1d4e8279a4b1a1f97834e606d2c 100644 (file)
@@ -15,6 +15,7 @@
  *
  * (the type definitions are in asm/spinlock_types.h)
  */
+#include <linux/jump_label.h>
 #include <linux/irqflags.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #endif
 
 #ifdef CONFIG_PPC_PSERIES
+DECLARE_STATIC_KEY_FALSE(shared_processor);
+
 #define vcpu_is_preempted vcpu_is_preempted
 static inline bool vcpu_is_preempted(int cpu)
 {
-       if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+       if (!static_branch_unlikely(&shared_processor))
                return false;
        return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
 }
@@ -110,13 +113,8 @@ static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
 
 static inline bool is_shared_processor(void)
 {
-/*
- * LPPACA is only available on Pseries so guard anything LPPACA related to
- * allow other platforms (which include this common header) to compile.
- */
-#ifdef CONFIG_PPC_PSERIES
-       return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
-               lppaca_shared_proc(local_paca->lppaca_ptr));
+#ifdef CONFIG_PPC_SPLPAR
+       return static_branch_unlikely(&shared_processor);
 #else
        return false;
 #endif
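
(The vcpu_is_preempted() change above replaces a firmware-feature lookup on
every call with a jump label patched once at boot; the shared_processor key
itself is defined in the pseries setup hunk later in this series. A minimal
sketch of the pattern, with hypothetical names:)

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(demo_key);          /* branch starts disabled */

    static void demo_enable_once_at_boot(void)
    {
            static_branch_enable(&demo_key);    /* patches the code sites */
    }

    static bool demo_hot_path(void)
    {
            /* a straight-line nop until the key is enabled */
            return static_branch_unlikely(&demo_key);
    }
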
index 15002b51ff18df83d5d5a5c37c2c65462fffcdc3..c92fe7fe9692ceedcb98d2a88afdecea689bbaca 100644 (file)
@@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
        return n;
 }
 
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
+unsigned long __arch_clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
 {
@@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
        might_fault();
        if (likely(access_ok(addr, size))) {
                allow_write_to_user(addr, size);
-               ret = __clear_user(addr, size);
+               ret = __arch_clear_user(addr, size);
                prevent_write_to_user(addr, size);
        }
        return ret;
 }
 
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+       return clear_user(addr, size);
+}
+
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..b992dfa
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_POWERPC_VMALLOC_H
+#define _ASM_POWERPC_VMALLOC_H
+
+#endif /* _ASM_POWERPC_VMALLOC_H */
index f2dfcd50a2d3e67763b8ebf7f32d20edae34688e..33aee7490cbb1fc2b4ca57a029578f56297c72db 100644 (file)
@@ -39,6 +39,7 @@
 
 #define XIVE_ESB_VAL_P         0x2
 #define XIVE_ESB_VAL_Q         0x1
+#define XIVE_ESB_INVALID       0xFF
 
 /*
  * Thread Management (aka "TM") registers
index d60908ea37fb9ba293a293584c008312f45abd84..e1a4c39b83b86e643f84f0e0211de9c8aacc36c5 100644 (file)
@@ -897,7 +897,7 @@ resume_kernel:
        bne-    0b
 1:
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        /* check current_thread_info->preempt_count */
        lwz     r0,TI_PREEMPT(r2)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
@@ -921,7 +921,7 @@ resume_kernel:
         */
        bl      trace_hardirqs_on
 #endif
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 restore_kuap:
        kuap_restore r1, r2, r9, r10, r0
 
index 3fd3ef352e3fde8f1f83772c6df71e63e21483e6..a9a1d3cdb523bb1449697ce46b67e864032ffe41 100644 (file)
@@ -846,7 +846,7 @@ resume_kernel:
        bne-    0b
 1:
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        /* Check if we need to preempt */
        andi.   r0,r4,_TIF_NEED_RESCHED
        beq+    restore
@@ -877,7 +877,7 @@ resume_kernel:
        li      r10,MSR_RI
        mtmsrd  r10,1             /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
        .globl  fast_exc_return_irq
 fast_exc_return_irq:
index 5645bc9cbc09a28c1fbc324743e4d568c3033dc6..add67498c126bffdc8050f804bed16fa97efd8e4 100644 (file)
@@ -619,8 +619,6 @@ void __do_irq(struct pt_regs *regs)
 
        trace_irq_entry(regs);
 
-       check_stack_overflow();
-
        /*
         * Query the platform PIC for the interrupt & ack it.
         *
@@ -652,6 +650,8 @@ void do_IRQ(struct pt_regs *regs)
        irqsp = hardirq_ctx[raw_smp_processor_id()];
        sirqsp = softirq_ctx[raw_smp_processor_id()];
 
+       check_stack_overflow();
+
        /* Already there ? */
        if (unlikely(cursp == irqsp || cursp == sirqsp)) {
                __do_irq(regs);
index dc53578193ee00ef0cd39b43465af4569a51f5b2..6ff3f896d90816efa520657cb747be3a0ea0572d 100644 (file)
@@ -4983,7 +4983,8 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
                if (nesting_enabled(kvm))
                        kvmhv_release_all_nested(kvm);
                kvm->arch.process_table = 0;
-               uv_svm_terminate(kvm->arch.lpid);
+               if (kvm->arch.secure_guest)
+                       uv_svm_terminate(kvm->arch.lpid);
                kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
        }
 
index 0496e66aaa565fbe69f8fb238f503b0eccb6a27f..c6fbbd29bd8717a44bcac3cc5706269867d9d272 100644 (file)
@@ -1117,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        ld      r7, VCPU_GPR(R7)(r4)
        bne     ret_to_ultra
 
-       lwz     r0, VCPU_CR(r4)
+       ld      r0, VCPU_CR(r4)
        mtcr    r0
 
        ld      r0, VCPU_GPR(R0)(r4)
@@ -1137,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  *   R3 = UV_RETURN
  */
 ret_to_ultra:
-       lwz     r0, VCPU_CR(r4)
+       ld      r0, VCPU_CR(r4)
        mtcr    r0
 
        ld      r0, VCPU_GPR(R3)(r4)
index f69a6aab7bfbb5cb65fd082eaedb939d46c12149..1ddb26394e8ac5a1739b6f5aaf12512e5c3ed9f6 100644 (file)
@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
 LG_CACHELINE_BYTES = L1_CACHE_SHIFT
 CACHELINE_MASK = (L1_CACHE_BYTES-1)
 
-_GLOBAL(__clear_user)
+_GLOBAL(__arch_clear_user)
 /*
  * Use dcbz on the complete cache lines in the destination
  * to set them to zero.  This requires that the destination
@@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
        EX_TABLE(8b, 91b)
        EX_TABLE(9b, 91b)
 
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
index 507b18b1660e6e2221242b24d9b8e90055d42842..169872bc08928aa5fc566355ac0e5165985a1d00 100644 (file)
@@ -17,7 +17,7 @@ PPC64_CACHES:
        .section        ".text"
 
 /**
- * __clear_user: - Zero a block of memory in user space, with less checking.
+ * __arch_clear_user: - Zero a block of memory in user space, with less checking.
  * @to:   Destination address, in user space.
  * @n:    Number of bytes to zero.
  *
@@ -58,7 +58,7 @@ err3; stb     r0,0(r3)
        mr      r3,r4
        blr
 
-_GLOBAL_TOC(__clear_user)
+_GLOBAL_TOC(__arch_clear_user)
        cmpdi   r4,32
        neg     r6,r3
        li      r0,0
@@ -181,4 +181,4 @@ err1;       dcbz    0,r3
        cmpdi   r4,32
        blt     .Lshort_clear
        b       .Lmedium_clear
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
index 9488b63dfc872d2bb7fc3350e1adf22251581f58..f5535eae637fbcf52c91d7e6a7dea4e5c5a7e0ab 100644 (file)
@@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
        int ret;
 
-       __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 
        /* Remove htab bolted mappings for this section of memory */
        start = (unsigned long)__va(start);
@@ -289,6 +288,14 @@ void __init mem_init(void)
        BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
 
 #ifdef CONFIG_SWIOTLB
+       /*
+        * Some platforms (e.g. 85xx) limit DMA-able memory way below
+        * 4G. We force memblock to bottom-up mode to ensure that the
+        * memory allocated in swiotlb_init() is DMA-able.
+        * As it's the last memblock allocation, no need to reset it
+        * back to top-down.
+        */
+       memblock_set_bottom_up(true);
        swiotlb_init(0);
 #endif
 
index 090af2d2d3e4c512f4e04356d01bfe968869cc35..96eb8e43f39b5484db1f22192ff20740276b7a2b 100644 (file)
@@ -103,7 +103,7 @@ static void mmu_patch_addis(s32 *site, long simm)
        patch_instruction_site(site, instr);
 }
 
-void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
+static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
 {
        unsigned long s = offset;
        unsigned long v = PAGE_OFFSET + s;
index 42bbcd47cc85ffb1f6900a3ec045db5a2561138d..dffe1a45b6ed4df131f8e18c6cbe851bd1b3fded 100644 (file)
@@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {
 
 #endif
 
-static inline bool slice_addr_is_low(unsigned long addr)
+static inline notrace bool slice_addr_is_low(unsigned long addr)
 {
        u64 tmp = (u64)addr;
 
@@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                       mm_ctx_user_psize(&current->mm->context), 1);
 }
 
-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
+unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
 {
        unsigned char *psizes;
        int index, mask_index;
index 6e5a2a4faeab0d9fe4b49851ecee60d6e7baada4..4ec2a9f14f845cebe96449faeb32914abcf55eae 100644 (file)
@@ -97,12 +97,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC64
 #define PPC_BPF_LOAD_CPU(r)            \
-       do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2);   \
+       do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2);   \
                PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index));  \
        } while (0)
 #else
 #define PPC_BPF_LOAD_CPU(r)     \
-       do { BUILD_BUG_ON(FIELD_SIZEOF(struct task_struct, cpu) != 4);          \
+       do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4);          \
                PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu));          \
        } while(0)
 #endif
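
(The FIELD_SIZEOF -> sizeof_field conversions in this and the following
hunks are a mechanical rename; the helper itself, in
include/linux/stddef.h, computes the same thing under the new name:)

    /* the replacement helper, as defined in include/linux/stddef.h */
    #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
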
index d57b46e0dd6044d1a918e8a518f4fead29ad5c0e..0acc9d5fb19e9a638c8bdc4f6f699d5b62689e7d 100644 (file)
@@ -321,7 +321,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                        ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LD | BPF_W | BPF_LEN: /*       A = skb->len; */
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+                       BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
                        break;
                case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
@@ -333,16 +333,16 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 
                        /*** Ancillary info loads ***/
                case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+                       BUILD_BUG_ON(sizeof_field(struct sk_buff,
                                                  protocol) != 2);
                        PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                            protocol));
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
                case BPF_ANC | SKF_AD_HATYPE:
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+                       BUILD_BUG_ON(sizeof_field(struct net_device,
                                                ifindex) != 4);
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+                       BUILD_BUG_ON(sizeof_field(struct net_device,
                                                type) != 2);
                        PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                                dev));
@@ -365,17 +365,17 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 
                        break;
                case BPF_ANC | SKF_AD_MARK:
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+                       BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          mark));
                        break;
                case BPF_ANC | SKF_AD_RXHASH:
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+                       BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          hash));
                        break;
                case BPF_ANC | SKF_AD_VLAN_TAG:
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+                       BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);
 
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
@@ -388,7 +388,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                                PPC_ANDI(r_A, r_A, 1);
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+                       BUILD_BUG_ON(sizeof_field(struct sk_buff,
                                                  queue_mapping) != 2);
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          queue_mapping));
index 91571841df8a70d0ea2520dd9c3977e6f85ccc24..9dba7e88088530726862179d53ebc5f8ff986ce6 100644 (file)
@@ -539,6 +539,16 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
        /* balloon page list reference */
        get_page(newpage);
 
+       /*
+        * When we migrate a page to a different zone, we have to fixup the
+        * count of both involved zones as we adjusted the managed page count
+        * when inflating.
+        */
+       if (page_zone(page) != page_zone(newpage)) {
+               adjust_managed_page_count(page, 1);
+               adjust_managed_page_count(newpage, -1);
+       }
+
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        balloon_page_insert(b_dev_info, newpage);
        balloon_page_delete(page);
index 0a40201f315ffd14dd39c9a7b71274c1d3db25a3..0c8421dd01ab5bc911e24f340dbe80407089c204 100644 (file)
@@ -74,6 +74,9 @@
 #include "pseries.h"
 #include "../../../../drivers/pci/pci.h"
 
+DEFINE_STATIC_KEY_FALSE(shared_processor);
+EXPORT_SYMBOL_GPL(shared_processor);
+
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
 unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
@@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(void)
 
        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                vpa_init(boot_cpuid);
+
+               if (lppaca_shared_proc(get_lppaca()))
+                       static_branch_enable(&shared_processor);
+
                ppc_md.power_save = pseries_lpar_idle;
                ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
 #ifdef CONFIG_PCI_IOV
index f5fadbd2533a8f1c5b5512fba1423a22b714bd34..9651ca061828a06be50d5636508d90505fdcfcc2 100644 (file)
@@ -972,12 +972,21 @@ static int xive_get_irqchip_state(struct irq_data *data,
                                  enum irqchip_irq_state which, bool *state)
 {
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
+       u8 pq;
 
        switch (which) {
        case IRQCHIP_STATE_ACTIVE:
-               *state = !xd->stale_p &&
-                        (xd->saved_p ||
-                         !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
+               pq = xive_esb_read(xd, XIVE_ESB_GET);
+
+               /*
+                * The esb value being all 1's means we couldn't get
+                * the PQ state of the interrupt through mmio. It may
+                * happen, for example when querying a PHB interrupt
+                * while the PHB is in an error state. We consider the
+                * interrupt to be inactive in that case.
+                */
+               *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
+                       (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
                return 0;
        default:
                return -EINVAL;
index 759ffb00267cf1235cf6b40320994734d571ef7a..fa7dc03459e7f1e7168e9dbef0a3ecf14dae175e 100644 (file)
@@ -64,6 +64,8 @@ config RISCV
        select SPARSEMEM_STATIC if 32BIT
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
+       select ARCH_HAS_GCOV_PROFILE_ALL
+       select HAVE_COPY_THREAD_TLS
 
 config ARCH_MMAP_RND_BITS_MIN
        default 18 if 64BIT
@@ -154,7 +156,7 @@ config GENERIC_HWEIGHT
        def_bool y
 
 config FIX_EARLYCON_MEM
-       def_bool CONFIG_MMU
+       def_bool MMU
 
 config PGTABLE_LEVELS
        int
index 634759ac8c717a8d0c2b5774ceb9ec9ce53da86b..d325b67d00dfcf70c4f3cf89e68e3964eec62a06 100644 (file)
@@ -2,8 +2,8 @@ menu "SoC selection"
 
 config SOC_SIFIVE
        bool "SiFive SoCs"
-       select SERIAL_SIFIVE
-       select SERIAL_SIFIVE_CONSOLE
+       select SERIAL_SIFIVE if TTY
+       select SERIAL_SIFIVE_CONSOLE if TTY
        select CLK_SIFIVE
        select CLK_SIFIVE_FU540_PRCI
        select SIFIVE_PLIC
index a474f98ce4fae8d49e65dec1f8da26d72c70745b..36db8145f9f46013c2b1b31e3c7e9f75e4dce349 100644 (file)
@@ -24,7 +24,7 @@ $(obj)/Image: vmlinux FORCE
 $(obj)/Image.gz: $(obj)/Image FORCE
        $(call if_changed,gzip)
 
-loader.o: $(src)/loader.S $(obj)/Image
+$(obj)/loader.o: $(src)/loader.S $(obj)/Image
 
 $(obj)/loader: $(obj)/loader.o $(obj)/Image $(obj)/loader.lds FORCE
        $(Q)$(LD) -T $(obj)/loader.lds -o $@ $(obj)/loader.o
index 70a1891e7cd07f472ede9f221e854671cbb23a3a..a2e3d54e830cc2c2de4ab0382254adf8674a11a5 100644 (file)
@@ -54,6 +54,7 @@
                        reg = <1>;
                        riscv,isa = "rv64imafdc";
                        tlb-split;
+                       next-level-cache = <&l2cache>;
                        cpu1_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                                compatible = "riscv,cpu-intc";
@@ -77,6 +78,7 @@
                        reg = <2>;
                        riscv,isa = "rv64imafdc";
                        tlb-split;
+                       next-level-cache = <&l2cache>;
                        cpu2_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                                compatible = "riscv,cpu-intc";
                        reg = <3>;
                        riscv,isa = "rv64imafdc";
                        tlb-split;
+                       next-level-cache = <&l2cache>;
                        cpu3_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                                compatible = "riscv,cpu-intc";
                        reg = <4>;
                        riscv,isa = "rv64imafdc";
                        tlb-split;
+                       next-level-cache = <&l2cache>;
                        cpu4_intc: interrupt-controller {
                                #interrupt-cells = <1>;
                                compatible = "riscv,cpu-intc";
                        #pwm-cells = <3>;
                        status = "disabled";
                };
+               l2cache: cache-controller@2010000 {
+                       compatible = "sifive,fu540-c000-ccache", "cache";
+                       cache-block-size = <64>;
+                       cache-level = <2>;
+                       cache-sets = <1024>;
+                       cache-size = <2097152>;
+                       cache-unified;
+                       interrupt-parent = <&plic0>;
+                       interrupts = <1 2 3>;
+                       reg = <0x0 0x2010000 0x0 0x1000>;
+               };
 
        };
 };
index dd62b691c443d821163e7460c4376b482c30396f..27e005fca5849e059bc920d937250bdb251ff35c 100644 (file)
@@ -5,4 +5,8 @@
 #include <linux/ftrace.h>
 #include <asm-generic/asm-prototypes.h>
 
+long long __lshrti3(long long a, int b);
+long long __ashrti3(long long a, int b);
+long long __ashlti3(long long a, int b);
+
 #endif /* _ASM_RISCV_PROTOTYPES_H */
index 0a62d2d684552da8c665d17632bc590e61b1dbc0..435b65532e2945703cc17a9e5c4f7613a0d5b6b8 100644 (file)
 # define SR_PIE                SR_MPIE
 # define SR_PP         SR_MPP
 
-# define IRQ_SOFT      IRQ_M_SOFT
-# define IRQ_TIMER     IRQ_M_TIMER
-# define IRQ_EXT       IRQ_M_EXT
+# define RV_IRQ_SOFT           IRQ_M_SOFT
+# define RV_IRQ_TIMER  IRQ_M_TIMER
+# define RV_IRQ_EXT            IRQ_M_EXT
 #else /* CONFIG_RISCV_M_MODE */
 # define CSR_STATUS    CSR_SSTATUS
 # define CSR_IE                CSR_SIE
 # define SR_PIE                SR_SPIE
 # define SR_PP         SR_SPP
 
-# define IRQ_SOFT      IRQ_S_SOFT
-# define IRQ_TIMER     IRQ_S_TIMER
-# define IRQ_EXT       IRQ_S_EXT
+# define RV_IRQ_SOFT           IRQ_S_SOFT
+# define RV_IRQ_TIMER  IRQ_S_TIMER
+# define RV_IRQ_EXT            IRQ_S_EXT
 #endif /* CONFIG_RISCV_M_MODE */
 
 /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
-#define IE_SIE         (_AC(0x1, UL) << IRQ_SOFT)
-#define IE_TIE         (_AC(0x1, UL) << IRQ_TIMER)
-#define IE_EIE         (_AC(0x1, UL) << IRQ_EXT)
+#define IE_SIE         (_AC(0x1, UL) << RV_IRQ_SOFT)
+#define IE_TIE         (_AC(0x1, UL) << RV_IRQ_TIMER)
+#define IE_EIE         (_AC(0x1, UL) << RV_IRQ_EXT)
 
 #ifndef __ASSEMBLY__
 
index 7ff0ed4f292e48fbc267d77fdbad6e69f0a6e92e..36ae0176135277e633a247ccfb2055111614e816 100644 (file)
@@ -90,6 +90,27 @@ extern pgd_t swapper_pg_dir[];
 #define __S110 PAGE_SHARED_EXEC
 #define __S111 PAGE_SHARED_EXEC
 
+#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END      (PAGE_OFFSET - 1)
+#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
+
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END    (VMALLOC_START - 1)
+#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
+
+/*
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+#define vmemmap                ((struct page *)VMEMMAP_START)
+
 static inline int pmd_present(pmd_t pmd)
 {
        return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -400,23 +421,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END      (PAGE_OFFSET - 1)
-#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
-
-/*
- * Roughly size the vmemmap space to be large enough to fit enough
- * struct pages to map half the virtual address space. Then
- * position vmemmap directly below the VMALLOC region.
- */
-#define VMEMMAP_SHIFT \
-       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END    (VMALLOC_START - 1)
-#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
-
-#define vmemmap                ((struct page *)VMEMMAP_START)
-
 #define PCI_IO_SIZE      SZ_16M
 #define PCI_IO_END       VMEMMAP_START
 #define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
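
(The pgtable.h shuffle above hoists the VMALLOC and VMEMMAP layout
constants, presumably so every later user, PCI_IO_END included, sees them
after the move. The vmemmap define is what makes pfn/page conversion plain
pointer arithmetic under CONFIG_SPARSEMEM_VMEMMAP; a sketch:)

    /* sketch of the conversions enabled by the vmemmap base address */
    #define pfn_to_page_sketch(pfn)   (vmemmap + (pfn))
    #define page_to_pfn_sketch(page)  ((unsigned long)((page) - vmemmap))
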
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..ff9abc0
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_RISCV_VMALLOC_H
+#define _ASM_RISCV_VMALLOC_H
+
+#endif /* _ASM_RISCV_VMALLOC_H */
index a1349ca6466961d8eef510a9d4de9391c0f7c5d1..bad4d85b5e91810332719af05cb8d66009cf8982 100644 (file)
@@ -155,7 +155,7 @@ _save_context:
        REG_L x2,  PT_SP(sp)
        .endm
 
-#if !IS_ENABLED(CONFIG_PREEMPT)
+#if !IS_ENABLED(CONFIG_PREEMPTION)
 .set resume_kernel, restore_all
 #endif
 
@@ -246,6 +246,7 @@ check_syscall_nr:
         */
        li t1, -1
        beq a7, t1, ret_from_syscall_rejected
+       blt a7, t1, 1f
        /* Call syscall */
        la s0, sys_call_table
        slli t0, a7, RISCV_LGPTR
@@ -304,7 +305,7 @@ restore_all:
        sret
 #endif
 
-#if IS_ENABLED(CONFIG_PREEMPT)
+#if IS_ENABLED(CONFIG_PREEMPTION)
 resume_kernel:
        REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
        bnez s0, restore_all
index b94d8db5ddccbcac050da196c59c926a738894dc..c40fdcdeb950a59f48f1be56ff8776253883bf90 100644 (file)
@@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
         */
        old = *parent;
 
-       if (function_graph_enter(old, self_addr, frame_pointer, parent))
+       if (!function_graph_enter(old, self_addr, frame_pointer, parent))
                *parent = return_hooker;
 }
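
(The inverted test fixed above hinges on the return convention of
function_graph_enter(): 0 means the return address was pushed onto the
shadow stack, nonzero means the trace was rejected, so the hook must be
installed only on success. The intended logic, spelled out:)

    if (function_graph_enter(old, self_addr, frame_pointer, parent) == 0)
            *parent = return_hooker;  /* redirect the return only when traced */
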
 
index 84a6f0a4b120b4c8dbc0d171d9472fd17c862797..a4242be66966b2bdbfaee559b93b905e8a0e0d45 100644 (file)
@@ -80,7 +80,9 @@ _start_kernel:
 
 #ifdef CONFIG_SMP
        li t0, CONFIG_NR_CPUS
-       bgeu a0, t0, .Lsecondary_park
+       blt a0, t0, .Lgood_cores
+       tail .Lsecondary_park
+.Lgood_cores:
 #endif
 
        /* Pick one hart to run the main boot sequence */
@@ -209,11 +211,6 @@ relocate:
        tail smp_callin
 #endif
 
-.align 2
-.Lsecondary_park:
-       /* We lack SMP support or have too many harts, so park this hart */
-       wfi
-       j .Lsecondary_park
 END(_start)
 
 #ifdef CONFIG_RISCV_M_MODE
@@ -246,12 +243,12 @@ ENTRY(reset_regs)
        li      t4, 0
        li      t5, 0
        li      t6, 0
-       csrw    sscratch, 0
+       csrw    CSR_SCRATCH, 0
 
 #ifdef CONFIG_FPU
        csrr    t0, CSR_MISA
        andi    t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
-       bnez    t0, .Lreset_regs_done
+       beqz    t0, .Lreset_regs_done
 
        li      t1, SR_FS
        csrs    CSR_STATUS, t1
@@ -295,6 +292,13 @@ ENTRY(reset_regs)
 END(reset_regs)
 #endif /* CONFIG_RISCV_M_MODE */
 
+.section ".text", "ax",@progbits
+.align 2
+.Lsecondary_park:
+       /* We lack SMP support or have too many harts, so park this hart */
+       wfi
+       j .Lsecondary_park
+
 __PAGE_ALIGNED_BSS
        /* Empty zero page */
        .balign PAGE_SIZE
index 3f07a91d5afb4571bb30eb706bd2bb9c49dacc7e..345c4f2eba13f41a58cdf2f2193f08371863a68f 100644 (file)
@@ -23,11 +23,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
 
        irq_enter();
        switch (regs->cause & ~CAUSE_IRQ_FLAG) {
-       case IRQ_TIMER:
+       case RV_IRQ_TIMER:
                riscv_timer_interrupt();
                break;
 #ifdef CONFIG_SMP
-       case IRQ_SOFT:
+       case RV_IRQ_SOFT:
                /*
                 * We only use software interrupts to pass IPIs, so if a non-SMP
                 * system gets one, then we don't know what to do.
@@ -35,7 +35,7 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
                riscv_software_interrupt();
                break;
 #endif
-       case IRQ_EXT:
+       case RV_IRQ_EXT:
                handle_arch_irq(regs);
                break;
        default:
index 95a3031e5c7c9dcfdebaa06ba20f8da31424f905..817cf7b0974ced30d75d536b192b73b4e3c25bdf 100644 (file)
@@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        return 0;
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-       unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+       unsigned long arg, struct task_struct *p, unsigned long tls)
 {
        struct pt_regs *childregs = task_pt_regs(p);
 
@@ -121,7 +121,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                if (usp) /* User fork */
                        childregs->sp = usp;
                if (clone_flags & CLONE_SETTLS)
-                       childregs->tp = childregs->a5;
+                       childregs->tp = tls;
                childregs->a0 = 0; /* Return value of fork() */
                p->thread.ra = (unsigned long)ret_from_fork;
        }
index 4800cf703186d389a6fd3446e3339375f332ee74..2a02b7eebee00b270f4d6b7365de0374c2beaba3 100644 (file)
@@ -9,8 +9,5 @@
 /*
  * Assembly functions that may be used (directly or indirectly) by modules
  */
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__asm_copy_to_user);
-EXPORT_SYMBOL(__asm_copy_from_user);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
index 49a5852fd07dd5a023c1899e80bffe542f6dedeb..33b16f4212f7a5fceb7feeff0a462e295bb4c0c0 100644 (file)
@@ -58,7 +58,8 @@ quiet_cmd_vdsold = VDSOLD  $@
       cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
                            -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
                    $(CROSS_COMPILE)objcopy \
-                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
+                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+                   rm $@.tmp
 
 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
index 15f9d54c7db63859de60f86ced505165aa7a84cb..ef90075c4b0a9c153c02cdfc0d0fbdc98e151134 100644 (file)
@@ -4,34 +4,73 @@
  */
 
 #include <linux/linkage.h>
+#include <asm-generic/export.h>
 
-ENTRY(__lshrti3)
+SYM_FUNC_START(__lshrti3)
        beqz    a2, .L1
        li      a5,64
        sub     a5,a5,a2
-       addi    sp,sp,-16
        sext.w  a4,a5
        blez    a5, .L2
        sext.w  a2,a2
-       sll     a4,a1,a4
        srl     a0,a0,a2
-       srl     a1,a1,a2
+       sll     a4,a1,a4
+       srl     a2,a1,a2
        or      a0,a0,a4
-       sd      a1,8(sp)
-       sd      a0,0(sp)
-       ld      a0,0(sp)
-       ld      a1,8(sp)
-       addi    sp,sp,16
-       ret
+       mv      a1,a2
 .L1:
        ret
 .L2:
-       negw    a4,a4
-       srl     a1,a1,a4
-       sd      a1,0(sp)
-       sd      zero,8(sp)
-       ld      a0,0(sp)
-       ld      a1,8(sp)
-       addi    sp,sp,16
+       negw    a0,a4
+       li      a2,0
+       srl     a0,a1,a0
+       mv      a1,a2
+       ret
+SYM_FUNC_END(__lshrti3)
+EXPORT_SYMBOL(__lshrti3)
+
+SYM_FUNC_START(__ashrti3)
+       beqz    a2, .L3
+       li      a5,64
+       sub     a5,a5,a2
+       sext.w  a4,a5
+       blez    a5, .L4
+       sext.w  a2,a2
+       srl     a0,a0,a2
+       sll     a4,a1,a4
+       sra     a2,a1,a2
+       or      a0,a0,a4
+       mv      a1,a2
+.L3:
+       ret
+.L4:
+       negw    a0,a4
+       srai    a2,a1,0x3f
+       sra     a0,a1,a0
+       mv      a1,a2
+       ret
+SYM_FUNC_END(__ashrti3)
+EXPORT_SYMBOL(__ashrti3)
+
+SYM_FUNC_START(__ashlti3)
+       beqz    a2, .L5
+       li      a5,64
+       sub     a5,a5,a2
+       sext.w  a4,a5
+       blez    a5, .L6
+       sext.w  a2,a2
+       sll     a1,a1,a2
+       srl     a4,a0,a4
+       sll     a2,a0,a2
+       or      a1,a1,a4
+       mv      a0,a2
+.L5:
+       ret
+.L6:
+       negw    a1,a4
+       li      a2,0
+       sll     a1,a0,a1
+       mv      a0,a2
        ret
-ENDPROC(__lshrti3)
+SYM_FUNC_END(__ashlti3)
+EXPORT_SYMBOL(__ashlti3)
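
(The rewritten tishift.S above drops the needless stack round-trip in
__lshrti3 and adds the arithmetic and left-shift siblings. For reference, a
hedged C rendering of what __lshrti3 computes, with the 128-bit value split
across two 64-bit halves as in the RV64 calling convention, lo in a0 and hi
in a1:)

    typedef struct { unsigned long long lo, hi; } u128_halves;

    static u128_halves lshrti3_sketch(u128_halves v, int b)
    {
            u128_halves r;

            if (b == 0)
                    return v;               /* the asm's early-out .L1 path */
            if (b < 64) {
                    r.lo = (v.lo >> b) | (v.hi << (64 - b));
                    r.hi = v.hi >> b;
            } else {                        /* b >= 64: the asm's .L2 path */
                    r.lo = v.hi >> (b - 64);
                    r.hi = 0;
            }
            return r;
    }
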
index fecd65657a6fc0c3c1e4e18d281f0d2fb82c20f8..f29d2ba2c0a6ce02b80bba6032ddf9e1f897d484 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm-generic/export.h>
 #include <asm/asm.h>
 #include <asm/csr.h>
 
@@ -66,6 +67,8 @@ ENTRY(__asm_copy_from_user)
        j 3b
 ENDPROC(__asm_copy_to_user)
 ENDPROC(__asm_copy_from_user)
+EXPORT_SYMBOL(__asm_copy_to_user)
+EXPORT_SYMBOL(__asm_copy_from_user)
 
 
 ENTRY(__clear_user)
@@ -108,6 +111,7 @@ ENTRY(__clear_user)
        bltu a0, a3, 5b
        j 3b
 ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
 
        .section .fixup,"ax"
        .balign 4
index 3c8b332584579d97ddb49fec97d3dd159cb0adab..a1bd95c8047a1159cd8f4f497300497e496b0d6d 100644 (file)
@@ -10,7 +10,6 @@ obj-y += extable.o
 obj-$(CONFIG_MMU) += fault.o
 obj-y += cacheflush.o
 obj-y += context.o
-obj-y += sifive_l2_cache.o
 
 ifeq ($(CONFIG_MMU),y)
 obj-$(CONFIG_SMP) += tlbflush.o
index 8f190068664059d3ed2bfc5e40b27e93c1d7bb88..8930ab7278e6d51a7e14e05a765ca21a6e6e2798 100644 (file)
@@ -22,6 +22,7 @@ void flush_icache_all(void)
        else
                on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
+EXPORT_SYMBOL(flush_icache_all);
 
 /*
  * Performs an icache flush for the given MM context.  RISC-V has no direct
index 69f6678db7f370027e00f679f2a8262362fdc185..965a8cf4829ca33bfde4754bf47db0b8b17c6611 100644 (file)
@@ -99,13 +99,13 @@ static void __init setup_initrd(void)
                pr_info("initrd not found or empty");
                goto disable;
        }
-       if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+       if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
                pr_err("initrd extends beyond end of memory");
                goto disable;
        }
 
        size = initrd_end - initrd_start;
-       memblock_reserve(__pa(initrd_start), size);
+       memblock_reserve(__pa_symbol(initrd_start), size);
        initrd_below_start_ok = 1;
 
        pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@@ -124,8 +124,8 @@ void __init setup_bootmem(void)
 {
        struct memblock_region *reg;
        phys_addr_t mem_size = 0;
-       phys_addr_t vmlinux_end = __pa(&_end);
-       phys_addr_t vmlinux_start = __pa(&_start);
+       phys_addr_t vmlinux_end = __pa_symbol(&_end);
+       phys_addr_t vmlinux_start = __pa_symbol(&_start);
 
        /* Find the memory region containing the kernel */
        for_each_memblock(memory, reg) {
@@ -445,7 +445,7 @@ static void __init setup_vm_final(void)
 
        /* Setup swapper PGD for fixmap */
        create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
-                          __pa(fixmap_pgd_next),
+                          __pa_symbol(fixmap_pgd_next),
                           PGDIR_SIZE, PAGE_TABLE);
 
        /* Map all memory banks */
@@ -474,7 +474,7 @@ static void __init setup_vm_final(void)
        clear_fixmap(FIX_PMD);
 
        /* Move to swapper page table */
-       csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
+       csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
        local_flush_tlb_all();
 }
 #else
index 5451ef3845f2954583cd55fac13275e49eb9b31a..7fbf56aab6610dded8a262f8f0ef74ece105e41a 100644 (file)
@@ -631,14 +631,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
                return -1;
        emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx);
 
-       /* if (--TCC < 0)
+       /* if (TCC-- < 0)
         *     goto out;
         */
        emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
        off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
        if (is_13b_check(off, insn))
                return -1;
-       emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx);
+       emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx);
 
        /* prog = array->ptrs[index];
         * if (!prog)
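
Editor's note: the hunk above fixes an off-by-one in the emitted tail-call bound check: the branch must test the counter's value before the decrement, as the updated comment states. A C-level sketch of the semantics the JIT is expected to emit (register allocation and the actual jump are handled by the JIT itself):

	if (index >= array->map.max_entries)
		goto out;
	if (tcc-- < 0)		/* compare the old value, then decrement */
		goto out;
	prog = array->ptrs[index];
	if (!prog)
		goto out;
	/* ...tail-call into prog... */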
index d4051e88e6250ddbb621b18c0dc6f8356411ffd8..287714d51b47754a63a1d0529cadab73133ea399 100644 (file)
@@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
        def_bool y
 
 config GENERIC_LOCKBREAK
-       def_bool y if PREEMPT
+       def_bool y if PREEMPTION
 
 config PGSTE
        def_bool y if KVM
@@ -110,7 +110,7 @@ config S390
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
        select ARCH_WANT_IPC_PARSE_VERSION
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select CLONE_BACKWARDS2
        select DYNAMIC_FTRACE if FUNCTION_TRACER
        select GENERIC_CLOCKEVENTS
@@ -124,6 +124,7 @@ config S390
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN
+       select HAVE_ARCH_KASAN_VMALLOC
        select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
index b5ea9e14c017aae1966d08871cb7e2a43001849e..6ede29907fbf76424ed52de165db93af0e600933 100644 (file)
@@ -130,11 +130,11 @@ static inline bool should_resched(int preempt_offset)
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 extern asmlinkage void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
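
Editor's note: this is the first of several CONFIG_PREEMPT to CONFIG_PREEMPTION conversions in this merge (s390, sh, sparc). CONFIG_PREEMPTION is the umbrella symbol selected by both CONFIG_PREEMPT and CONFIG_PREEMPT_RT, so code that must be active in any preemptible kernel now keys off it. The idiom, in brief:

	#ifdef CONFIG_PREEMPTION  /* true for PREEMPT and PREEMPT_RT kernels */
		/* paths that any preemptible kernel needs */
	#endif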
index 6dc6c4fbc8e2b1a59796afb96bff120d4c0f8a21..69289e99cabdcee175a4a176881a128f49e3a574 100644 (file)
@@ -27,7 +27,6 @@
 #define MACHINE_FLAG_DIAG9C    BIT(3)
 #define MACHINE_FLAG_ESOP      BIT(4)
 #define MACHINE_FLAG_IDTE      BIT(5)
-#define MACHINE_FLAG_DIAG44    BIT(6)
 #define MACHINE_FLAG_EDAT1     BIT(7)
 #define MACHINE_FLAG_EDAT2     BIT(8)
 #define MACHINE_FLAG_TOPOLOGY  BIT(10)
@@ -94,7 +93,6 @@ extern unsigned long __swsusp_reset_dma;
 #define MACHINE_HAS_DIAG9C     (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
 #define MACHINE_HAS_ESOP       (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
 #define MACHINE_HAS_IDTE       (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
-#define MACHINE_HAS_DIAG44     (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
 #define MACHINE_HAS_EDAT1      (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
 #define MACHINE_HAS_EDAT2      (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
 #define MACHINE_HAS_TOPOLOGY   (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
index 6da8885251d65d9499fbf5b3cc4491f9ee1170cd..670f14a228e55bb42c4cc516b660f91431a4cb0a 100644 (file)
@@ -194,9 +194,9 @@ static inline unsigned long long get_tod_clock_monotonic(void)
 {
        unsigned long long tod;
 
-       preempt_disable();
+       preempt_disable_notrace();
        tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
-       preempt_enable();
+       preempt_enable_notrace();
        return tod;
 }
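
Editor's note: the switch to the _notrace variants avoids recursion when this helper is used on the tracing path, since an ordinary preempt_disable() is itself instrumentable. A hedged sketch of the hazard; the caller shown is hypothetical, not the actual s390 trace-clock code:

	/* Hypothetical tracer timestamp helper. */
	static notrace u64 my_trace_clock(void)
	{
		/*
		 * If this called a traceable preempt_disable(), taking
		 * the timestamp would re-enter the tracer; the _notrace
		 * variants adjust the preempt count without any
		 * instrumentation hooks.
		 */
		return get_tod_clock_monotonic();
	}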
 
index ef3c00b049ab45b4cd050d97a30e8210b63f7676..4093a2856929a0a420b82c819116fc6d4da6e90b 100644 (file)
@@ -86,7 +86,7 @@ static inline int share(unsigned long addr, u16 cmd)
        };
 
        if (!is_prot_virt_guest())
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        /*
         * Sharing is page wise, if we encounter addresses that are
         * not page aligned, we assume something went wrong. If
diff --git a/arch/s390/include/asm/vmalloc.h b/arch/s390/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..3ba3a6b
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_S390_VMALLOC_H
+#define _ASM_S390_VMALLOC_H
+
+#endif /* _ASM_S390_VMALLOC_H */
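
Editor's note: several identical empty <asm/vmalloc.h> headers appear in this merge (s390, sh, sparc, um, unicore32). The point is purely structural: the generic header can include the arch hook unconditionally, so architectures that want to inject definitions can do so without #ifdef games in common code. Roughly, on the consuming side (a sketch assumed from the pattern, not quoted):

	/* include/linux/vmalloc.h (sketch): */
	#include <asm/vmalloc.h>	/* empty stub on most architectures */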
index d306fe04489a4772c3fbd284f4662a8391d9ba77..2c122d8bab93569fbcb647d9c553995ee2d34386 100644 (file)
@@ -195,6 +195,8 @@ void die(struct pt_regs *regs, const char *str)
               regs->int_code >> 17, ++die_counter);
 #ifdef CONFIG_PREEMPT
        pr_cont("PREEMPT ");
+#elif defined(CONFIG_PREEMPT_RT)
+       pr_cont("PREEMPT_RT ");
 #endif
        pr_cont("SMP ");
        if (debug_pagealloc_enabled())
index db32a55daaec605a7b0042f7c4eb126996c9ccba..cd241ee66eff4feb6d39842ac9972136394e23c1 100644 (file)
@@ -204,21 +204,6 @@ static __init void detect_diag9c(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
 }
 
-static __init void detect_diag44(void)
-{
-       int rc;
-
-       diag_stat_inc(DIAG_STAT_X044);
-       asm volatile(
-               "       diag    0,0,0x44\n"
-               "0:     la      %0,0\n"
-               "1:\n"
-               EX_TABLE(0b,1b)
-               : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
-       if (!rc)
-               S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
-}
-
 static __init void detect_machine_facilities(void)
 {
        if (test_facility(8)) {
@@ -331,7 +316,6 @@ void __init startup_init(void)
        setup_arch_string();
        setup_boot_command_line();
        detect_diag9c();
-       detect_diag44();
        detect_machine_facilities();
        save_vector_registers();
        setup_topology();
index 270d1d145761b131c6fd146747c333ba0ad4d474..9205add8481d5e4455109dce23cffbca3f7a2b3f 100644 (file)
@@ -790,7 +790,7 @@ ENTRY(io_int_handler)
 .Lio_work:
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jo      .Lio_work_user          # yes -> do resched & signal
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        # check for preemptive scheduling
        icm     %r0,15,__LC_PREEMPT_COUNT
        jnz     .Lio_restore            # preemption is disabled
index 9e1660a6b9db6e49be93e4b145bb639dee52d773..c3597d2e2ae0e2ff57f2d1d3b80f89152968da17 100644 (file)
@@ -35,6 +35,7 @@ EXPORT_SYMBOL(_mcount)
 ENTRY(ftrace_caller)
        .globl  ftrace_regs_caller
        .set    ftrace_regs_caller,ftrace_caller
+       stg     %r14,(__SF_GPRS+8*8)(%r15)      # save traced function caller
        lgr     %r1,%r15
 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
        aghi    %r0,MCOUNT_RETURN_FIXUP
index c07fdcd737266de36e821eae1d0fcc0bb41abf27..77d93c534284d0bba7b1a02f99e743d9d5ff8292 100644 (file)
@@ -1303,18 +1303,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
                 */
                if (flush_all && done)
                        break;
-
-               /* If an event overflow happened, discard samples by
-                * processing any remaining sample-data-blocks.
-                */
-               if (event_overflow)
-                       flush_all = 1;
        }
 
        /* Account sample overflows in the event hardware structure */
        if (sampl_overflow)
                OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
                                                 sampl_overflow, 1 + num_sdb);
+
+       /* perf_event_overflow() and perf_event_account_interrupt() limit
+        * the interrupt rate to an upper limit, roughly 1000 samples per
+        * task tick. Hitting this limit results in a large number of
+        * throttled PERF_RECORD_THROTTLE entries and the samples being
+        * dropped. Slightly increase the interval to avoid hitting this
+        * limit.
+        */
+       if (event_overflow) {
+               SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
+               debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
+                                   __func__,
+                                   DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
+       }
+
        if (sampl_overflow || event_overflow)
                debug_sprintf_event(sfdbg, 4, "%s: "
                                    "overflows: sample %llu event %llu"
index 9cbf490fd162ec6b8df30a8cc0a9292cdbe90acb..d5fbd754f41a6fab29ee0ccfe9228859d2504de0 100644 (file)
@@ -1052,7 +1052,7 @@ static void __init log_component_list(void)
 
        if (!early_ipl_comp_list_addr)
                return;
-       if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR)
+       if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
                pr_info("Linux is running with Secure-IPL enabled\n");
        else
                pr_info("Linux is running with Secure-IPL disabled\n");
index 2794cad9312e37cda034425386edf7356f2e6d97..a08bd2522dd95a08a27de50850e60546fedb31e5 100644 (file)
@@ -413,14 +413,11 @@ EXPORT_SYMBOL(arch_vcpu_is_preempted);
 
 void smp_yield_cpu(int cpu)
 {
-       if (MACHINE_HAS_DIAG9C) {
-               diag_stat_inc_norecursion(DIAG_STAT_X09C);
-               asm volatile("diag %0,0,0x9c"
-                            : : "d" (pcpu_devices[cpu].address));
-       } else if (MACHINE_HAS_DIAG44 && !smp_cpu_mtid) {
-               diag_stat_inc_norecursion(DIAG_STAT_X044);
-               asm volatile("diag 0,0,0x44");
-       }
+       if (!MACHINE_HAS_DIAG9C)
+               return;
+       diag_stat_inc_norecursion(DIAG_STAT_X09C);
+       asm volatile("diag %0,0,0x9c"
+                    : : "d" (pcpu_devices[cpu].address));
 }
 
 /*
index da2d4d4c5b0e0252186df8d985cf3dafa7fb6994..707fd99f6734d86ca1f037887f6be085b7366a3c 100644 (file)
@@ -36,10 +36,17 @@ static bool update_stack_info(struct unwind_state *state, unsigned long sp)
        return true;
 }
 
-static inline bool is_task_pt_regs(struct unwind_state *state,
-                                  struct pt_regs *regs)
+static inline bool is_final_pt_regs(struct unwind_state *state,
+                                   struct pt_regs *regs)
 {
-       return task_pt_regs(state->task) == regs;
+       /* user mode or kernel thread pt_regs at the bottom of task stack */
+       if (task_pt_regs(state->task) == regs)
+               return true;
+
+       /* user mode pt_regs at the bottom of irq stack */
+       return state->stack_info.type == STACK_TYPE_IRQ &&
+              state->stack_info.end - sizeof(struct pt_regs) == (unsigned long)regs &&
+              READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE;
 }
 
 bool unwind_next_frame(struct unwind_state *state)
@@ -80,7 +87,7 @@ bool unwind_next_frame(struct unwind_state *state)
                        if (!on_stack(info, sp, sizeof(struct pt_regs)))
                                goto out_err;
                        regs = (struct pt_regs *) sp;
-                       if (is_task_pt_regs(state, regs))
+                       if (is_final_pt_regs(state, regs))
                                goto out_stop;
                        ip = READ_ONCE_NOCHECK(regs->psw.addr);
                        sp = READ_ONCE_NOCHECK(regs->gprs[15]);
index ce1e4bbe53aaf2d447f4aa82a4ba1c55ffdc13e6..9b2dab5a69f995055c9067f37a5688d65f1b4642 100644 (file)
@@ -242,7 +242,6 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
 
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
-       /* Use classic spinlocks + niai if the steal time is >= 10% */
        if (test_cpu_flag(CIF_DEDICATED_CPU))
                arch_spin_lock_queued(lp);
        else
index bda7ac0ddd29710d62bb23911408cc4e64adc329..32b7a30b2485d53f9dede2756ddfb7ce105bc4da 100644 (file)
@@ -238,7 +238,7 @@ static int test_unwind_irq(struct unwindme *u)
 {
        preempt_disable();
        if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) {
-               pr_info("Couldn't reqister external interrupt handler");
+               pr_info("Couldn't register external interrupt handler");
                return -1;
        }
        u->task = current;
index f0ce2222056592fdd0941b43918f610c2ee958cb..ac44bd76db4be13443b3da63e4ef0377516d2ddb 100644 (file)
@@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
        vmem_remove_mapping(start, size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
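
Editor's note: this follows a core mm change in the same merge window that dropped the zone argument from __remove_pages(); the zone is now derived internally from the pfn range, so arch callers simply stop passing it. The signature assumed here (a sketch, not quoted from the tree):

	/* mm/memory_hotplug.c (assumed): */
	void __remove_pages(unsigned long pfn, unsigned long nr_pages,
			    struct vmem_altmap *altmap);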
index 460f2557294021f0f7bd49e1f31bcfbaed007f31..06345616a6466a921c9476ca6ecf6a0710a3638a 100644 (file)
@@ -82,7 +82,8 @@ static pte_t * __init kasan_early_pte_alloc(void)
 enum populate_mode {
        POPULATE_ONE2ONE,
        POPULATE_MAP,
-       POPULATE_ZERO_SHADOW
+       POPULATE_ZERO_SHADOW,
+       POPULATE_SHALLOW
 };
 static void __init kasan_early_vmemmap_populate(unsigned long address,
                                                unsigned long end,
@@ -116,6 +117,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }
 
+               if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+                   mode == POPULATE_SHALLOW) {
+                       address = (address + P4D_SIZE) & P4D_MASK;
+                       continue;
+               }
+
                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
@@ -130,6 +137,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }
 
+               if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+                   mode == POPULATE_SHALLOW) {
+                       address = (address + PUD_SIZE) & PUD_MASK;
+                       continue;
+               }
+
                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
@@ -195,6 +208,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                                page = kasan_early_shadow_page;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
                                break;
+                       case POPULATE_SHALLOW:
+                               /* should never happen */
+                               break;
                        }
                }
                address += PAGE_SIZE;
@@ -313,22 +329,50 @@ void __init kasan_early_init(void)
        init_mm.pgd = early_pg_dir;
        /*
         * Current memory layout:
-        * +- 0 -------------+   +- shadow start -+
-        * | 1:1 ram mapping |  /| 1/8 ram        |
-        * +- end of ram ----+ / +----------------+
-        * | ... gap ...     |/  |      kasan     |
-        * +- shadow start --+   |      zero      |
-        * | 1/8 addr space  |   |      page      |
-        * +- shadow end    -+   |      mapping   |
-        * | ... gap ...     |\  |    (untracked) |
-        * +- modules vaddr -+ \ +----------------+
-        * | 2Gb             |  \|      unmapped  | allocated per module
-        * +-----------------+   +- shadow end ---+
+        * +- 0 -------------+     +- shadow start -+
+        * | 1:1 ram mapping |    /| 1/8 ram        |
+        * |                 |   / |                |
+        * +- end of ram ----+  /  +----------------+
+        * | ... gap ...     | /   |                |
+        * |                 |/    |    kasan       |
+        * +- shadow start --+     |    zero        |
+        * | 1/8 addr space  |     |    page        |
+        * +- shadow end    -+     |    mapping     |
+        * | ... gap ...     |\    |  (untracked)   |
+        * +- vmalloc area  -+ \   |                |
+        * | vmalloc_size    |  \  |                |
+        * +- modules vaddr -+   \ +----------------+
+        * | 2Gb             |    \|      unmapped  | allocated per module
+        * +-----------------+     +- shadow end ---+
+        *
+        * Current memory layout (KASAN_VMALLOC):
+        * +- 0 -------------+     +- shadow start -+
+        * | 1:1 ram mapping |    /| 1/8 ram        |
+        * |                 |   / |                |
+        * +- end of ram ----+  /  +----------------+
+        * | ... gap ...     | /   |    kasan       |
+        * |                 |/    |    zero        |
+        * +- shadow start --+     |    page        |
+        * | 1/8 addr space  |     |    mapping     |
+        * +- shadow end    -+     |  (untracked)   |
+        * | ... gap ...     |\    |                |
+        * +- vmalloc area  -+ \   +- vmalloc area -+
+        * | vmalloc_size    |  \  |shallow populate|
+        * +- modules vaddr -+   \ +- modules area -+
+        * | 2Gb             |    \|shallow populate|
+        * +-----------------+     +- shadow end ---+
         */
        /* populate kasan shadow (for identity mapping and zero page mapping) */
        kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
        if (IS_ENABLED(CONFIG_MODULES))
                untracked_mem_end = vmax - MODULES_LEN;
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+               untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
+               /* shallowly populate kasan shadow for vmalloc and modules */
+               kasan_early_vmemmap_populate(__sha(untracked_mem_end),
+                                            __sha(vmax), POPULATE_SHALLOW);
+       }
+       /* populate kasan shadow for untracked memory */
        kasan_early_vmemmap_populate(__sha(max_physmem_end),
                                     __sha(untracked_mem_end),
                                     POPULATE_ZERO_SHADOW);
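
Editor's note: "shallow" population here means only the top page-table level of the shadow for the vmalloc and modules areas is created up front; with CONFIG_KASAN_VMALLOC the generic code is expected to fill in the lower levels lazily as vmalloc mappings appear. A hedged sketch of the assumed flow:

	/*
	 * Assumed flow (sketch, not an excerpt):
	 *
	 *   vmalloc(size)
	 *     -> generic CONFIG_KASAN_VMALLOC support allocates shadow
	 *        pages for the new area on demand, walking down from the
	 *        top-level entries that POPULATE_SHALLOW pre-created.
	 */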
index 04a03433c72013b0bbf5a93bb72b477d0cd2d709..c82157f46b18693cf6ed6272934ed6cb5a205490 100644 (file)
@@ -1,3 +1,4 @@
 purgatory
+purgatory.chk
 purgatory.lds
 purgatory.ro
index bc0d7a0d039453d28464432c2d74994b827b4c1d..c57f8c40e992685812170f3db796256b7e620934 100644 (file)
@@ -4,7 +4,7 @@ OBJECT_FILES_NON_STANDARD := y
 
 purgatory-y := head.o purgatory.o string.o sha256.o mem.o
 
-targets += $(purgatory-y) purgatory.lds purgatory purgatory.ro
+targets += $(purgatory-y) purgatory.lds purgatory purgatory.chk purgatory.ro
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
 $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
@@ -15,8 +15,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
 $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
        $(call if_changed_rule,as_o_S)
 
-$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
-       $(call if_changed_rule,cc_o_c)
+KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
 
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
@@ -26,15 +28,22 @@ KBUILD_CFLAGS += $(CLANG_FLAGS)
 KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
 
-LDFLAGS_purgatory := -r --no-undefined -nostdlib -z nodefaultlib -T
+# Since we link purgatory with -r, unresolved symbols are not checked, so we
+# also link a purgatory.chk binary without -r to check for unresolved symbols.
+PURGATORY_LDFLAGS := -nostdlib -z nodefaultlib
+LDFLAGS_purgatory := -r $(PURGATORY_LDFLAGS) -T
+LDFLAGS_purgatory.chk := -e purgatory_start $(PURGATORY_LDFLAGS)
 $(obj)/purgatory: $(obj)/purgatory.lds $(PURGATORY_OBJS) FORCE
                $(call if_changed,ld)
 
+$(obj)/purgatory.chk: $(obj)/purgatory FORCE
+               $(call if_changed,ld)
+
 OBJCOPYFLAGS_purgatory.ro := -O elf64-s390
 OBJCOPYFLAGS_purgatory.ro += --remove-section='*debug*'
 OBJCOPYFLAGS_purgatory.ro += --remove-section='.comment'
 OBJCOPYFLAGS_purgatory.ro += --remove-section='.note.*'
-$(obj)/purgatory.ro: $(obj)/purgatory FORCE
+$(obj)/purgatory.ro: $(obj)/purgatory $(obj)/purgatory.chk FORCE
                $(call if_changed,objcopy)
 
 $(obj)/kexec-purgatory.o: $(obj)/kexec-purgatory.S $(obj)/purgatory.ro FORCE
diff --git a/arch/s390/purgatory/string.c b/arch/s390/purgatory/string.c
new file mode 100644 (file)
index 0000000..c98c22a
--- /dev/null
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define __HAVE_ARCH_MEMCMP     /* arch function */
+#include "../lib/string.c"
index f356ee674d89b7148f68a9eecc14c2f805fe00d9..9ece111b02548097604f276f0ecb4177a38a0c7c 100644 (file)
@@ -108,7 +108,7 @@ config GENERIC_CALIBRATE_DELAY
 
 config GENERIC_LOCKBREAK
        def_bool y
-       depends on SMP && PREEMPT
+       depends on SMP && PREEMPTION
 
 config ARCH_SUSPEND_POSSIBLE
        def_bool n
index d964c4d6b1390bd5e1329b8d47b227ca4de52168..77dad1e511b4652b25a6d3137d2059d22a86629d 100644 (file)
@@ -341,7 +341,7 @@ static void __init sh7785lcr_setup(char **cmdline_p)
        pm_power_off = sh7785lcr_power_off;
 
        /* sm501 DRAM configuration */
-       sm501_reg = ioremap_nocache(SM107_REG_ADDR, SM501_DRAM_CONTROL);
+       sm501_reg = ioremap(SM107_REG_ADDR, SM501_DRAM_CONTROL);
        if (!sm501_reg) {
                printk(KERN_ERR "%s: ioremap error.\n", __func__);
                return;
index 9108789fafef7c7e1e72ec4cd8645a32e8891600..3b6ea2d99013626bcfd26f404dee9bb2cc2ae197 100644 (file)
@@ -137,7 +137,7 @@ void init_cayman_irq(void)
 {
        int i;
 
-       epld_virt = (unsigned long)ioremap_nocache(EPLD_BASE, 1024);
+       epld_virt = (unsigned long)ioremap(EPLD_BASE, 1024);
        if (!epld_virt) {
                printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n");
                return;
index 4cec14700adc652af3b4eaf1ec7671c35f9dcafe..8ef76e288da00cbf05a9be081fe355abe23ba6a5 100644 (file)
@@ -99,7 +99,7 @@ static int __init smsc_superio_setup(void)
 {
        unsigned char devid, devrev;
 
-       smsc_superio_virt = (unsigned long)ioremap_nocache(SMSC_SUPERIO_BASE, 1024);
+       smsc_superio_virt = (unsigned long)ioremap(SMSC_SUPERIO_BASE, 1024);
        if (!smsc_superio_virt) {
                panic("Unable to remap SMSC SuperIO\n");
        }
index 895576ff837632fb642fcc8318fd8b2688e0fc56..a37e1e88c6b1f22f6cfba2360bed189ef6614f2c 100644 (file)
@@ -32,7 +32,7 @@ static void __iomem *sdk7786_fpga_probe(void)
         * is reserved.
         */
        for (area = PA_AREA0; area < PA_AREA7; area += SZ_64M) {
-               base = ioremap_nocache(area + FPGA_REGS_OFFSET, FPGA_REGS_SIZE);
+               base = ioremap(area + FPGA_REGS_OFFSET, FPGA_REGS_SIZE);
                if (!base) {
                        /* Failed to remap this area, move along. */
                        continue;
index cf2fcccca8126dce0586fc363e04c509e18b633c..24391b444b286deb848a6afd2b97b94d1e50119d 100644 (file)
@@ -96,7 +96,7 @@ static int heartbeat_drv_probe(struct platform_device *pdev)
                        return -ENOMEM;
        }
 
-       hd->base = ioremap_nocache(res->start, resource_size(res));
+       hd->base = ioremap(res->start, resource_size(res));
        if (unlikely(!hd->base)) {
                dev_err(&pdev->dev, "ioremap failed\n");
 
index 49303fab187bf66fa1671f480bb32acd9eb2d38f..03225d27770b656618cf8923f177c999f742895f 100644 (file)
@@ -115,12 +115,12 @@ static int __init sh5pci_init(void)
                 return -EINVAL;
         }
 
-       pcicr_virt = (unsigned long)ioremap_nocache(SH5PCI_ICR_BASE, 1024);
+       pcicr_virt = (unsigned long)ioremap(SH5PCI_ICR_BASE, 1024);
        if (!pcicr_virt) {
                panic("Unable to remap PCICR\n");
        }
 
-       PCI_IO_AREA = (unsigned long)ioremap_nocache(SH5PCI_IO_BASE, 0x10000);
+       PCI_IO_AREA = (unsigned long)ioremap(SH5PCI_IO_BASE, 0x10000);
        if (!PCI_IO_AREA) {
                panic("Unable to remap PCIIO\n");
        }
index f6d148451dfc724eb8f2fa5bf4916d8d459136d8..f3dc3f25b3ff08c2af8bd2a1db845fd2c6308ded 100644 (file)
@@ -325,9 +325,9 @@ int __init sh_early_platform_driver_probe(char *class_str,
 }
 
 /**
- * sh_early_platform_cleanup - clean up early platform code
+ * early_platform_cleanup - clean up early platform code
  */
-static int __init sh_early_platform_cleanup(void)
+void __init early_platform_cleanup(void)
 {
        struct platform_device *pd, *pd2;
 
@@ -337,11 +337,4 @@ static int __init sh_early_platform_cleanup(void)
                list_del(&pd->dev.devres_head);
                memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
        }
-
-       return 0;
 }
-/*
- * This must happen once after all early devices are probed but before probing
- * real platform devices.
- */
-subsys_initcall(sh_early_platform_cleanup);
index 1495489225aceac653bb22601796b354762c2b70..39c9ead489e5cee48056d8918bbe26510d7c0ae1 100644 (file)
@@ -367,7 +367,6 @@ static inline void ioremap_fixed_init(void) { }
 static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
 #endif
 
-#define ioremap_nocache        ioremap
 #define ioremap_uc     ioremap
 
 /*
diff --git a/arch/sh/include/asm/vmalloc.h b/arch/sh/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..716b774
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_SH_VMALLOC_H
+#define _ASM_SH_VMALLOC_H
+
+#endif /* _ASM_SH_VMALLOC_H */
index 744f903b4df359b8efe63671897a2f769e5b91d8..1b3050facda879f39ba683f382095b4acc1755e2 100644 (file)
@@ -124,7 +124,7 @@ void __init plat_irq_setup(void)
        unsigned long reg;
        int i;
 
-       intc_virt = (unsigned long)ioremap_nocache(INTC_BASE, 1024);
+       intc_virt = (unsigned long)ioremap(INTC_BASE, 1024);
        if (!intc_virt) {
                panic("Unable to remap INTC\n");
        }
index ae44dc24c4557b972488bd334b77cfc3e42c0bf6..d0d5d81455ae1e4139dff4fcd50402ed6710f09d 100644 (file)
@@ -88,8 +88,8 @@ static void j2_start_cpu(unsigned int cpu, unsigned long entry_point)
        if (!np) return;
 
        if (of_property_read_u32_array(np, "cpu-release-addr", regs, 2)) return;
-       release = ioremap_nocache(regs[0], sizeof(u32));
-       initpc = ioremap_nocache(regs[1], sizeof(u32));
+       release = ioremap(regs[0], sizeof(u32));
+       initpc = ioremap(regs[1], sizeof(u32));
 
        __raw_writel(entry_point, initpc);
        __raw_writel(1, release);
index 43763c26a752bbb4e43c6482acc8eba2fa9fdd88..dee6be2c23443dca65d1d52baac3c3426e81af0f 100644 (file)
@@ -68,7 +68,7 @@ static struct sh_clk_ops *sh5_clk_ops[] = {
 
 void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
 {
-       cprc_base = (unsigned long)ioremap_nocache(CPRC_BASE, 1024);
+       cprc_base = (unsigned long)ioremap(CPRC_BASE, 1024);
        BUG_ON(!cprc_base);
 
        if (idx < ARRAY_SIZE(sh5_clk_ops))
index de68ffdfffbf535260132ed383b8f6b1d115f45c..81c8b64b977ffec6d709883c6d55978fcf13fd2d 100644 (file)
@@ -86,7 +86,7 @@
        andi    r6, ~0xf0, r6;          \
        putcon  r6, SR;
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #  define preempt_stop()       CLI()
 #else
 #  define preempt_stop()
@@ -884,7 +884,7 @@ ret_from_exception:
 
        /* Check softirqs */
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        pta   ret_from_syscall, tr0
        blink   tr0, ZERO
 
index eeb25a4fa55f24e91071403b87da8f6d014878ec..d4811691b93cc170c8d4c3b39f29c84401e6ccac 100644 (file)
@@ -28,7 +28,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        arch_sync_dma_for_device(virt_to_phys(ret), size,
                        DMA_BIDIRECTIONAL);
 
-       ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
+       ret_nocache = (void __force *)ioremap(virt_to_phys(ret), size);
        if (!ret_nocache) {
                free_pages((unsigned long)ret, order);
                return NULL;
index d31f66e82ce516366767ef531f6ef945acbab613..956a7a03b0c838e06b68a41c968dfa7ca7a4b3ba 100644 (file)
@@ -41,7 +41,7 @@
  */
 #include <asm/dwarf.h>
 
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPTION)
 #  define preempt_stop()       cli ; TRACE_IRQS_OFF
 #else
 #  define preempt_stop()
@@ -84,7 +84,7 @@ ENTRY(ret_from_irq)
        get_current_thread_info r8, r0
        bt      resume_kernel   ! Yes, it's from kernel, go back soon
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        bra     resume_userspace
         nop
 ENTRY(resume_kernel)
index 6d61f8cf4c131dac3b48b33acc4635f85814a32e..0d5f3c9d52f30edbcbf51141b8de16a294236903 100644 (file)
@@ -266,6 +266,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                ptr = &remcomInBuffer[1];
                if (kgdb_hex2long(&ptr, &addr))
                        linux_regs->pc = addr;
+               /* fallthrough */
        case 'D':
        case 'k':
                atomic_set(&kgdb_cpu_doing_single_step, -1);
index dfdbaa50946ebbdfb2c1853b738eaf72ae0d734e..d1b1ff2be17aa728a84ba8e8701313664e9a76df 100644 (file)
@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
index eb24cb1afc11fbcc044583c3277b55aca9234f71..e8c3ea01c12f6d5cf90e55eb1fc25d6f8e248427 100644 (file)
@@ -277,7 +277,7 @@ config US3_MC
 config GENERIC_LOCKBREAK
        bool
        default y
-       depends on SPARC64 && SMP && PREEMPT
+       depends on SPARC64 && SMP && PREEMPTION
 
 config NUMA
        bool "NUMA support"
index f4afa301954a2fd039c42d4e20f50d93f16ae959..9bb27e5c22f159c97fed8410d59dd92c91084e0e 100644 (file)
@@ -406,7 +406,6 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
        return (void __iomem *)offset;
 }
 
-#define ioremap_nocache(X,Y)           ioremap((X),(Y))
 #define ioremap_uc(X,Y)                        ioremap((X),(Y))
 #define ioremap_wc(X,Y)                        ioremap((X),(Y))
 #define ioremap_wt(X,Y)                        ioremap((X),(Y))
diff --git a/arch/sparc/include/asm/vmalloc.h b/arch/sparc/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..04b8ab9
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_SPARC_VMALLOC_H
+#define _ASM_SPARC_VMALLOC_H
+
+#endif /* _ASM_SPARC_VMALLOC_H */
index 29aa34f11720cea3e098372995f751298b09e641..c5fd4b450d9b631b694bfd706e1959acef1d91ba 100644 (file)
@@ -310,7 +310,7 @@ kern_rtt_restore:
                retry
 
 to_kernel:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
                ldsw                    [%g6 + TI_PRE_COUNT], %l5
                brnz                    %l5, kern_fpucheck
                 ldx                    [%g6 + TI_FLAGS], %l5
index 84cc8f7f83e9934e1aae5b4d38d3ce099c38e32a..c8eabb973b8688648dff42f70bd6f05b43a35103 100644 (file)
@@ -180,19 +180,19 @@ do {                                                                      \
 
 #define emit_loadptr(BASE, STRUCT, FIELD, DEST)                                \
 do {   unsigned int _off = offsetof(STRUCT, FIELD);                    \
-       BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *));    \
+       BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(void *));    \
        *prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST);            \
 } while (0)
 
 #define emit_load32(BASE, STRUCT, FIELD, DEST)                         \
 do {   unsigned int _off = offsetof(STRUCT, FIELD);                    \
-       BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32));       \
+       BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u32));       \
        *prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST);             \
 } while (0)
 
 #define emit_load16(BASE, STRUCT, FIELD, DEST)                         \
 do {   unsigned int _off = offsetof(STRUCT, FIELD);                    \
-       BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16));       \
+       BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u16));       \
        *prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST);             \
 } while (0)
 
@@ -202,7 +202,7 @@ do {        unsigned int _off = offsetof(STRUCT, FIELD);                    \
 } while (0)
 
 #define emit_load8(BASE, STRUCT, FIELD, DEST)                          \
-do {   BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));        \
+do {   BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u8));        \
        __emit_load8(BASE, STRUCT, FIELD, DEST);                        \
 } while (0)
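
Editor's note: the FIELD_SIZEOF to sizeof_field renames are mechanical; the helper computes a struct member's size without needing an instance. Its definition, as found in include/linux/stddef.h around this release (quoted from memory, so treat as a sketch):

	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))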
 
index 2a6d04fcb3e91c7e191cec43b7d1e2d6d832819b..6f0edd0c0220f40dac6fd3c89309458e9f0981e6 100644 (file)
@@ -14,6 +14,7 @@ config UML
        select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DEBUG_BUGVERBOSE
+       select HAVE_COPY_THREAD_TLS
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
        select GENERIC_CLOCKEVENTS
index 81c647ef9c6c8df98ae917178318a61647d9391e..adf91ef553ae91f215dcf1c457fcf258e8065423 100644 (file)
@@ -36,7 +36,7 @@ extern long subarch_ptrace(struct task_struct *child, long request,
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
 
-extern int arch_copy_tls(struct task_struct *new);
+extern int arch_set_tls(struct task_struct *new, unsigned long tls);
 extern void clear_flushed_tls(struct task_struct *task);
 extern int syscall_trace_enter(struct pt_regs *regs);
 extern void syscall_trace_leave(struct pt_regs *regs);
diff --git a/arch/um/include/asm/vmalloc.h b/arch/um/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..9a7b9ed
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_UM_VMALLOC_H
+#define _ASM_UM_VMALLOC_H
+
+#endif /* _ASM_UM_VMALLOC_H */
index 263a8f06913341d1b0a9d013d89f12971e928391..17045e7211bfd461d9a055fcee9b0cb1f3ac6fed 100644 (file)
@@ -153,8 +153,8 @@ void fork_handler(void)
        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long sp,
-               unsigned long arg, struct task_struct * p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+               unsigned long arg, struct task_struct * p, unsigned long tls)
 {
        void (*handler)(void);
        int kthread = current->flags & PF_KTHREAD;
@@ -188,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
-                       ret = arch_copy_tls(p);
+                       ret = arch_set_tls(p, tls);
        }
 
        return ret;
index 4b460e01acfa8920fbcf1abd525fa2784392eb5c..3ca74e1cde7d63d5c203c2ef65bf4ec7ce51e139 100644 (file)
@@ -31,7 +31,6 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
  *
  */
 #define ioremap(cookie, size)          __uc32_ioremap(cookie, size)
-#define ioremap_nocache(cookie, size)  __uc32_ioremap(cookie, size)
 #define iounmap(cookie)                        __uc32_iounmap(cookie)
 
 #define readb_relaxed readb
diff --git a/arch/unicore32/include/asm/vmalloc.h b/arch/unicore32/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..0544358
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_UNICORE32_VMALLOC_H
+#define _ASM_UNICORE32_VMALLOC_H
+
+#endif /* _ASM_UNICORE32_VMALLOC_H */
index 97956b56074d8fbc191dcc8133ee2e22045a86f1..42bc6fb0d2be1ae148bc5a30fba0b1db19691be5 100644 (file)
@@ -96,7 +96,7 @@ config X86
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
        select ARCH_WANT_HUGE_PMD_SHARE
        select ARCH_WANTS_THP_SWAP              if X86_64
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select CLKEVT_I8253
        select CLOCKSOURCE_VALIDATE_LAST_CYCLE
        select CLOCKSOURCE_WATCHDOG
@@ -124,6 +124,7 @@ config X86
        select GENERIC_STRNLEN_USER
        select GENERIC_TIME_VSYSCALL
        select GENERIC_GETTIMEOFDAY
+       select GENERIC_VDSO_TIME_NS
        select GUP_GET_PTE_LOW_HIGH             if X86_PAE
        select HARDLOCKUP_CHECK_TIMESTAMP       if X86_64
        select HAVE_ACPI_APEI                   if ACPI
@@ -456,6 +457,7 @@ config X86_CPU_RESCTRL
        bool "x86 CPU resource control support"
        depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
        select KERNFS
+       select PROC_CPU_RESCTRL         if PROC_FS
        help
          Enable x86 CPU resource control support.
 
@@ -1512,7 +1514,7 @@ config X86_CPA_STATISTICS
        bool "Enable statistic for Change Page Attribute"
        depends on DEBUG_FS
        ---help---
-         Expose statistics about the Change Page Attribute mechanims, which
+         Expose statistics about the Change Page Attribute mechanism, which
          helps to determine the effectiveness of preserving large and huge
          page mappings when mapping protections are changed.
 
@@ -1993,6 +1995,7 @@ config EFI
 config EFI_STUB
        bool "EFI stub support"
        depends on EFI && !X86_USE_3DNOW
+       depends on $(cc-option,-mabi=ms) || X86_32
        select RELOCATABLE
        ---help---
          This kernel feature allows a bzImage to be loaded directly
index 95410d6ee2ff89ff26611b6bd0d580d418a572c1..748b6d28a91defb1aef2fc9a8fb1b5f26f34ab97 100644 (file)
@@ -88,7 +88,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
 
 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
index aa976adb7094ee37158a901a3372b079df13bcb8..56aa5fa0a66b1ee12e6d16731de86254e0cc4af4 100644 (file)
@@ -89,7 +89,7 @@ vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
 
 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
 
-vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
+vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o \
        $(objtree)/drivers/firmware/efi/libstub/lib.a
 vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 
@@ -103,7 +103,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 quiet_cmd_check_data_rel = DATAREL $@
 define cmd_check_data_rel
        for obj in $(filter %.o,$^); do \
-               ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
+               $(READELF) -S $$obj | grep -qF .rel.local && { \
                        echo "error: $$obj has data relocations!" >&2; \
                        exit 1; \
                } || true; \
index 72b08fde6de64dc44260c078c2cd19f8c0c5d7cd..287393d725f0130c4041be08b918cd544a2836df 100644 (file)
@@ -6,6 +6,8 @@
  *
  * ----------------------------------------------------------------------- */
 
+#pragma GCC visibility push(hidden)
+
 #include <linux/efi.h>
 #include <linux/pci.h>
 
 #include "eboot.h"
 
 static efi_system_table_t *sys_table;
+extern const bool efi_is64;
 
-static struct efi_config *efi_early;
-
-__pure const struct efi_config *__efi_early(void)
+__pure efi_system_table_t *efi_system_table(void)
 {
-       return efi_early;
+       return sys_table;
 }
 
-#define BOOT_SERVICES(bits)                                            \
-static void setup_boot_services##bits(struct efi_config *c)            \
-{                                                                      \
-       efi_system_table_##bits##_t *table;                             \
-                                                                       \
-       table = (typeof(table))sys_table;                               \
-                                                                       \
-       c->runtime_services     = table->runtime;                       \
-       c->boot_services        = table->boottime;                      \
-       c->text_output          = table->con_out;                       \
-}
-BOOT_SERVICES(32);
-BOOT_SERVICES(64);
-
-void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
+__attribute_const__ bool efi_is_64bit(void)
 {
-       efi_call_proto(efi_simple_text_output_protocol, output_string,
-                      efi_early->text_output, str);
+       if (IS_ENABLED(CONFIG_EFI_MIXED))
+               return efi_is64;
+       return IS_ENABLED(CONFIG_X86_64);
 }
 
 static efi_status_t
@@ -63,17 +51,17 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
         * large romsize. The UEFI spec limits the size of option ROMs to 16
         * MiB so we reject any ROMs over 16 MiB in size to catch this.
         */
-       romimage = (void *)(unsigned long)efi_table_attr(efi_pci_io_protocol,
-                                                        romimage, pci);
-       romsize = efi_table_attr(efi_pci_io_protocol, romsize, pci);
+       romimage = efi_table_attr(pci, romimage);
+       romsize = efi_table_attr(pci, romsize);
        if (!romimage || !romsize || romsize > SZ_16M)
                return EFI_INVALID_PARAMETER;
 
        size = romsize + sizeof(*rom);
 
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+                            (void **)&rom);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to allocate memory for 'rom'\n");
+               efi_printk("Failed to allocate memory for 'rom'\n");
                return status;
        }
 
@@ -85,27 +73,24 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
        rom->pcilen     = pci->romsize;
        *__rom = rom;
 
-       status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
-                               EfiPciIoWidthUint16, PCI_VENDOR_ID, 1,
-                               &rom->vendor);
+       status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+                               PCI_VENDOR_ID, 1, &rom->vendor);
 
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to read rom->vendor\n");
+               efi_printk("Failed to read rom->vendor\n");
                goto free_struct;
        }
 
-       status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
-                               EfiPciIoWidthUint16, PCI_DEVICE_ID, 1,
-                               &rom->devid);
+       status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+                               PCI_DEVICE_ID, 1, &rom->devid);
 
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to read rom->devid\n");
+               efi_printk("Failed to read rom->devid\n");
                goto free_struct;
        }
 
-       status = efi_call_proto(efi_pci_io_protocol, get_location, pci,
-                               &rom->segment, &rom->bus, &rom->device,
-                               &rom->function);
+       status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
+                               &rom->device, &rom->function);
 
        if (status != EFI_SUCCESS)
                goto free_struct;
@@ -114,7 +99,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
        return status;
 
 free_struct:
-       efi_call_early(free_pool, rom);
+       efi_bs_call(free_pool, rom);
        return status;
 }
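
Editor's note: the pattern throughout this EFI stub refactor is that the old bitness-aware wrappers (efi_call_early(), efi_table_attr(type, field, instance), efi_call_proto(type, method, instance, ...)) give way to accessors that take the protocol instance first and hide the 32/64-bit table layout internally. A usage summary, sketched from the hunks above (variables as in the surrounding code):

	romsize = efi_table_attr(pci, romsize);		/* field read    */
	status  = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
			      size, (void **)&rom);	/* boot service  */
	status  = efi_call_proto(pci, get_location,	/* protocol call */
				 &rom->segment, &rom->bus,
				 &rom->device, &rom->function);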
 
@@ -133,27 +118,24 @@ static void setup_efi_pci(struct boot_params *params)
        void **pci_handle = NULL;
        efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
        unsigned long size = 0;
-       unsigned long nr_pci;
        struct setup_data *data;
+       efi_handle_t h;
        int i;
 
-       status = efi_call_early(locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL,
-                               &pci_proto, NULL, &size, pci_handle);
+       status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+                            &pci_proto, NULL, &size, pci_handle);
 
        if (status == EFI_BUFFER_TOO_SMALL) {
-               status = efi_call_early(allocate_pool,
-                                       EFI_LOADER_DATA,
-                                       size, (void **)&pci_handle);
+               status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+                                    (void **)&pci_handle);
 
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n");
+                       efi_printk("Failed to allocate memory for 'pci_handle'\n");
                        return;
                }
 
-               status = efi_call_early(locate_handle,
-                                       EFI_LOCATE_BY_PROTOCOL, &pci_proto,
-                                       NULL, &size, pci_handle);
+               status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+                                    &pci_proto, NULL, &size, pci_handle);
        }
 
        if (status != EFI_SUCCESS)
@@ -164,15 +146,12 @@ static void setup_efi_pci(struct boot_params *params)
        while (data && data->next)
                data = (struct setup_data *)(unsigned long)data->next;
 
-       nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
-       for (i = 0; i < nr_pci; i++) {
+       for_each_efi_handle(h, pci_handle, size, i) {
                efi_pci_io_protocol_t *pci = NULL;
                struct pci_setup_rom *rom;
 
-               status = efi_call_early(handle_protocol,
-                                       efi_is_64bit() ? ((u64 *)pci_handle)[i]
-                                                      : ((u32 *)pci_handle)[i],
-                                       &pci_proto, (void **)&pci);
+               status = efi_bs_call(handle_protocol, h, &pci_proto,
+                                    (void **)&pci);
                if (status != EFI_SUCCESS || !pci)
                        continue;
 
@@ -189,7 +168,7 @@ static void setup_efi_pci(struct boot_params *params)
        }
 
 free_handle:
-       efi_call_early(free_pool, pci_handle);
+       efi_bs_call(free_pool, pci_handle);
 }
 
 static void retrieve_apple_device_properties(struct boot_params *boot_params)
@@ -198,34 +177,34 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
        struct setup_data *data, *new;
        efi_status_t status;
        u32 size = 0;
-       void *p;
+       apple_properties_protocol_t *p;
 
-       status = efi_call_early(locate_protocol, &guid, NULL, &p);
+       status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&p);
        if (status != EFI_SUCCESS)
                return;
 
-       if (efi_table_attr(apple_properties_protocol, version, p) != 0x10000) {
-               efi_printk(sys_table, "Unsupported properties proto version\n");
+       if (efi_table_attr(p, version) != 0x10000) {
+               efi_printk("Unsupported properties proto version\n");
                return;
        }
 
-       efi_call_proto(apple_properties_protocol, get_all, p, NULL, &size);
+       efi_call_proto(p, get_all, NULL, &size);
        if (!size)
                return;
 
        do {
-               status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                                       size + sizeof(struct setup_data), &new);
+               status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+                                    size + sizeof(struct setup_data),
+                                    (void **)&new);
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table, "Failed to allocate memory for 'properties'\n");
+                       efi_printk("Failed to allocate memory for 'properties'\n");
                        return;
                }
 
-               status = efi_call_proto(apple_properties_protocol, get_all, p,
-                                       new->data, &size);
+               status = efi_call_proto(p, get_all, new->data, &size);
 
                if (status == EFI_BUFFER_TOO_SMALL)
-                       efi_call_early(free_pool, new);
+                       efi_bs_call(free_pool, new);
        } while (status == EFI_BUFFER_TOO_SMALL);
 
        new->type = SETUP_APPLE_PROPERTIES;
@@ -247,7 +226,7 @@ static const efi_char16_t apple[] = L"Apple";
 static void setup_quirks(struct boot_params *boot_params)
 {
        efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
-               efi_table_attr(efi_system_table, fw_vendor, sys_table);
+               efi_table_attr(efi_system_table(), fw_vendor);
 
        if (!memcmp(fw_vendor, apple, sizeof(apple))) {
                if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
@@ -265,17 +244,16 @@ setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
        u32 width, height;
        void **uga_handle = NULL;
        efi_uga_draw_protocol_t *uga = NULL, *first_uga;
-       unsigned long nr_ugas;
+       efi_handle_t handle;
        int i;
 
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               size, (void **)&uga_handle);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+                            (void **)&uga_handle);
        if (status != EFI_SUCCESS)
                return status;
 
-       status = efi_call_early(locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL,
-                               uga_proto, NULL, &size, uga_handle);
+       status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+                            uga_proto, NULL, &size, uga_handle);
        if (status != EFI_SUCCESS)
                goto free_handle;
 
@@ -283,24 +261,20 @@ setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
        width = 0;
 
        first_uga = NULL;
-       nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
-       for (i = 0; i < nr_ugas; i++) {
+       for_each_efi_handle(handle, uga_handle, size, i) {
                efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
                u32 w, h, depth, refresh;
                void *pciio;
-               unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i]
-                                                     : ((u32 *)uga_handle)[i];
 
-               status = efi_call_early(handle_protocol, handle,
-                                       uga_proto, (void **)&uga);
+               status = efi_bs_call(handle_protocol, handle, uga_proto,
+                                    (void **)&uga);
                if (status != EFI_SUCCESS)
                        continue;
 
                pciio = NULL;
-               efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
+               efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
 
-               status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga,
-                                       &w, &h, &depth, &refresh);
+               status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
                if (status == EFI_SUCCESS && (!first_uga || pciio)) {
                        width = w;
                        height = h;
@@ -336,7 +310,7 @@ setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
        si->rsvd_pos            = 24;
 
 free_handle:
-       efi_call_early(free_pool, uga_handle);
+       efi_bs_call(free_pool, uga_handle);
 
        return status;
 }
@@ -355,37 +329,38 @@ void setup_graphics(struct boot_params *boot_params)
        memset(si, 0, sizeof(*si));
 
        size = 0;
-       status = efi_call_early(locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL,
-                               &graphics_proto, NULL, &size, gop_handle);
+       status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+                            &graphics_proto, NULL, &size, gop_handle);
        if (status == EFI_BUFFER_TOO_SMALL)
-               status = efi_setup_gop(NULL, si, &graphics_proto, size);
+               status = efi_setup_gop(si, &graphics_proto, size);
 
        if (status != EFI_SUCCESS) {
                size = 0;
-               status = efi_call_early(locate_handle,
-                                       EFI_LOCATE_BY_PROTOCOL,
-                                       &uga_proto, NULL, &size, uga_handle);
+               status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+                                    &uga_proto, NULL, &size, uga_handle);
                if (status == EFI_BUFFER_TOO_SMALL)
                        setup_uga(si, &uga_proto, size);
        }
 }
 
+void startup_32(struct boot_params *boot_params);
+
+void __noreturn efi_stub_entry(efi_handle_t handle,
+                              efi_system_table_t *sys_table_arg,
+                              struct boot_params *boot_params);
+
 /*
  * Because the x86 boot code expects to be passed a boot_params we
  * need to create one ourselves (usually the bootloader would create
  * one for us).
- *
- * The caller is responsible for filling out ->code32_start in the
- * returned boot_params.
  */
-struct boot_params *make_boot_params(struct efi_config *c)
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+                                  efi_system_table_t *sys_table_arg)
 {
        struct boot_params *boot_params;
        struct apm_bios_info *bi;
        struct setup_header *hdr;
        efi_loaded_image_t *image;
-       void *handle;
        efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
        int options_size = 0;
        efi_status_t status;
@@ -393,31 +368,22 @@ struct boot_params *make_boot_params(struct efi_config *c)
        unsigned long ramdisk_addr;
        unsigned long ramdisk_size;
 
-       efi_early = c;
-       sys_table = (efi_system_table_t *)(unsigned long)efi_early->table;
-       handle = (void *)(unsigned long)efi_early->image_handle;
+       sys_table = sys_table_arg;
 
        /* Check if we were booted by the EFI firmware */
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
-               return NULL;
-
-       if (efi_is_64bit())
-               setup_boot_services64(efi_early);
-       else
-               setup_boot_services32(efi_early);
+               return EFI_INVALID_PARAMETER;
 
-       status = efi_call_early(handle_protocol, handle,
-                               &proto, (void *)&image);
+       status = efi_bs_call(handle_protocol, handle, &proto, (void *)&image);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
-               return NULL;
+               efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
+               return status;
        }
 
-       status = efi_low_alloc(sys_table, 0x4000, 1,
-                              (unsigned long *)&boot_params);
+       status = efi_low_alloc(0x4000, 1, (unsigned long *)&boot_params);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to allocate lowmem for boot params\n");
-               return NULL;
+               efi_printk("Failed to allocate lowmem for boot params\n");
+               return status;
        }
 
        memset(boot_params, 0x0, 0x4000);
@@ -439,7 +405,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        hdr->type_of_loader = 0x21;
 
        /* Convert unicode cmdline to ascii */
-       cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
+       cmdline_ptr = efi_convert_cmdline(image, &options_size);
        if (!cmdline_ptr)
                goto fail;
 
@@ -457,15 +423,15 @@ struct boot_params *make_boot_params(struct efi_config *c)
        if (status != EFI_SUCCESS)
                goto fail2;
 
-       status = handle_cmdline_files(sys_table, image,
+       status = handle_cmdline_files(image,
                                      (char *)(unsigned long)hdr->cmd_line_ptr,
                                      "initrd=", hdr->initrd_addr_max,
                                      &ramdisk_addr, &ramdisk_size);
 
        if (status != EFI_SUCCESS &&
            hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) {
-               efi_printk(sys_table, "Trying to load files to higher address\n");
-               status = handle_cmdline_files(sys_table, image,
+               efi_printk("Trying to load files to higher address\n");
+               status = handle_cmdline_files(image,
                                      (char *)(unsigned long)hdr->cmd_line_ptr,
                                      "initrd=", -1UL,
                                      &ramdisk_addr, &ramdisk_size);
@@ -478,14 +444,17 @@ struct boot_params *make_boot_params(struct efi_config *c)
        boot_params->ext_ramdisk_image = (u64)ramdisk_addr >> 32;
        boot_params->ext_ramdisk_size  = (u64)ramdisk_size >> 32;
 
-       return boot_params;
+       hdr->code32_start = (u32)(unsigned long)startup_32;
+
+       efi_stub_entry(handle, sys_table, boot_params);
+       /* not reached */
 
 fail2:
-       efi_free(sys_table, options_size, hdr->cmd_line_ptr);
+       efi_free(options_size, hdr->cmd_line_ptr);
 fail:
-       efi_free(sys_table, 0x4000, (unsigned long)boot_params);
+       efi_free(0x4000, (unsigned long)boot_params);
 
-       return NULL;
+       return status;
 }
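
The hunks above drop the explicit sys_table argument from efi_printk(), efi_low_alloc() and the efi_call_early() sites, routing boot services calls through a global system table instead. A rough sketch of what the new wrapper can look like on a native boot (the macro body here is an assumption for illustration, not necessarily the exact definition from this series):

    /* Sketch: resolve a boot services function via the global table. */
    #define efi_bs_call(func, ...) \
            efi_system_table()->boottime->func(__VA_ARGS__)

Mixed mode needs an extra indirection through the thunking machinery that appears further down in this diff.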
 
 static void add_e820ext(struct boot_params *params,
@@ -620,13 +589,13 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
                sizeof(struct e820_entry) * nr_desc;
 
        if (*e820ext) {
-               efi_call_early(free_pool, *e820ext);
+               efi_bs_call(free_pool, *e820ext);
                *e820ext = NULL;
                *e820ext_size = 0;
        }
 
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               size, (void **)e820ext);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+                            (void **)e820ext);
        if (status == EFI_SUCCESS)
                *e820ext_size = size;
 
@@ -650,7 +619,7 @@ static efi_status_t allocate_e820(struct boot_params *params,
        boot_map.key_ptr        = NULL;
        boot_map.buff_size      = &buff_size;
 
-       status = efi_get_memory_map(sys_table, &boot_map);
+       status = efi_get_memory_map(&boot_map);
        if (status != EFI_SUCCESS)
                return status;
 
@@ -672,8 +641,7 @@ struct exit_boot_struct {
        struct efi_info         *efi;
 };
 
-static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
-                                  struct efi_boot_memmap *map,
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
                                   void *priv)
 {
        const char *signature;
@@ -683,14 +651,14 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
                                   : EFI32_LOADER_SIGNATURE;
        memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
 
-       p->efi->efi_systab              = (unsigned long)sys_table_arg;
+       p->efi->efi_systab              = (unsigned long)efi_system_table();
        p->efi->efi_memdesc_size        = *map->desc_size;
        p->efi->efi_memdesc_version     = *map->desc_ver;
        p->efi->efi_memmap              = (unsigned long)*map->map;
        p->efi->efi_memmap_size         = *map->map_size;
 
 #ifdef CONFIG_X86_64
-       p->efi->efi_systab_hi           = (unsigned long)sys_table_arg >> 32;
+       p->efi->efi_systab_hi           = (unsigned long)efi_system_table() >> 32;
        p->efi->efi_memmap_hi           = (unsigned long)*map->map >> 32;
 #endif
 
@@ -722,8 +690,7 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
                return status;
 
        /* Might as well exit boot services now */
-       status = efi_exit_boot_services(sys_table, handle, &map, &priv,
-                                       exit_boot_func);
+       status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
        if (status != EFI_SUCCESS)
                return status;
 
@@ -741,33 +708,22 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
  * On success we return a pointer to a boot_params structure, and on
  * failure we print a diagnostic and halt.
  */
-struct boot_params *
-efi_main(struct efi_config *c, struct boot_params *boot_params)
+struct boot_params *efi_main(efi_handle_t handle,
+                            efi_system_table_t *sys_table_arg,
+                            struct boot_params *boot_params)
 {
        struct desc_ptr *gdt = NULL;
        struct setup_header *hdr = &boot_params->hdr;
        efi_status_t status;
        struct desc_struct *desc;
-       void *handle;
-       efi_system_table_t *_table;
        unsigned long cmdline_paddr;
 
-       efi_early = c;
-
-       _table = (efi_system_table_t *)(unsigned long)efi_early->table;
-       handle = (void *)(unsigned long)efi_early->image_handle;
-
-       sys_table = _table;
+       sys_table = sys_table_arg;
 
        /* Check if we were booted by the EFI firmware */
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                goto fail;
 
-       if (efi_is_64bit())
-               setup_boot_services64(efi_early);
-       else
-               setup_boot_services32(efi_early);
-
        /*
         * make_boot_params() may have been called before efi_main(), in which
         * case this is the second time we parse the cmdline. This is ok,
@@ -782,14 +738,14 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
         * otherwise we ask the BIOS.
         */
        if (boot_params->secure_boot == efi_secureboot_mode_unset)
-               boot_params->secure_boot = efi_get_secureboot(sys_table);
+               boot_params->secure_boot = efi_get_secureboot();
 
        /* Ask the firmware to clear memory on unclean shutdown */
-       efi_enable_reset_attack_mitigation(sys_table);
+       efi_enable_reset_attack_mitigation();
 
-       efi_random_get_seed(sys_table);
+       efi_random_get_seed();
 
-       efi_retrieve_tpm2_eventlog(sys_table);
+       efi_retrieve_tpm2_eventlog();
 
        setup_graphics(boot_params);
 
@@ -797,18 +753,17 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
 
        setup_quirks(boot_params);
 
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               sizeof(*gdt), (void **)&gdt);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*gdt),
+                            (void **)&gdt);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n");
+               efi_printk("Failed to allocate memory for 'gdt' structure\n");
                goto fail;
        }
 
        gdt->size = 0x800;
-       status = efi_low_alloc(sys_table, gdt->size, 8,
-                          (unsigned long *)&gdt->address);
+       status = efi_low_alloc(gdt->size, 8, (unsigned long *)&gdt->address);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n");
+               efi_printk("Failed to allocate memory for 'gdt'\n");
                goto fail;
        }
 
@@ -818,13 +773,13 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
         */
        if (hdr->pref_address != hdr->code32_start) {
                unsigned long bzimage_addr = hdr->code32_start;
-               status = efi_relocate_kernel(sys_table, &bzimage_addr,
+               status = efi_relocate_kernel(&bzimage_addr,
                                             hdr->init_size, hdr->init_size,
                                             hdr->pref_address,
                                             hdr->kernel_alignment,
                                             LOAD_PHYSICAL_ADDR);
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table, "efi_relocate_kernel() failed!\n");
+                       efi_printk("efi_relocate_kernel() failed!\n");
                        goto fail;
                }
 
@@ -834,7 +789,7 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
 
        status = exit_boot(boot_params, handle);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "exit_boot() failed!\n");
+               efi_printk("exit_boot() failed!\n");
                goto fail;
        }
 
@@ -927,7 +882,8 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
 
        return boot_params;
 fail:
-       efi_printk(sys_table, "efi_main() failed!\n");
+       efi_printk("efi_main() failed!\n");
 
-       return NULL;
+       for (;;)
+               asm("hlt");
 }
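
Reconstructing the control flow from the hunks above: make_boot_params() now ends in efi_stub_entry(handle, sys_table, boot_params) instead of returning the structure to an assembly trampoline, efi_main() takes the handle and system table as plain arguments, and the failure paths halt in place rather than returning NULL for a caller to check:

    firmware -> efi32_stub_entry / efi64_stub_entry  (asm, aliased to efi_stub_entry)
             -> efi_main(handle, sys_table, boot_params)
             -> exit_boot() -> jump to startup_32 / startup_64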
index 8297387c4676124167207dc1383467a0b221c6ff..99f35343d443bf3d6e85f88620f55b0841ceaa59 100644 (file)
 
 #define DESC_TYPE_CODE_DATA    (1 << 0)
 
-typedef struct {
-       u32 get_mode;
-       u32 set_mode;
-       u32 blt;
-} efi_uga_draw_protocol_32_t;
+typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
 
-typedef struct {
-       u64 get_mode;
-       u64 set_mode;
-       u64 blt;
-} efi_uga_draw_protocol_64_t;
-
-typedef struct {
-       void *get_mode;
-       void *set_mode;
-       void *blt;
-} efi_uga_draw_protocol_t;
+union efi_uga_draw_protocol {
+       struct {
+               efi_status_t (__efiapi *get_mode)(efi_uga_draw_protocol_t *,
+                                                 u32*, u32*, u32*, u32*);
+               void *set_mode;
+               void *blt;
+       };
+       struct {
+               u32 get_mode;
+               u32 set_mode;
+               u32 blt;
+       } mixed_mode;
+};
 
 #endif /* BOOT_COMPRESSED_EBOOT_H */
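
The union above lets one protocol pointer serve both boot flavors: the anonymous struct holds native function pointers, while mixed_mode holds the 32-bit firmware addresses a 64-bit kernel must thunk to when started from 32-bit firmware. A hedged sketch of how a caller might dispatch (the efi64_thunk()-based call is illustrative; the series' actual call macros are not shown in this diff):

    u32 width, height, depth, refresh;
    efi_status_t status;

    if (efi_is_64bit())         /* native: call through the typed pointer */
            status = uga->get_mode(uga, &width, &height, &depth, &refresh);
    else                        /* mixed mode: thunk to the 32-bit entry  */
            status = efi64_thunk(uga->mixed_mode.get_mode, uga,
                                 &width, &height, &depth, &refresh);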
diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
deleted file mode 100644 (file)
index ed6c351..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * EFI call stub for IA32.
- *
- * This stub allows us to make EFI calls in physical mode with interrupts
- * turned off. Note that this implementation is different from the one in
- * arch/x86/platform/efi/efi_stub_32.S because we're _already_ in physical
- * mode at this point.
- */
-
-#include <linux/linkage.h>
-#include <asm/page_types.h>
-
-/*
- * efi_call_phys(void *, ...) is a function with variable parameters.
- * All the callers of this function assure that all the parameters are 4-bytes.
- */
-
-/*
- * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save.
- * So we'd better save all of them at the beginning of this function and restore
- * at the end no matter how many we use, because we can not assure EFI runtime
- * service functions will comply with gcc calling convention, too.
- */
-
-.text
-SYM_FUNC_START(efi_call_phys)
-       /*
-        * 0. The function can only be called in Linux kernel. So CS has been
-        * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
-        * the values of these registers are the same. And, the corresponding
-        * GDT entries are identical. So I will do nothing about segment reg
-        * and GDT, but change GDT base register in prelog and epilog.
-        */
-
-       /*
-        * 1. Because we haven't been relocated by this point we need to
-        * use relative addressing.
-        */
-       call    1f
-1:     popl    %edx
-       subl    $1b, %edx
-
-       /*
-        * 2. Now on the top of stack is the return
-        * address in the caller of efi_call_phys(), then parameter 1,
-        * parameter 2, ..., param n. To make things easy, we save the return
-        * address of efi_call_phys in a global variable.
-        */
-       popl    %ecx
-       movl    %ecx, saved_return_addr(%edx)
-       /* get the function pointer into ECX*/
-       popl    %ecx
-       movl    %ecx, efi_rt_function_ptr(%edx)
-
-       /*
-        * 3. Call the physical function.
-        */
-       call    *%ecx
-
-       /*
-        * 4. Balance the stack. And because EAX contain the return value,
-        * we'd better not clobber it. We need to calculate our address
-        * again because %ecx and %edx are not preserved across EFI function
-        * calls.
-        */
-       call    1f
-1:     popl    %edx
-       subl    $1b, %edx
-
-       movl    efi_rt_function_ptr(%edx), %ecx
-       pushl   %ecx
-
-       /*
-        * 10. Push the saved return address onto the stack and return.
-        */
-       movl    saved_return_addr(%edx), %ecx
-       pushl   %ecx
-       ret
-SYM_FUNC_END(efi_call_phys)
-.previous
-
-.data
-saved_return_addr:
-       .long 0
-efi_rt_function_ptr:
-       .long 0
diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
deleted file mode 100644 (file)
index 99494df..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <asm/segment.h>
-#include <asm/msr.h>
-#include <asm/processor-flags.h>
-
-#include "../../platform/efi/efi_stub_64.S"
index 593913692d166d7146690d07520db75b45fb7d31..8fb7f6799c52dfab7cee9ed2fc3c99ec86c5b8ff 100644 (file)
@@ -10,7 +10,7 @@
  * needs to be able to service interrupts.
  *
  * On the plus side, we don't have to worry about mangling 64-bit
- * addresses into 32-bits because we're executing with an identify
+ * addresses into 32-bits because we're executing with an identity
  * mapped pagetable and haven't transitioned to 64-bit virtual addresses
  * yet.
  */
 
        .code64
        .text
-SYM_FUNC_START(efi64_thunk)
+SYM_FUNC_START(__efi64_thunk)
        push    %rbp
        push    %rbx
 
-       subq    $8, %rsp
-       leaq    efi_exit32(%rip), %rax
-       movl    %eax, 4(%rsp)
-       leaq    efi_gdt64(%rip), %rax
-       movl    %eax, (%rsp)
-       movl    %eax, 2(%rax)           /* Fixup the gdt base address */
+       leaq    1f(%rip), %rbp
+       leaq    efi_gdt64(%rip), %rbx
+       movl    %ebx, 2(%rbx)           /* Fixup the gdt base address */
 
        movl    %ds, %eax
        push    %rax
@@ -48,15 +45,10 @@ SYM_FUNC_START(efi64_thunk)
        movl    %esi, 0x0(%rsp)
        movl    %edx, 0x4(%rsp)
        movl    %ecx, 0x8(%rsp)
-       movq    %r8, %rsi
-       movl    %esi, 0xc(%rsp)
-       movq    %r9, %rsi
-       movl    %esi,  0x10(%rsp)
+       movl    %r8d, 0xc(%rsp)
+       movl    %r9d, 0x10(%rsp)
 
-       sgdt    save_gdt(%rip)
-
-       leaq    1f(%rip), %rbx
-       movq    %rbx, func_rt_ptr(%rip)
+       sgdt    0x14(%rsp)
 
        /*
         * Switch to gdt with 32-bit segments. This is the firmware GDT
@@ -71,9 +63,9 @@ SYM_FUNC_START(efi64_thunk)
        pushq   %rax
        lretq
 
-1:     addq    $32, %rsp
-
-       lgdt    save_gdt(%rip)
+1:     lgdt    0x14(%rsp)
+       addq    $32, %rsp
+       movq    %rdi, %rax
 
        pop     %rbx
        movl    %ebx, %ss
@@ -85,26 +77,13 @@ SYM_FUNC_START(efi64_thunk)
        /*
         * Convert 32-bit status code into 64-bit.
         */
-       test    %rax, %rax
-       jz      1f
-       movl    %eax, %ecx
-       andl    $0x0fffffff, %ecx
-       andl    $0xf0000000, %eax
-       shl     $32, %rax
-       or      %rcx, %rax
-1:
-       addq    $8, %rsp
+       roll    $1, %eax
+       rorq    $1, %rax
+
        pop     %rbx
        pop     %rbp
        ret
-SYM_FUNC_END(efi64_thunk)
-
-SYM_FUNC_START_LOCAL(efi_exit32)
-       movq    func_rt_ptr(%rip), %rax
-       push    %rax
-       mov     %rdi, %rax
-       ret
-SYM_FUNC_END(efi_exit32)
+SYM_FUNC_END(__efi64_thunk)
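
The two rotates replace the old test/and/shift/or sequence: a 32-bit EFI status keeps its error flag in bit 31, a 64-bit one in bit 63, and all other bits must stay put. A C rendering of the trick, for illustration only:

    /* Equivalent of "roll $1, %eax; rorq $1, %rax" (sketch) */
    static u64 efi32_status_to_64(u32 s)
    {
            u32 rotl = (s << 1) | (s >> 31);        /* bit 31 -> bit 0   */
            u64 wide = rotl;                        /* upper half zeroed */
            return (wide >> 1) | (wide << 63);      /* bit 0  -> bit 63  */
    }

    /* e.g. 0x80000002 (EFI_INVALID_PARAMETER, 32-bit form)
     *   -> 0x8000000000000002 (its 64-bit form)            */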
 
        .code32
 /*
@@ -144,9 +123,7 @@ SYM_FUNC_START_LOCAL(efi_enter32)
         */
        cli
 
-       movl    56(%esp), %eax
-       movl    %eax, 2(%eax)
-       lgdtl   (%eax)
+       lgdtl   (%ebx)
 
        movl    %cr4, %eax
        btsl    $(X86_CR4_PAE_BIT), %eax
@@ -163,9 +140,8 @@ SYM_FUNC_START_LOCAL(efi_enter32)
        xorl    %eax, %eax
        lldt    %ax
 
-       movl    60(%esp), %eax
        pushl   $__KERNEL_CS
-       pushl   %eax
+       pushl   %ebp
 
        /* Enable paging */
        movl    %cr0, %eax
@@ -181,13 +157,6 @@ SYM_DATA_START(efi32_boot_gdt)
        .quad   0
 SYM_DATA_END(efi32_boot_gdt)
 
-SYM_DATA_START_LOCAL(save_gdt)
-       .word   0
-       .quad   0
-SYM_DATA_END(save_gdt)
-
-SYM_DATA_LOCAL(func_rt_ptr, .quad 0)
-
 SYM_DATA_START(efi_gdt64)
        .word   efi_gdt64_end - efi_gdt64
        .long   0                       /* Filled out by user */
index f2dfd6d083ef2c6094ef5a67489176bc36ba9d28..73f17d0544dd58121fdae184e6f0b65d7e2b9f0d 100644 (file)
@@ -145,67 +145,16 @@ SYM_FUNC_START(startup_32)
 SYM_FUNC_END(startup_32)
 
 #ifdef CONFIG_EFI_STUB
-/*
- * We don't need the return address, so set up the stack so efi_main() can find
- * its arguments.
- */
-SYM_FUNC_START(efi_pe_entry)
-       add     $0x4, %esp
-
-       call    1f
-1:     popl    %esi
-       subl    $1b, %esi
-
-       popl    %ecx
-       movl    %ecx, efi32_config(%esi)        /* Handle */
-       popl    %ecx
-       movl    %ecx, efi32_config+8(%esi)      /* EFI System table pointer */
-
-       /* Relocate efi_config->call() */
-       leal    efi32_config(%esi), %eax
-       add     %esi, 40(%eax)
-       pushl   %eax
-
-       call    make_boot_params
-       cmpl    $0, %eax
-       je      fail
-       movl    %esi, BP_code32_start(%eax)
-       popl    %ecx
-       pushl   %eax
-       pushl   %ecx
-       jmp     2f              /* Skip efi_config initialization */
-SYM_FUNC_END(efi_pe_entry)
-
 SYM_FUNC_START(efi32_stub_entry)
+SYM_FUNC_START_ALIAS(efi_stub_entry)
        add     $0x4, %esp
-       popl    %ecx
-       popl    %edx
-
-       call    1f
-1:     popl    %esi
-       subl    $1b, %esi
-
-       movl    %ecx, efi32_config(%esi)        /* Handle */
-       movl    %edx, efi32_config+8(%esi)      /* EFI System table pointer */
-
-       /* Relocate efi_config->call() */
-       leal    efi32_config(%esi), %eax
-       add     %esi, 40(%eax)
-       pushl   %eax
-2:
        call    efi_main
-       cmpl    $0, %eax
        movl    %eax, %esi
-       jne     2f
-fail:
-       /* EFI init failed, so hang. */
-       hlt
-       jmp     fail
-2:
        movl    BP_code32_start(%esi), %eax
        leal    startup_32(%eax), %eax
        jmp     *%eax
 SYM_FUNC_END(efi32_stub_entry)
+SYM_FUNC_END_ALIAS(efi_stub_entry)
 #endif
 
        .text
@@ -240,11 +189,9 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
                                /* push arguments for extract_kernel: */
        pushl   $z_output_len   /* decompressed length, end of relocs */
 
-       movl    BP_init_size(%esi), %eax
-       subl    $_end, %eax
-       movl    %ebx, %ebp
-       subl    %eax, %ebp
-       pushl   %ebp            /* output address */
+       leal    _end(%ebx), %eax
+       subl    BP_init_size(%esi), %eax
+       pushl   %eax            /* output address */
 
        pushl   $z_input_len    /* input_len */
        leal    input_data(%ebx), %eax
@@ -262,15 +209,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
        jmp     *%eax
 SYM_FUNC_END(.Lrelocated)
 
-#ifdef CONFIG_EFI_STUB
-       .data
-efi32_config:
-       .fill 5,8,0
-       .long efi_call_phys
-       .long 0
-       .byte 0
-#endif
-
 /*
  * Stack and heap for uncompression
  */
index 58a512e33d8d628c60e33cfe21322d7823dbdc3e..1f1f6c8139b38c9c72bb6e7b11b6da2cf238eb0a 100644 (file)
@@ -208,10 +208,12 @@ SYM_FUNC_START(startup_32)
        pushl   $__KERNEL_CS
        leal    startup_64(%ebp), %eax
 #ifdef CONFIG_EFI_MIXED
-       movl    efi32_config(%ebp), %ebx
-       cmp     $0, %ebx
+       movl    efi32_boot_args(%ebp), %edi
+       cmp     $0, %edi
        jz      1f
-       leal    handover_entry(%ebp), %eax
+       leal    efi64_stub_entry(%ebp), %eax
+       movl    %esi, %edx
+       movl    efi32_boot_args+4(%ebp), %esi
 1:
 #endif
        pushl   %eax
@@ -232,17 +234,19 @@ SYM_FUNC_START(efi32_stub_entry)
        popl    %edx
        popl    %esi
 
-       leal    (BP_scratch+4)(%esi), %esp
        call    1f
 1:     pop     %ebp
        subl    $1b, %ebp
 
-       movl    %ecx, efi32_config(%ebp)
-       movl    %edx, efi32_config+8(%ebp)
+       movl    %ecx, efi32_boot_args(%ebp)
+       movl    %edx, efi32_boot_args+4(%ebp)
        sgdtl   efi32_boot_gdt(%ebp)
+       movb    $0, efi_is64(%ebp)
 
-       leal    efi32_config(%ebp), %eax
-       movl    %eax, efi_config(%ebp)
+       /* Disable paging */
+       movl    %cr0, %eax
+       btrl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
 
        jmp     startup_32
 SYM_FUNC_END(efi32_stub_entry)
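
As read from the two hunks above, the 32-bit entry path now records its firmware arguments in plain data rather than in a relocated efi_config block:

    efi32_boot_args[0] = EFI image handle   (from %ecx)
    efi32_boot_args[1] = EFI system table   (from %edx)
    efi_is64           = 0

startup_32 then reloads the handle into %edi and the system table into %esi (moving boot_params from %esi to %edx), so that once long mode is entered at efi64_stub_entry they already form the rdi/rsi/rdx arguments of efi_main(handle, sys_table, boot_params).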
@@ -445,70 +449,17 @@ trampoline_return:
 SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
-
-/* The entry point for the PE/COFF executable is efi_pe_entry. */
-SYM_FUNC_START(efi_pe_entry)
-       movq    %rcx, efi64_config(%rip)        /* Handle */
-       movq    %rdx, efi64_config+8(%rip) /* EFI System table pointer */
-
-       leaq    efi64_config(%rip), %rax
-       movq    %rax, efi_config(%rip)
-
-       call    1f
-1:     popq    %rbp
-       subq    $1b, %rbp
-
-       /*
-        * Relocate efi_config->call().
-        */
-       addq    %rbp, efi64_config+40(%rip)
-
-       movq    %rax, %rdi
-       call    make_boot_params
-       cmpq    $0,%rax
-       je      fail
-       mov     %rax, %rsi
-       leaq    startup_32(%rip), %rax
-       movl    %eax, BP_code32_start(%rsi)
-       jmp     2f              /* Skip the relocation */
-
-handover_entry:
-       call    1f
-1:     popq    %rbp
-       subq    $1b, %rbp
-
-       /*
-        * Relocate efi_config->call().
-        */
-       movq    efi_config(%rip), %rax
-       addq    %rbp, 40(%rax)
-2:
-       movq    efi_config(%rip), %rdi
+       .org 0x390
+SYM_FUNC_START(efi64_stub_entry)
+SYM_FUNC_START_ALIAS(efi_stub_entry)
+       and     $~0xf, %rsp                     /* realign the stack */
        call    efi_main
        movq    %rax,%rsi
-       cmpq    $0,%rax
-       jne     2f
-fail:
-       /* EFI init failed, so hang. */
-       hlt
-       jmp     fail
-2:
        movl    BP_code32_start(%esi), %eax
        leaq    startup_64(%rax), %rax
        jmp     *%rax
-SYM_FUNC_END(efi_pe_entry)
-
-       .org 0x390
-SYM_FUNC_START(efi64_stub_entry)
-       movq    %rdi, efi64_config(%rip)        /* Handle */
-       movq    %rsi, efi64_config+8(%rip) /* EFI System table pointer */
-
-       leaq    efi64_config(%rip), %rax
-       movq    %rax, efi_config(%rip)
-
-       movq    %rdx, %rsi
-       jmp     handover_entry
 SYM_FUNC_END(efi64_stub_entry)
+SYM_FUNC_END_ALIAS(efi_stub_entry)
 #endif
 
        .text
@@ -677,24 +628,11 @@ SYM_DATA_START_LOCAL(gdt)
        .quad   0x0000000000000000      /* TS continued */
 SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
 
-#ifdef CONFIG_EFI_STUB
-SYM_DATA_LOCAL(efi_config, .quad 0)
-
 #ifdef CONFIG_EFI_MIXED
-SYM_DATA_START(efi32_config)
-       .fill   5,8,0
-       .quad   efi64_thunk
-       .byte   0
-SYM_DATA_END(efi32_config)
+SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0)
+SYM_DATA(efi_is64, .byte 1)
 #endif
 
-SYM_DATA_START(efi64_config)
-       .fill   5,8,0
-       .quad   efi_call
-       .byte   1
-SYM_DATA_END(efi64_config)
-#endif /* CONFIG_EFI_STUB */
-
 /*
  * Stack and heap for uncompression
  */
index 0149e41d42c270c2a9337f2890e698c9870eb33c..3da1c37c6dd533a9591168cc00e1c2211fb5211f 100644 (file)
@@ -51,7 +51,10 @@ SECTIONS
        . = ALIGN(16);
        _end = .;
 
-       /DISCARD/ : { *(.note*) }
+       /DISCARD/       : {
+               *(.eh_frame)
+               *(.note*)
+       }
 
        /*
         * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
index 76942cbd95a115c348ba211e42ca7133ad44a99c..f2bb91e87877c35eab1001abecaca071f5f37ffe 100644 (file)
@@ -1728,7 +1728,7 @@ SYM_CODE_END(nmi)
 SYM_CODE_START(ignore_sysret)
        UNWIND_HINT_EMPTY
        mov     $-ENOSYS, %eax
-       sysret
+       sysretl
 SYM_CODE_END(ignore_sysret)
 #endif
 
index 93c6dc7812d04602c9c00f3ca3f60ea7490c757e..ea7e0155c604ee85b7f741b75b5e367419dda69a 100644 (file)
@@ -16,18 +16,23 @@ SECTIONS
         * segment.
         */
 
-       vvar_start = . - 3 * PAGE_SIZE;
-       vvar_page = vvar_start;
+       vvar_start = . - 4 * PAGE_SIZE;
+       vvar_page  = vvar_start;
 
        /* Place all vvars at the offsets in asm/vvar.h. */
 #define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset;
-#define __VVAR_KERNEL_LDS
 #include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR
 
        pvclock_page = vvar_start + PAGE_SIZE;
        hvclock_page = vvar_start + 2 * PAGE_SIZE;
+       timens_page  = vvar_start + 3 * PAGE_SIZE;
+
+#undef _ASM_X86_VVAR_H
+       /* Place all vvars in timens too at the offsets in asm/vvar.h. */
+#define EMIT_VVAR(name, offset) timens_ ## name = timens_page + offset;
+#include <asm/vvar.h>
+#undef EMIT_VVAR
 
        . = SIZEOF_HEADERS;
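
With the extra page, the vvar area preceding the vDSO text is laid out as follows (offsets per the script above):

    vvar_start + 0 * PAGE_SIZE : vvar_page     (vvar_* symbols emitted here)
    vvar_start + 1 * PAGE_SIZE : pvclock_page
    vvar_start + 2 * PAGE_SIZE : hvclock_page
    vvar_start + 3 * PAGE_SIZE : timens_page   (new; same offsets, timens_ prefix)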
 
index 3a4d8d4d39f87bb073c4c8b7504a293344ef3cfe..3842873b3ae3b6c5af587544e8e4c4804ea1f3af 100644 (file)
@@ -75,12 +75,14 @@ enum {
        sym_vvar_page,
        sym_pvclock_page,
        sym_hvclock_page,
+       sym_timens_page,
 };
 
 const int special_pages[] = {
        sym_vvar_page,
        sym_pvclock_page,
        sym_hvclock_page,
+       sym_timens_page,
 };
 
 struct vdso_sym {
@@ -93,6 +95,7 @@ struct vdso_sym required_syms[] = {
        [sym_vvar_page] = {"vvar_page", true},
        [sym_pvclock_page] = {"pvclock_page", true},
        [sym_hvclock_page] = {"hvclock_page", true},
+       [sym_timens_page] = {"timens_page", true},
        {"VDSO32_NOTE_MASK", true},
        {"__kernel_vsyscall", true},
        {"__kernel_sigreturn", true},
index f5937742b2901c80947714e8e12c0cc75ba3423d..c1b8496b56067e212e8d5495dc2dc507559a3f49 100644 (file)
 #include <linux/elf.h>
 #include <linux/cpu.h>
 #include <linux/ptrace.h>
+#include <linux/time_namespace.h>
+
 #include <asm/pvclock.h>
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include <asm/vdso.h>
 #include <asm/vvar.h>
+#include <asm/tlb.h>
 #include <asm/page.h>
 #include <asm/desc.h>
 #include <asm/cpufeature.h>
 #include <clocksource/hyperv_timer.h>
 
+#undef _ASM_X86_VVAR_H
+#define EMIT_VVAR(name, offset)        \
+       const size_t name ## _offset = offset;
+#include <asm/vvar.h>
+
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
+{
+       return (struct vdso_data *)(vvar_page + _vdso_data_offset);
+}
+#undef EMIT_VVAR
+
 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
 #endif
@@ -37,6 +51,7 @@ void __init init_vdso_image(const struct vdso_image *image)
                                                image->alt_len));
 }
 
+static const struct vm_special_mapping vvar_mapping;
 struct linux_binprm;
 
 static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
@@ -84,10 +99,74 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
        return 0;
 }
 
+static int vvar_mremap(const struct vm_special_mapping *sm,
+               struct vm_area_struct *new_vma)
+{
+       const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
+       unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+
+       if (new_size != -image->sym_vvar_start)
+               return -EINVAL;
+
+       return 0;
+}
+
+#ifdef CONFIG_TIME_NS
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+       if (likely(vma->vm_mm == current->mm))
+               return current->nsproxy->time_ns->vvar_page;
+
+       /*
+        * VM_PFNMAP | VM_IO protect the .fault() handler from being called
+        * through interfaces like /proc/$pid/mem or
+        * process_vm_{readv,writev}() as long as there's no .access()
+        * in special_mapping_vmops().
+        * For more details, see check_vma_flags() and __access_remote_vm().
+        */
+
+       WARN(1, "vvar_page accessed remotely");
+
+       return NULL;
+}
+
+/*
+ * The vvar page layout depends on whether a task belongs to the root or
+ * non-root time namespace. Whenever a task changes its namespace, the VVAR
+ * page tables are cleared and then re-faulted with the
+ * corresponding layout.
+ * See also the comment near timens_setup_vdso_data() for details.
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+       struct mm_struct *mm = task->mm;
+       struct vm_area_struct *vma;
+
+       if (down_write_killable(&mm->mmap_sem))
+               return -EINTR;
+
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               unsigned long size = vma->vm_end - vma->vm_start;
+
+               if (vma_is_special_mapping(vma, &vvar_mapping))
+                       zap_page_range(vma, vma->vm_start, size);
+       }
+
+       up_write(&mm->mmap_sem);
+       return 0;
+}
+#else
+static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+       return NULL;
+}
+#endif
+
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;
+       unsigned long pfn;
        long sym_offset;
 
        if (!image)
@@ -107,8 +186,36 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                return VM_FAULT_SIGBUS;
 
        if (sym_offset == image->sym_vvar_page) {
-               return vmf_insert_pfn(vma, vmf->address,
-                               __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
+               struct page *timens_page = find_timens_vvar_page(vma);
+
+               pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
+
+               /*
+                * If a task belongs to a time namespace then a namespace
+                * specific VVAR is mapped with the sym_vvar_page offset and
+                * the real VVAR page is mapped with the sym_timens_page
+                * offset.
+                * See also the comment near timens_setup_vdso_data().
+                */
+               if (timens_page) {
+                       unsigned long addr;
+                       vm_fault_t err;
+
+                       /*
+                        * Optimization: inside a time namespace, pre-fault
+                        * the VVAR page too. Since the timens page only
+                        * holds clock offsets relative to VVAR data, the
+                        * VVAR page will be faulted in shortly by vDSO code.
+                        */
+                       addr = vmf->address + (image->sym_timens_page - sym_offset);
+                       err = vmf_insert_pfn(vma, addr, pfn);
+                       if (unlikely(err & VM_FAULT_ERROR))
+                               return err;
+
+                       pfn = page_to_pfn(timens_page);
+               }
+
+               return vmf_insert_pfn(vma, vmf->address, pfn);
        } else if (sym_offset == image->sym_pvclock_page) {
                struct pvclock_vsyscall_time_info *pvti =
                        pvclock_get_pvti_cpu0_va();
@@ -123,6 +230,14 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
                        return vmf_insert_pfn(vma, vmf->address,
                                        virt_to_phys(tsc_pg) >> PAGE_SHIFT);
+       } else if (sym_offset == image->sym_timens_page) {
+               struct page *timens_page = find_timens_vvar_page(vma);
+
+               if (!timens_page)
+                       return VM_FAULT_SIGBUS;
+
+               pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
+               return vmf_insert_pfn(vma, vmf->address, pfn);
        }
 
        return VM_FAULT_SIGBUS;
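
Summarizing which physical page each offset resolves to after this change (derived from the handler above):

    sym_vvar_page offset   : root ns -> __vvar_page,     timens -> namespace vvar_page
    sym_timens_page offset : root ns -> VM_FAULT_SIGBUS, timens -> __vvar_page

i.e. inside a time namespace the two pages trade places, per the timens_setup_vdso_data() scheme referenced above.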
@@ -136,6 +251,7 @@ static const struct vm_special_mapping vdso_mapping = {
 static const struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
        .fault = vvar_fault,
+       .mremap = vvar_mremap,
 };
 
 /*
index a7752cd78b89c20621d5e296eac25de3ac021421..1f22b6bbda68d1bac825f7f25351f308e96f1f6f 100644 (file)
 static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
 static unsigned long perf_nmi_window;
 
+/* AMD Event 0xFFF: Merge.  Used with Large Increment per Cycle events */
+#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
+#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)
+
 static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -301,6 +305,25 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
        return offset;
 }
 
+/*
+ * AMD64 events are detected based on their event codes.
+ */
+static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
+{
+       return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
+}
+
+static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
+{
+       if (!(x86_pmu.flags & PMU_FL_PAIR))
+               return false;
+
+       switch (amd_get_event_code(hwc)) {
+       case 0x003:     return true;    /* Retired SSE/AVX FLOPs */
+       default:        return false;
+       }
+}
+
 static int amd_core_hw_config(struct perf_event *event)
 {
        if (event->attr.exclude_host && event->attr.exclude_guest)
@@ -316,15 +339,10 @@ static int amd_core_hw_config(struct perf_event *event)
        else if (event->attr.exclude_guest)
                event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
 
-       return 0;
-}
+       if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))
+               event->hw.flags |= PERF_X86_EVENT_PAIR;
 
-/*
- * AMD64 events are detected based on their event codes.
- */
-static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
-{
-       return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
+       return 0;
 }
 
 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
@@ -855,6 +873,29 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
        }
 }
 
+static struct event_constraint pair_constraint;
+
+static struct event_constraint *
+amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
+                              struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (amd_is_pair_event_code(hwc))
+               return &pair_constraint;
+
+       return &unconstrained;
+}
+
+static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
+                                          struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (is_counter_pair(hwc))
+               --cpuc->n_pair;
+}
+
 static ssize_t amd_event_sysfs_show(char *page, u64 config)
 {
        u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
@@ -898,33 +939,15 @@ static __initconst const struct x86_pmu amd_pmu = {
 
 static int __init amd_core_pmu_init(void)
 {
+       u64 even_ctr_mask = 0ULL;
+       int i;
+
        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                return 0;
 
-       /* Avoid calulating the value each time in the NMI handler */
+       /* Avoid calculating the value each time in the NMI handler */
        perf_nmi_window = msecs_to_jiffies(100);
 
-       switch (boot_cpu_data.x86) {
-       case 0x15:
-               pr_cont("Fam15h ");
-               x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
-               break;
-       case 0x17:
-               pr_cont("Fam17h ");
-               /*
-                * In family 17h, there are no event constraints in the PMC hardware.
-                * We fallback to using default amd_get_event_constraints.
-                */
-               break;
-       case 0x18:
-               pr_cont("Fam18h ");
-               /* Using default amd_get_event_constraints. */
-               break;
-       default:
-               pr_err("core perfctr but no constraints; unknown hardware!\n");
-               return -ENODEV;
-       }
-
        /*
         * If core performance counter extensions exist, we must use
         * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
@@ -939,6 +962,32 @@ static int __init amd_core_pmu_init(void)
         */
        x86_pmu.amd_nb_constraints = 0;
 
+       if (boot_cpu_data.x86 == 0x15) {
+               pr_cont("Fam15h ");
+               x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
+       }
+       if (boot_cpu_data.x86 >= 0x17) {
+               pr_cont("Fam17h+ ");
+               /*
+                * Family 17h and compatibles have constraints for Large
+                * Increment per Cycle events: they may only be assigned an
+                * even numbered counter that has a consecutive adjacent odd
+                * numbered counter following it.
+                */
+               for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+                       even_ctr_mask |= 1 << i;
+
+               pair_constraint = (struct event_constraint)
+                                   __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
+                                   x86_pmu.num_counters / 2, 0,
+                                   PERF_X86_EVENT_PAIR);
+
+               x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
+               x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
+               x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
+               x86_pmu.flags |= PMU_FL_PAIR;
+       }
+
        pr_cont("core perfctr, ");
        return 0;
 }
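
A worked example of the Fam17h+ setup above, assuming the usual six core counters on such parts:

    /* loop: i = 0, 2, 4  ->  even_ctr_mask = 0b010101 = 0x15        */
    /* pair_constraint: idxmsk = 0x15, weight = num_counters / 2 = 3 */

so a Large Increment per Cycle event may be scheduled only on counter 0, 2 or 4, with the following odd counter implicitly consumed by its Merge event.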
index 9a89d98c55bd8e87a245024bb0fa635ab0f56dd3..3bb738f5a47201e79b0db21e12681590d92e617f 100644 (file)
@@ -376,7 +376,7 @@ int x86_add_exclusive(unsigned int what)
         * LBR and BTS are still mutually exclusive.
         */
        if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
-               return 0;
+               goto out;
 
        if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
                mutex_lock(&pmc_reserve_mutex);
@@ -388,6 +388,7 @@ int x86_add_exclusive(unsigned int what)
                mutex_unlock(&pmc_reserve_mutex);
        }
 
+out:
        atomic_inc(&active_events);
        return 0;
 
@@ -398,11 +399,15 @@ fail_unlock:
 
 void x86_del_exclusive(unsigned int what)
 {
+       atomic_dec(&active_events);
+
+       /*
+        * See the comment in x86_add_exclusive().
+        */
        if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
                return;
 
        atomic_dec(&x86_pmu.lbr_exclusive[what]);
-       atomic_dec(&active_events);
 }
 
 int x86_setup_perfctr(struct perf_event *event)
@@ -613,6 +618,7 @@ void x86_pmu_disable_all(void)
        int idx;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
                u64 val;
 
                if (!test_bit(idx, cpuc->active_mask))
@@ -622,6 +628,8 @@ void x86_pmu_disable_all(void)
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu_config_addr(idx), val);
+               if (is_counter_pair(hwc))
+                       wrmsrl(x86_pmu_config_addr(idx + 1), 0);
        }
 }
 
@@ -694,7 +702,7 @@ struct sched_state {
        int     counter;        /* counter index */
        int     unassigned;     /* number of events to be assigned left */
        int     nr_gp;          /* number of GP counters used */
-       unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       u64     used;
 };
 
 /* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
@@ -751,8 +759,12 @@ static bool perf_sched_restore_state(struct perf_sched *sched)
        sched->saved_states--;
        sched->state = sched->saved[sched->saved_states];
 
-       /* continue with next counter: */
-       clear_bit(sched->state.counter++, sched->state.used);
+       /* this assignment didn't work out */
+       /* XXX broken vs EVENT_PAIR */
+       sched->state.used &= ~BIT_ULL(sched->state.counter);
+
+       /* try the next one */
+       sched->state.counter++;
 
        return true;
 }
@@ -777,20 +789,32 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
                idx = INTEL_PMC_IDX_FIXED;
                for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
-                       if (!__test_and_set_bit(idx, sched->state.used))
-                               goto done;
+                       u64 mask = BIT_ULL(idx);
+
+                       if (sched->state.used & mask)
+                               continue;
+
+                       sched->state.used |= mask;
+                       goto done;
                }
        }
 
        /* Grab the first unused counter starting with idx */
        idx = sched->state.counter;
        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
-               if (!__test_and_set_bit(idx, sched->state.used)) {
-                       if (sched->state.nr_gp++ >= sched->max_gp)
-                               return false;
+               u64 mask = BIT_ULL(idx);
 
-                       goto done;
-               }
+               if (c->flags & PERF_X86_EVENT_PAIR)
+                       mask |= mask << 1;
+
+               if (sched->state.used & mask)
+                       continue;
+
+               if (sched->state.nr_gp++ >= sched->max_gp)
+                       return false;
+
+               sched->state.used |= mask;
+               goto done;
        }
 
        return false;
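
The switch to a u64 bitmask makes pairing a single test; for example (illustrative):

    /* paired event probing counter 2:                         */
    /* mask  = BIT_ULL(2)  = 0b0100                            */
    /* mask |= mask << 1   = 0b1100  (counters 2 and 3)        */
    /* (sched->state.used & mask) == 0 only if both are free   */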
@@ -867,12 +891,10 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
        struct event_constraint *c;
-       unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        struct perf_event *e;
        int n0, i, wmin, wmax, unsched = 0;
        struct hw_perf_event *hwc;
-
-       bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+       u64 used_mask = 0;
 
        /*
         * Compute the number of events already present; see x86_pmu_add(),
@@ -915,6 +937,8 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
         * fastpath, try to reuse previous register
         */
        for (i = 0; i < n; i++) {
+               u64 mask;
+
                hwc = &cpuc->event_list[i]->hw;
                c = cpuc->event_constraint[i];
 
@@ -926,11 +950,16 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;
 
+               mask = BIT_ULL(hwc->idx);
+               if (is_counter_pair(hwc))
+                       mask |= mask << 1;
+
                /* not already used */
-               if (test_bit(hwc->idx, used_mask))
+               if (used_mask & mask)
                        break;
 
-               __set_bit(hwc->idx, used_mask);
+               used_mask |= mask;
+
                if (assign)
                        assign[i] = hwc->idx;
        }
@@ -953,6 +982,15 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
                        gpmax /= 2;
 
+               /*
+                * Reduce the number of available counters to allow fitting
+                * the extra Merge events needed by large increment events.
+                */
+               if (x86_pmu.flags & PMU_FL_PAIR) {
+                       gpmax = x86_pmu.num_counters - cpuc->n_pair;
+                       WARN_ON(gpmax <= 0);
+               }
+
                unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
                                             wmax, gpmax, assign);
        }
@@ -1033,6 +1071,8 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
                        return -EINVAL;
                cpuc->event_list[n] = leader;
                n++;
+               if (is_counter_pair(&leader->hw))
+                       cpuc->n_pair++;
        }
        if (!dogrp)
                return n;
@@ -1047,6 +1087,8 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
                cpuc->event_list[n] = event;
                n++;
+               if (is_counter_pair(&event->hw))
+                       cpuc->n_pair++;
        }
        return n;
 }
@@ -1232,6 +1274,13 @@ int x86_perf_event_set_period(struct perf_event *event)
 
        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
+       /*
+        * Clear the Merge event counter's upper 16 bits since
+        * we currently declare a 48-bit counter width
+        */
+       if (is_counter_pair(hwc))
+               wrmsrl(x86_pmu_event_addr(idx + 1), 0);
+
        /*
         * Due to an erratum on certain CPUs we need
         * a second write to be sure the register
@@ -1642,9 +1691,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
 
 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
 {
-       struct perf_pmu_events_attr *pmu_attr = \
+       struct perf_pmu_events_attr *pmu_attr =
                container_of(attr, struct perf_pmu_events_attr, attr);
-       u64 config = x86_pmu.event_map(pmu_attr->id);
+       u64 config = 0;
+
+       if (pmu_attr->id < x86_pmu.max_events)
+               config = x86_pmu.event_map(pmu_attr->id);
 
        /* string trumps id */
        if (pmu_attr->event_str)
@@ -1713,6 +1765,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx)
 {
        struct perf_pmu_events_attr *pmu_attr;
 
+       if (idx >= x86_pmu.max_events)
+               return 0;
+
        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
        /* str trumps id */
        return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
index 38de4a7f6752b23a58ed8e68934017def83969ed..6a3b599ee0fe7df0fe8010218100e8f9db08acf5 100644 (file)
@@ -63,9 +63,17 @@ struct bts_buffer {
 
 static struct pmu bts_pmu;
 
+static int buf_nr_pages(struct page *page)
+{
+       if (!PagePrivate(page))
+               return 1;
+
+       return 1 << page_private(page);
+}
+
 static size_t buf_size(struct page *page)
 {
-       return 1 << (PAGE_SHIFT + page_private(page));
+       return buf_nr_pages(page) * PAGE_SIZE;
 }
 
 static void *
@@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
        /* count all the high order buffers */
        for (pg = 0, nbuf = 0; pg < nr_pages;) {
                page = virt_to_page(pages[pg]);
-               if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
-                       return NULL;
-               pg += 1 << page_private(page);
+               pg += buf_nr_pages(page);
                nbuf++;
        }
 
@@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
                unsigned int __nr_pages;
 
                page = virt_to_page(pages[pg]);
-               __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
+               __nr_pages = buf_nr_pages(page);
                buf->buf[nbuf].page = page;
                buf->buf[nbuf].offset = offset;
                buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
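
buf_nr_pages() folds the PagePrivate special case into one place; a sketch of the counting it performs:

    /* pages[] backed by one order-2 page followed by one order-0 page:
     *   pg = 0: buf_nr_pages() = 1 << 2 = 4
     *   pg = 4: buf_nr_pages() = 1        (not PagePrivate)
     * -> nbuf = 2, instead of tripping the removed WARN_ON_ONCE()   */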
index ce83950036c56ad8f88459a6a6f7a44decdeeb04..4b94ae4ae369a37235981538e3316de60da7246a 100644 (file)
@@ -7,6 +7,7 @@
 #include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/insn.h>
+#include <asm/io.h>
 
 #include "../perf_event.h"
 
index 5053a403e4ae0e9e9bea4831fa01b33696c5b9f5..09913121e7263918a549eac45fda3de517e0ce01 100644 (file)
@@ -741,6 +741,8 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS,     model_hsw),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_L,              model_skl),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE,                model_skl),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_COMETLAKE_L,            model_skl),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_COMETLAKE,              model_skl),
        {},
 };
 
index dbaa1b088a30e6106b680a666540016180c694ad..c37cb12d0ef68db47b1054113ce53f7e70a6486d 100644 (file)
@@ -15,6 +15,7 @@
 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC         0x1910
 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC         0x190f
 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC         0x191f
+#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC         0x1918
 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC          0x590c
 #define PCI_DEVICE_ID_INTEL_KBL_U_IMC          0x5904
 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC         0x5914
@@ -657,6 +658,10 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
@@ -826,6 +831,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
+       IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
        IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
        IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
        IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
index b10a5ec79e48aac1af8d9edb1396e22449dd8d14..ad20220af303ad8a28acb865123c4a41267c87e9 100644 (file)
 #define SNR_M2M_PCI_PMON_BOX_CTL               0x438
 #define SNR_M2M_PCI_PMON_UMASK_EXT             0xff
 
-/* SNR PCIE3 */
-#define SNR_PCIE3_PCI_PMON_CTL0                        0x508
-#define SNR_PCIE3_PCI_PMON_CTR0                        0x4e8
-#define SNR_PCIE3_PCI_PMON_BOX_CTL             0x4e4
-
 /* SNR IMC */
 #define SNR_IMC_MMIO_PMON_FIXED_CTL            0x54
 #define SNR_IMC_MMIO_PMON_FIXED_CTR            0x38
@@ -4328,27 +4323,12 @@ static struct intel_uncore_type snr_uncore_m2m = {
        .format_group   = &snr_m2m_uncore_format_group,
 };
 
-static struct intel_uncore_type snr_uncore_pcie3 = {
-       .name           = "pcie3",
-       .num_counters   = 4,
-       .num_boxes      = 1,
-       .perf_ctr_bits  = 48,
-       .perf_ctr       = SNR_PCIE3_PCI_PMON_CTR0,
-       .event_ctl      = SNR_PCIE3_PCI_PMON_CTL0,
-       .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
-       .box_ctl        = SNR_PCIE3_PCI_PMON_BOX_CTL,
-       .ops            = &ivbep_uncore_pci_ops,
-       .format_group   = &ivbep_uncore_format_group,
-};
-
 enum {
        SNR_PCI_UNCORE_M2M,
-       SNR_PCI_UNCORE_PCIE3,
 };
 
 static struct intel_uncore_type *snr_pci_uncores[] = {
        [SNR_PCI_UNCORE_M2M]            = &snr_uncore_m2m,
-       [SNR_PCI_UNCORE_PCIE3]          = &snr_uncore_pcie3,
        NULL,
 };
 
@@ -4357,10 +4337,6 @@ static const struct pci_device_id snr_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
        },
-       { /* PCIe3 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
-       },
        { /* end: all zeroes */ }
 };
 
@@ -4536,6 +4512,7 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
        INTEL_UNCORE_EVENT_DESC(write,          "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(write.scale,    "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(write.unit,     "MiB"),
+       { /* end: all zeroes */ },
 };
 
 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
index 930611db8f9adf43251d94f533079905731de828..f1cd1ca1a77b8cdaf02e53db1c2a655a3f4d7c4f 100644 (file)
@@ -77,6 +77,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
 #define PERF_X86_EVENT_AUTO_RELOAD     0x0200 /* use PEBS auto-reload */
 #define PERF_X86_EVENT_LARGE_PEBS      0x0400 /* use large PEBS */
 #define PERF_X86_EVENT_PEBS_VIA_PT     0x0800 /* use PT buffer for PEBS */
+#define PERF_X86_EVENT_PAIR            0x1000 /* Large Increment per Cycle */
 
 struct amd_nb {
        int nb_id;  /* NorthBridge id */
@@ -272,6 +273,7 @@ struct cpu_hw_events {
        struct amd_nb                   *amd_nb;
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64                             perf_ctr_virt_mask;
+       int                             n_pair; /* Large increment events */
 
        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
 };
@@ -694,6 +696,7 @@ struct x86_pmu {
         * AMD bits
         */
        unsigned int    amd_nb_constraints : 1;
+       u64             perf_ctr_pair_en;
 
        /*
         * Extra registers for events
@@ -743,6 +746,7 @@ do {                                                                        \
 #define PMU_FL_EXCL_ENABLED    0x8 /* exclusive counter active */
 #define PMU_FL_PEBS_ALL                0x10 /* all events are valid PEBS events */
 #define PMU_FL_TFA             0x20 /* deal with TSX force abort */
+#define PMU_FL_PAIR            0x40 /* merge counters for large incr. events */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -838,6 +842,11 @@ int x86_pmu_hw_config(struct perf_event *event);
 
 void x86_pmu_disable_all(void);
 
+static inline bool is_counter_pair(struct hw_perf_event *hwc)
+{
+       return hwc->flags & PERF_X86_EVENT_PAIR;
+}
+
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
 {
@@ -845,6 +854,14 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 
        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
+
+       /*
+        * Add an enabled Merge event on the next counter if a large
+        * increment event is being enabled on this counter.
+        */
+       if (is_counter_pair(hwc))
+               wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
+
        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
 }
 
@@ -861,6 +878,9 @@ static inline void x86_pmu_disable_event(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
 
        wrmsrl(hwc->config_base, hwc->config);
+
+       if (is_counter_pair(hwc))
+               wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
 }
 
 void x86_pmu_enable_event(struct perf_event *event);
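
Taken together, the paired enable/disable paths above generate this MSR sequence for a counter-pair event on counter idx (sketch; addresses via x86_pmu_config_addr()):

    /* enable */
    wrmsrl(x86_pmu_config_addr(idx + 1), x86_pmu.perf_ctr_pair_en);  /* Merge */
    wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
    /* disable */
    wrmsrl(hwc->config_base, hwc->config);
    wrmsrl(x86_pmu_config_addr(idx + 1), 0);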
index 30416d7f19d4f8b94248e9e4e149fb21b5e26f8b..a3aefe9b94012ab028a62f0f8b97e20b238f0076 100644 (file)
@@ -114,8 +114,6 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 
        err |= fpu__restore_sig(buf, 1);
 
-       force_iret();
-
        return err;
 }
 
index bc9693c9107e8163cb67d95e680c755601344a84..ca0976456a6b0e5d1e336f6f021658125b61aaab 100644 (file)
@@ -13,7 +13,6 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
-#include <asm/realmode.h>
 #include <asm/x86_init.h>
 
 #ifdef CONFIG_ACPI_APEI
@@ -62,7 +61,7 @@ static inline void acpi_disable_pci(void)
 extern int (*acpi_suspend_lowlevel)(void);
 
 /* Physical address to resume after wakeup */
-#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
+unsigned long acpi_get_wakeup_address(void);
 
 /*
  * Check if the CPU can handle C2 and deeper
index 804734058c778f3f1f1df7b9f2f426ca365febf7..02c0078d3787b46c5e3bc59a1a4e2af4040a0b3e 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/percpu-defs.h>
 #include <asm/processor.h>
 #include <asm/intel_ds.h>
+#include <asm/pgtable_areas.h>
 
 #ifdef CONFIG_X86_64
 
@@ -134,15 +135,6 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
 extern void setup_cpu_entry_areas(void);
 extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
 
-/* Single page reserved for the readonly IDT mapping: */
-#define        CPU_ENTRY_AREA_RO_IDT           CPU_ENTRY_AREA_BASE
-#define CPU_ENTRY_AREA_PER_CPU         (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
-
-#define CPU_ENTRY_AREA_RO_IDT_VADDR    ((void *)CPU_ENTRY_AREA_RO_IDT)
-
-#define CPU_ENTRY_AREA_MAP_SIZE                        \
-       (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
-
 extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
 
 static inline struct entry_stack *cpu_entry_stack(int cpu)
index e9b62498fe75a3f3fce3692678e5ff28bbb28880..98c60fa31cedab4417daf1b183e0c478378b4295 100644 (file)
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW      (18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS      (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_FSRM               (18*32+ 4) /* Fast Short Rep Mov */
 #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
 #define X86_FEATURE_MD_CLEAR           (18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT    (18*32+13) /* "" TSX_FORCE_ABORT */
index d028e9acdf1c05964970d467a347aaeff987d875..86169a24b0d8baa66a05e0466344aa727efac270 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/tlb.h>
 #include <asm/nospec-branch.h>
 #include <asm/mmu_context.h>
+#include <linux/build_bug.h>
 
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
  * This is the main reason why we're doing stable VA mappings for RT
  * services.
  *
- * This flag is used in conjunction with a chicken bit called
- * "efi=old_map" which can be used as a fallback to the old runtime
- * services mapping method in case there's some b0rkage with a
- * particular EFI implementation (haha, it is hard to hold up the
- * sarcasm here...).
+ * SGI UV1 machines are known to be incompatible with this scheme, so we
+ * provide an opt-out for these machines via a DMI quirk that sets the
+ * attribute below.
  */
-#define EFI_OLD_MEMMAP         EFI_ARCH_1
+#define EFI_UV1_MEMMAP         EFI_ARCH_1
+
+static inline bool efi_have_uv1_memmap(void)
+{
+       return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP);
+}
 
 #define EFI32_LOADER_SIGNATURE "EL32"
 #define EFI64_LOADER_SIGNATURE "EL64"
 
 #define ARCH_EFI_IRQ_FLAGS_MASK        X86_EFLAGS_IF
 
-#ifdef CONFIG_X86_32
+/*
+ * The EFI services are called through variadic functions in many cases. These
+ * functions are implemented in assembler and support only a fixed number of
+ * arguments. The macros below allow us to check at build time that we don't
+ * try to call them with too many arguments.
+ *
+ * __efi_nargs() will return the number of arguments if it is 7 or less, and
+ * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
+ * impossible to calculate the exact number of arguments beyond some
+ * pre-defined limit. The maximum number of arguments currently supported by
+ * any of the thunks is 7, so this is good enough for now and can be extended
+ * in the obvious way if we ever need more.
+ */
 
-extern asmlinkage unsigned long efi_call_phys(void *, ...);
+#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
+#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,      \
+       __efi_arg_sentinel(7), __efi_arg_sentinel(6),           \
+       __efi_arg_sentinel(5), __efi_arg_sentinel(4),           \
+       __efi_arg_sentinel(3), __efi_arg_sentinel(2),           \
+       __efi_arg_sentinel(1), __efi_arg_sentinel(0))
+#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, n, ...)  \
+       __take_second_arg(n,                                    \
+               ({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 8; }))
+#define __efi_arg_sentinel(n) , n
 
+/*
+ * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
+ * represents more than n arguments.
+ */
+
+#define __efi_nargs_check(f, n, ...)                                   \
+       __efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
+#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
+#define __efi_nargs_check__(f, p, n) ({                                        \
+       BUILD_BUG_ON_MSG(                                               \
+               (p) > (n),                                              \
+               #f " called with too many arguments (" #p ">" #n ")");  \
+})
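
The argument-counting trick above is plain preprocessor machinery and can be exercised outside the kernel. A minimal userspace sketch under the same assumptions (GNU cpp is required for the ", ##__VA_ARGS__" comma deletion; the BUILD_BUG_ON() arm is dropped for brevity, so this version is only meaningful for 0..7 arguments):

#include <stdio.h>

/* Count 0..7 arguments by letting them shift a descending sentinel list. */
#define NARGS(...) NARGS_(0, ##__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
#define NARGS_(_0, _1, _2, _3, _4, _5, _6, _7, n, ...) n

int main(void)
{
	printf("%d %d %d\n", NARGS(), NARGS(a), NARGS(a, b, c));
	/* prints: 0 1 3 */
	return 0;
}
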
+
+#ifdef CONFIG_X86_32
 #define arch_efi_call_virt_setup()                                     \
 ({                                                                     \
        kernel_fpu_begin();                                             \
@@ -51,13 +91,7 @@ extern asmlinkage unsigned long efi_call_phys(void *, ...);
 })
 
 
-/*
- * Wrap all the virtual calls in a way that forces the parameters on the stack.
- */
-#define arch_efi_call_virt(p, f, args...)                              \
-({                                                                     \
-       ((efi_##f##_t __attribute__((regparm(0)))*) p->f)(args);        \
-})
+#define arch_efi_call_virt(p, f, args...)      p->f(args)
 
 #define efi_ioremap(addr, size, type, attr)    ioremap_cache(addr, size)
 
@@ -65,9 +99,12 @@ extern asmlinkage unsigned long efi_call_phys(void *, ...);
 
 #define EFI_LOADER_SIGNATURE   "EL64"
 
-extern asmlinkage u64 efi_call(void *fp, ...);
+extern asmlinkage u64 __efi_call(void *fp, ...);
 
-#define efi_call_phys(f, args...)              efi_call((f), args)
+#define efi_call(...) ({                                               \
+       __efi_nargs_check(efi_call, 7, __VA_ARGS__);                    \
+       __efi_call(__VA_ARGS__);                                        \
+})
 
 /*
  * struct efi_scratch - Scratch space used while switching to/from efi_mm
@@ -85,7 +122,7 @@ struct efi_scratch {
        kernel_fpu_begin();                                             \
        firmware_restrict_branch_speculation_start();                   \
                                                                        \
-       if (!efi_enabled(EFI_OLD_MEMMAP))                               \
+       if (!efi_have_uv1_memmap())                                     \
                efi_switch_mm(&efi_mm);                                 \
 })
 
@@ -94,7 +131,7 @@ struct efi_scratch {
 
 #define arch_efi_call_virt_teardown()                                  \
 ({                                                                     \
-       if (!efi_enabled(EFI_OLD_MEMMAP))                               \
+       if (!efi_have_uv1_memmap())                                     \
                efi_switch_mm(efi_scratch.prev_mm);                     \
                                                                        \
        firmware_restrict_branch_speculation_end();                     \
@@ -121,8 +158,6 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
 extern struct efi_scratch efi_scratch;
 extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int __init efi_memblock_x86_reserve_range(void);
-extern pgd_t * __init efi_call_phys_prolog(void);
-extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
 extern void __init efi_print_memmap(void);
 extern void __init efi_memory_uc(u64 addr, unsigned long size);
 extern void __init efi_map_region(efi_memory_desc_t *md);
@@ -140,6 +175,8 @@ extern void efi_delete_dummy_variable(void);
 extern void efi_switch_mm(struct mm_struct *mm);
 extern void efi_recover_from_page_fault(unsigned long phys_addr);
 extern void efi_free_boot_services(void);
+extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
+extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);
 
 struct efi_setup_data {
        u64 fw_vendor;
@@ -152,93 +189,144 @@ struct efi_setup_data {
 extern u64 efi_setup;
 
 #ifdef CONFIG_EFI
+extern efi_status_t __efi64_thunk(u32, ...);
 
-static inline bool efi_is_native(void)
+#define efi64_thunk(...) ({                                            \
+       __efi_nargs_check(efi64_thunk, 6, __VA_ARGS__);                 \
+       __efi64_thunk(__VA_ARGS__);                                     \
+})
+
+static inline bool efi_is_mixed(void)
 {
-       return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+       if (!IS_ENABLED(CONFIG_EFI_MIXED))
+               return false;
+       return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
 }
 
 static inline bool efi_runtime_supported(void)
 {
-       if (efi_is_native())
-               return true;
-
-       if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
+       if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
                return true;
 
-       return false;
+       return IS_ENABLED(CONFIG_EFI_MIXED);
 }
 
 extern void parse_efi_setup(u64 phys_addr, u32 data_len);
 
 extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 
-#ifdef CONFIG_EFI_MIXED
 extern void efi_thunk_runtime_setup(void);
-extern efi_status_t efi_thunk_set_virtual_address_map(
-       void *phys_set_virtual_address_map,
-       unsigned long memory_map_size,
-       unsigned long descriptor_size,
-       u32 descriptor_version,
-       efi_memory_desc_t *virtual_map);
-#else
-static inline void efi_thunk_runtime_setup(void) {}
-static inline efi_status_t efi_thunk_set_virtual_address_map(
-       void *phys_set_virtual_address_map,
-       unsigned long memory_map_size,
-       unsigned long descriptor_size,
-       u32 descriptor_version,
-       efi_memory_desc_t *virtual_map)
-{
-       return EFI_SUCCESS;
-}
-#endif /* CONFIG_EFI_MIXED */
-
+efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
+                                        unsigned long descriptor_size,
+                                        u32 descriptor_version,
+                                        efi_memory_desc_t *virtual_map);
 
 /* arch specific definitions used by the stub code */
 
-struct efi_config {
-       u64 image_handle;
-       u64 table;
-       u64 runtime_services;
-       u64 boot_services;
-       u64 text_output;
-       efi_status_t (*call)(unsigned long, ...);
-       bool is64;
-} __packed;
-
-__pure const struct efi_config *__efi_early(void);
+__attribute_const__ bool efi_is_64bit(void);
 
-static inline bool efi_is_64bit(void)
+static inline bool efi_is_native(void)
 {
        if (!IS_ENABLED(CONFIG_X86_64))
-               return false;
-
+               return true;
        if (!IS_ENABLED(CONFIG_EFI_MIXED))
                return true;
+       return efi_is_64bit();
+}
+
+#define efi_mixed_mode_cast(attr)                                      \
+       __builtin_choose_expr(                                          \
+               __builtin_types_compatible_p(u32, __typeof__(attr)),    \
+                       (unsigned long)(attr), (attr))
 
-       return __efi_early()->is64;
+#define efi_table_attr(inst, attr)                                     \
+       (efi_is_native()                                                \
+               ? inst->attr                                            \
+               : (__typeof__(inst->attr))                              \
+                       efi_mixed_mode_cast(inst->mixed_mode.attr))
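
efi_mixed_mode_cast() relies on two GCC builtins: __builtin_types_compatible_p() detects a u32 field at compile time, and __builtin_choose_expr() widens only that case to unsigned long, so the re-cast to the native field type cannot warn about an int-to-pointer size mismatch. A standalone sketch of the same idiom (illustrative names; GCC/clang only):

#include <stdint.h>
#include <stdio.h>

#define mixed_mode_cast(x)						\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(uint32_t, __typeof__(x)),	\
			(unsigned long)(x), (x))

int main(void)
{
	uint32_t fw_ptr = 0x1000;			/* a 32-bit firmware pointer */
	void *p = (void *)mixed_mode_cast(fw_ptr);	/* no truncation warning */

	printf("%p\n", p);
	return 0;
}
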
+
+/*
+ * The following macros allow translating arguments if necessary from native to
+ * mixed mode. There are two use cases: initializing the upper 32 bits of
+ * output parameters, and splitting up 64-bit arguments into two 32-bit
+ * halves where the 32-bit method expects them, so they can be thunked
+ * properly.
+ *
+ * As examples, the AllocatePool boot service returns the address of the
+ * allocation, but it will not set the high 32 bits of the address. To ensure
+ * that the full 64-bit address is initialized, we zero-init the address before
+ * calling the thunk.
+ *
+ * The FreePages boot service takes a 64-bit physical address even in 32-bit
+ * mode. For the thunk to work correctly, a native 64-bit call of
+ *     free_pages(addr, size)
+ * must be translated to
+ *     efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
+ * so that the two 32-bit halves of addr get pushed onto the stack separately.
+ */
+
+static inline void *efi64_zero_upper(void *p)
+{
+       ((u32 *)p)[1] = 0;
+       return p;
 }
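
The zero-upper helper exists because a 32-bit service writes only the low half of a 64-bit out-parameter and leaves the high half stale. A hypothetical userspace rendering of the same pattern (assumes a little-endian layout, as on x86):

#include <stdint.h>
#include <stdio.h>

static void *zero_upper(void *p)
{
	((uint32_t *)p)[1] = 0;		/* clear the high half up front */
	return p;
}

/* Stands in for a 32-bit EFI service: it writes only 32 bits. */
static void legacy_alloc(uint32_t *out)
{
	*out = 0x1000;
}

int main(void)
{
	uint64_t addr = 0xdeadbeefcafef00dULL;		/* stale upper bits */

	legacy_alloc(zero_upper(&addr));
	printf("%#llx\n", (unsigned long long)addr);	/* prints 0x1000 */
	return 0;
}
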
 
-#define efi_table_attr(table, attr, instance)                          \
-       (efi_is_64bit() ?                                               \
-               ((table##_64_t *)(unsigned long)instance)->attr :       \
-               ((table##_32_t *)(unsigned long)instance)->attr)
+#define __efi64_argmap_free_pages(addr, size)                          \
+       ((addr), 0, (size))
+
+#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)     \
+       ((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))
+
+#define __efi64_argmap_allocate_pool(type, size, buffer)               \
+       ((type), (size), efi64_zero_upper(buffer))
 
-#define efi_call_proto(protocol, f, instance, ...)                     \
-       __efi_early()->call(efi_table_attr(protocol, f, instance),      \
-               instance, ##__VA_ARGS__)
+#define __efi64_argmap_handle_protocol(handle, protocol, interface)    \
+       ((handle), (protocol), efi64_zero_upper(interface))
 
-#define efi_call_early(f, ...)                                         \
-       __efi_early()->call(efi_table_attr(efi_boot_services, f,        \
-               __efi_early()->boot_services), __VA_ARGS__)
+#define __efi64_argmap_locate_protocol(protocol, reg, interface)       \
+       ((protocol), (reg), efi64_zero_upper(interface))
 
-#define __efi_call_early(f, ...)                                       \
-       __efi_early()->call((unsigned long)f, __VA_ARGS__);
+/* PCI I/O */
+#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)     \
+       ((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),      \
+        efi64_zero_upper(dev), efi64_zero_upper(func))
+
+/*
+ * The macros below handle the plumbing for the argument mapping. To add a
+ * mapping for a specific EFI method, simply define a macro
+ * __efi64_argmap_<method name>, following the examples above.
+ */
 
-#define efi_call_runtime(f, ...)                                       \
-       __efi_early()->call(efi_table_attr(efi_runtime_services, f,     \
-               __efi_early()->runtime_services), __VA_ARGS__)
+#define __efi64_thunk_map(inst, func, ...)                             \
+       efi64_thunk(inst->mixed_mode.func,                              \
+               __efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),    \
+                              (__VA_ARGS__)))
+
+#define __efi64_argmap(mapped, args)                                   \
+       __PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
+#define __efi64_argmap__0(mapped, args) __efi_eval mapped
+#define __efi64_argmap__1(mapped, args) __efi_eval args
+
+#define __efi_eat(...)
+#define __efi_eval(...) __VA_ARGS__
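
The presence test works because a defined __efi64_argmap_<func> macro expands to a parenthesized list, which __efi_eat() swallows, so __efi_nargs() sees zero arguments; an undefined one survives as a single token sequence and counts as one. A self-contained sketch of the same dispatch under made-up names (GNU cpp):

#include <stdio.h>

#define NARGS(...) NARGS_(0, ##__VA_ARGS__, 2, 1, 0)
#define NARGS_(_0, _1, _2, n, ...) n
#define EAT(...)
#define EVAL(...) __VA_ARGS__
#define PASTE(a, b) PASTE_(a, b)
#define PASTE_(a, b) a##b

/* Pick the rewritten list if MAP_<func> expanded, else the original one. */
#define ARGMAP(mapped, args) PASTE(ARGMAP_, NARGS(EAT mapped))(mapped, args)
#define ARGMAP_0(mapped, args) EVAL mapped
#define ARGMAP_1(mapped, args) EVAL args

#define CALL(func, ...) \
	func(ARGMAP(PASTE_(MAP_, func)(__VA_ARGS__), (__VA_ARGS__)))

#define MAP_mul(a, b) ((a) * 2, (b))	/* only mul has a mapping */

static int mul(int a, int b) { return a * b; }
static int add(int a, int b) { return a + b; }

int main(void)
{
	printf("%d %d\n", CALL(mul, 3, 4), CALL(add, 3, 4));
	/* prints: 24 7 */
	return 0;
}
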
+
+/* The three macros below handle dispatching via the thunk if needed */
+
+#define efi_call_proto(inst, func, ...)                                        \
+       (efi_is_native()                                                \
+               ? inst->func(inst, ##__VA_ARGS__)                       \
+               : __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))
+
+#define efi_bs_call(func, ...)                                         \
+       (efi_is_native()                                                \
+               ? efi_system_table()->boottime->func(__VA_ARGS__)       \
+               : __efi64_thunk_map(efi_table_attr(efi_system_table(),  \
+                               boottime), func, __VA_ARGS__))
+
+#define efi_rt_call(func, ...)                                         \
+       (efi_is_native()                                                \
+               ? efi_system_table()->runtime->func(__VA_ARGS__)        \
+               : __efi64_thunk_map(efi_table_attr(efi_system_table(),  \
+                               runtime), func, __VA_ARGS__))
 
 extern bool efi_reboot_required(void);
 extern bool efi_is_table_address(unsigned long phys_addr);
index c2a7458f912c37def2f8926fc193ab56822be239..85be2f5062728f13f61e84ed9efa6d622e96f5dd 100644 (file)
@@ -47,8 +47,6 @@ struct dyn_arch_ftrace {
        /* No extra data needed for x86 */
 };
 
-int ftrace_int3_handler(struct pt_regs *regs);
-
 #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
 
 #endif /*  CONFIG_DYNAMIC_FTRACE */
index c606c0b7073824ac70f94e1d2fe24ff91320326e..4981c293f926caf29a2d81e982966701ef0e2a4c 100644 (file)
 
 #define INTEL_FAM6_ATOM_TREMONT_D      0x86 /* Jacobsville */
 #define INTEL_FAM6_ATOM_TREMONT        0x96 /* Elkhart Lake */
+#define INTEL_FAM6_ATOM_TREMONT_L      0x9C /* Jasper Lake */
 
 /* Xeon Phi */
 
index 9e7adcdbe031dfc6a30f2e4f47cd125d54418e74..e6da1ce26256b5b0ef835a2bade3c5ba2fcb4631 100644 (file)
 
 #if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
 
-int intel_pmc_ipc_simple_command(int cmd, int sub);
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
-               u32 *out, u32 outlen, u32 dptr, u32 sptr);
 int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
                u32 *out, u32 outlen);
 int intel_pmc_s0ix_counter_read(u64 *data);
-int intel_pmc_gcr_read(u32 offset, u32 *data);
 int intel_pmc_gcr_read64(u32 offset, u64 *data);
-int intel_pmc_gcr_write(u32 offset, u32 data);
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val);
 
 #else
 
-static inline int intel_pmc_ipc_simple_command(int cmd, int sub)
-{
-       return -EINVAL;
-}
-
-static inline int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
-               u32 *out, u32 outlen, u32 dptr, u32 sptr)
-{
-       return -EINVAL;
-}
-
 static inline int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
                u32 *out, u32 outlen)
 {
@@ -66,26 +49,11 @@ static inline int intel_pmc_s0ix_counter_read(u64 *data)
        return -EINVAL;
 }
 
-static inline int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
-       return -EINVAL;
-}
-
 static inline int intel_pmc_gcr_read64(u32 offset, u64 *data)
 {
        return -EINVAL;
 }
 
-static inline int intel_pmc_gcr_write(u32 offset, u32 data)
-{
-       return -EINVAL;
-}
-
-static inline int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
-{
-       return -EINVAL;
-}
-
 #endif /*CONFIG_INTEL_PMC_IPC*/
 
 #endif
index 4a8c6e8173989d561e96f8c2f76fe8a5699d3b0a..2a1442ba6e78146975824b2e66968f5424db3b44 100644 (file)
 /* Read single register */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data);
 
-/* Read two sequential registers */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data);
-
-/* Read four sequential registers */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data);
-
 /* Read a vector */
 int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
 
 /* Write single register */
 int intel_scu_ipc_iowrite8(u16 addr, u8 data);
 
-/* Write two sequential registers */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data);
-
-/* Write four sequential registers */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data);
-
 /* Write a vector */
 int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
 
@@ -50,14 +38,6 @@ int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
 int intel_scu_ipc_simple_command(int cmd, int sub);
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
                          u32 *out, int outlen);
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
-                             u32 *out, int outlen, u32 dptr, u32 sptr);
-
-/* I2C control api */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
-
-/* Update FW version */
-int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
 
 extern struct blocking_notifier_head intel_scu_notifier;
 
index 214394860632c09bb040d4f8326aaec35566cdd4..2f77e31a1283c733a09e95db3f3fa3057281ae65 100644 (file)
@@ -40,13 +40,10 @@ struct telemetry_evtmap {
 struct telemetry_unit_config {
        struct telemetry_evtmap *telem_evts;
        void __iomem *regmap;
-       u32 ssram_base_addr;
        u8 ssram_evts_used;
        u8 curr_period;
        u8 max_period;
        u8 min_period;
-       u32 ssram_size;
-
 };
 
 struct telemetry_plt_config {
index 9997521fc5cd6621a14eda391c739a420524df73..e1aa17a468a858050f7f0becfd98043d72ae64fb 100644 (file)
@@ -399,4 +399,40 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset,
 extern bool phys_mem_access_encrypted(unsigned long phys_addr,
                                      unsigned long size);
 
+/**
+ * iosubmit_cmds512 - copy data to a single MMIO location, in 512-bit units
+ * @__dst: destination, in MMIO space (must be 512-bit aligned)
+ * @src: source
+ * @count: number of 512-bit quantities to submit
+ *
+ * Submit data from kernel space to MMIO space, in units of 512 bits at a
+ * time.  Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ *
+ * Warning: Do not use this helper unless your driver has checked that the CPU
+ * instruction is supported on the platform.
+ */
+static inline void iosubmit_cmds512(void __iomem *__dst, const void *src,
+                                   size_t count)
+{
+       /*
+        * Note that this isn't an "on-stack copy": "dst" is merely declared
+        * as a pointer to 64 bytes of data that is about to be overwritten.
+        * Since MOVDIR64B can copy arbitrary memory around, this declaration
+        * lets the compiler know exactly how much gets clobbered.
+        */
+       volatile struct { char _[64]; } *dst = __dst;
+       const u8 *from = src;
+       const u8 *end = from + count * 64;
+
+       while (from < end) {
+               /* MOVDIR64B [rdx], rax */
+               asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+                            : "=m" (dst)
+                            : "d" (from), "a" (dst));
+               from += 64;
+       }
+}
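
On the caller's side, the warning above boils down to a CPUID check before this helper is ever used. A hypothetical userspace sketch (MOVDIR64B enumerates, to the best of my knowledge, in CPUID leaf 7, subleaf 0, ECX bit 28; confirm against the SDM before relying on it):

#include <cpuid.h>
#include <stdio.h>

static int have_movdir64b(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.(EAX=7, ECX=0):ECX[28] is the MOVDIR64B feature bit. */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 0;
	return (ecx >> 28) & 1;
}

int main(void)
{
	printf("MOVDIR64B %ssupported\n", have_movdir64b() ? "" : "not ");
	return 0;
}
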
+
 #endif /* _ASM_X86_IO_H */
index 5dc909d9ad8174a4c3dca267903679a4e60eeb0b..95b1f053bd96d0080a9693c7c88da8dcf84e00bf 100644 (file)
 
 #include <asm-generic/kprobes.h>
 
-#define BREAKPOINT_INSTRUCTION 0xcc
-
 #ifdef CONFIG_KPROBES
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
+#include <asm/text-patching.h>
 #include <asm/insn.h>
 
 #define  __ARCH_WANT_KPROBES_INSN_SLOT
@@ -25,10 +24,7 @@ struct pt_regs;
 struct kprobe;
 
 typedef u8 kprobe_opcode_t;
-#define RELATIVEJUMP_OPCODE 0xe9
-#define RELATIVEJUMP_SIZE 5
-#define RELATIVECALL_OPCODE 0xe8
-#define RELATIVE_ADDR_SIZE 4
+
 #define MAX_STACK_SIZE 64
 #define CUR_STACK_SIZE(ADDR) \
        (current_top_of_stack() - (unsigned long)(ADDR))
@@ -43,11 +39,11 @@ extern __visible kprobe_opcode_t optprobe_template_entry[];
 extern __visible kprobe_opcode_t optprobe_template_val[];
 extern __visible kprobe_opcode_t optprobe_template_call[];
 extern __visible kprobe_opcode_t optprobe_template_end[];
-#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
+#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + DISP32_SIZE)
 #define MAX_OPTINSN_SIZE                               \
        (((unsigned long)optprobe_template_end -        \
          (unsigned long)optprobe_template_entry) +     \
-        MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
+        MAX_OPTIMIZED_LENGTH + JMP32_INSN_SIZE)
 
 extern const int kretprobe_blacklist_size;
 
@@ -73,7 +69,7 @@ struct arch_specific_insn {
 
 struct arch_optimized_insn {
        /* copy of the original instructions */
-       kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE];
+       kprobe_opcode_t copied_insn[DISP32_SIZE];
        /* detour code buffer */
        kprobe_opcode_t *insn;
        /* the size of instructions copied to detour code buffer */
index dc2d4b206ab7e2417c244050764ef2e0c4953fee..4359b955e0b7ae5e389323aa5d76c1b7a3d509de 100644 (file)
@@ -144,7 +144,7 @@ struct mce_log_buffer {
 
 enum mce_notifier_prios {
        MCE_PRIO_FIRST          = INT_MAX,
-       MCE_PRIO_SRAO           = INT_MAX - 1,
+       MCE_PRIO_UC             = INT_MAX - 1,
        MCE_PRIO_EXTLOG         = INT_MAX - 2,
        MCE_PRIO_NFIT           = INT_MAX - 3,
        MCE_PRIO_EDAC           = INT_MAX - 4,
@@ -290,6 +290,7 @@ extern void apei_mce_report_mem_error(int corrected,
 /* These may be used by multiple smca_hwid_mcatypes */
 enum smca_bank_types {
        SMCA_LS = 0,    /* Load Store */
+       SMCA_LS_V2,     /* Load Store */
        SMCA_IF,        /* Instruction Fetch */
        SMCA_L2_CACHE,  /* L2 Cache */
        SMCA_DE,        /* Decoder Unit */
diff --git a/arch/x86/include/asm/memtype.h b/arch/x86/include/asm/memtype.h
new file mode 100644 (file)
index 0000000..9c2447b
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_MEMTYPE_H
+#define _ASM_X86_MEMTYPE_H
+
+#include <linux/types.h>
+#include <asm/pgtable_types.h>
+
+extern bool pat_enabled(void);
+extern void pat_disable(const char *reason);
+extern void pat_init(void);
+extern void init_cache_modes(void);
+
+extern int memtype_reserve(u64 start, u64 end,
+               enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
+extern int memtype_free(u64 start, u64 end);
+
+extern int memtype_kernel_map_sync(u64 base, unsigned long size,
+               enum page_cache_mode pcm);
+
+extern int memtype_reserve_io(resource_size_t start, resource_size_t end,
+                       enum page_cache_mode *pcm);
+
+extern void memtype_free_io(resource_size_t start, resource_size_t end);
+
+extern bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
+
+#endif /* _ASM_X86_MEMTYPE_H */
index 209492849566cefc56305cd8e976fbf4a25d7527..6685e1218959d21b2c93ceb56992ef291e427ac0 100644 (file)
@@ -53,6 +53,6 @@ static inline void __init load_ucode_amd_bsp(unsigned int family) {}
 static inline void load_ucode_amd_ap(unsigned int family) {}
 static inline int __init
 save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
-void reload_ucode_amd(void) {}
+static inline void reload_ucode_amd(void) {}
 #endif
 #endif /* _ASM_X86_MICROCODE_AMD_H */
index 5f33924e200fcae9ae72755a1be4f4f821513d65..b243234e90cb1dabd65777a4c80e5a181d327b99 100644 (file)
@@ -69,14 +69,6 @@ struct ldt_struct {
        int                     slot;
 };
 
-/* This is a multiple of PAGE_SIZE. */
-#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
-
-static inline void *ldt_slot_va(int slot)
-{
-       return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
-}
-
 /*
  * Used for LDT copy/destruction.
  */
@@ -99,87 +91,21 @@ static inline void destroy_context_ldt(struct mm_struct *mm) { }
 static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
 #endif
 
-static inline void load_mm_ldt(struct mm_struct *mm)
-{
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
-       struct ldt_struct *ldt;
-
-       /* READ_ONCE synchronizes with smp_store_release */
-       ldt = READ_ONCE(mm->context.ldt);
-
-       /*
-        * Any change to mm->context.ldt is followed by an IPI to all
-        * CPUs with the mm active.  The LDT will not be freed until
-        * after the IPI is handled by all such CPUs.  This means that,
-        * if the ldt_struct changes before we return, the values we see
-        * will be safe, and the new values will be loaded before we run
-        * any user code.
-        *
-        * NB: don't try to convert this to use RCU without extreme care.
-        * We would still need IRQs off, because we don't want to change
-        * the local LDT after an IPI loaded a newer value than the one
-        * that we can see.
-        */
-
-       if (unlikely(ldt)) {
-               if (static_cpu_has(X86_FEATURE_PTI)) {
-                       if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
-                               /*
-                                * Whoops -- either the new LDT isn't mapped
-                                * (if slot == -1) or is mapped into a bogus
-                                * slot (if slot > 1).
-                                */
-                               clear_LDT();
-                               return;
-                       }
-
-                       /*
-                        * If page table isolation is enabled, ldt->entries
-                        * will not be mapped in the userspace pagetables.
-                        * Tell the CPU to access the LDT through the alias
-                        * at ldt_slot_va(ldt->slot).
-                        */
-                       set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
-               } else {
-                       set_ldt(ldt->entries, ldt->nr_entries);
-               }
-       } else {
-               clear_LDT();
-       }
+extern void load_mm_ldt(struct mm_struct *mm);
+extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
 #else
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
        clear_LDT();
-#endif
 }
-
 static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
 {
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
-       /*
-        * Load the LDT if either the old or new mm had an LDT.
-        *
-        * An mm will never go from having an LDT to not having an LDT.  Two
-        * mms never share an LDT, so we don't gain anything by checking to
-        * see whether the LDT changed.  There's also no guarantee that
-        * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
-        * then prev->context.ldt will also be non-NULL.
-        *
-        * If we really cared, we could optimize the case where prev == next
-        * and we're exiting lazy mode.  Most of the time, if this happens,
-        * we don't actually need to reload LDTR, but modify_ldt() is mostly
-        * used by legacy code and emulators where we don't need this level of
-        * performance.
-        *
-        * This uses | instead of || because it generates better code.
-        */
-       if (unlikely((unsigned long)prev->context.ldt |
-                    (unsigned long)next->context.ldt))
-               load_mm_ldt(next);
-#endif
-
        DEBUG_LOCKS_WARN_ON(preemptible());
 }
+#endif
 
-void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
 /*
  * Init a new mm.  Used on mm copies, like at fork()
index dbff1456d2152a6993ba5f381f6a7ef838b52f28..829df26fd7a3ec2066f6a339cfbd3c51befd2ed5 100644 (file)
@@ -24,7 +24,7 @@
 #define _ASM_X86_MTRR_H
 
 #include <uapi/asm/mtrr.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 
 
 /*
@@ -86,7 +86,7 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 }
 static inline void mtrr_bp_init(void)
 {
-       pat_disable("MTRRs disabled, skipping PAT initialization too.");
+       pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
 }
 
 #define mtrr_ap_init() do {} while (0)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
deleted file mode 100644 (file)
index 92015c6..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_PAT_H
-#define _ASM_X86_PAT_H
-
-#include <linux/types.h>
-#include <asm/pgtable_types.h>
-
-bool pat_enabled(void);
-void pat_disable(const char *reason);
-extern void pat_init(void);
-extern void init_cache_modes(void);
-
-extern int reserve_memtype(u64 start, u64 end,
-               enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
-extern int free_memtype(u64 start, u64 end);
-
-extern int kernel_map_sync_memtype(u64 base, unsigned long size,
-               enum page_cache_mode pcm);
-
-int io_reserve_memtype(resource_size_t start, resource_size_t end,
-                       enum page_cache_mode *pcm);
-
-void io_free_memtype(resource_size_t start, resource_size_t end);
-
-bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
-
-#endif /* _ASM_X86_PAT_H */
index 90d0731fdcb6337c8c49719fcea1cabe4af43b54..c1fdd43fe18764517165847b69bc5349dd3b46fd 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/scatterlist.h>
 #include <linux/numa.h>
 #include <asm/io.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/x86_init.h>
 
 struct pci_sysdata {
diff --git a/arch/x86/include/asm/pgtable_32_areas.h b/arch/x86/include/asm/pgtable_32_areas.h
new file mode 100644 (file)
index 0000000..b635541
--- /dev/null
@@ -0,0 +1,53 @@
+#ifndef _ASM_X86_PGTABLE_32_AREAS_H
+#define _ASM_X86_PGTABLE_32_AREAS_H
+
+#include <asm/cpu_entry_area.h>
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+
+#ifndef __ASSEMBLY__
+extern bool __vmalloc_start_set; /* set once high_memory is set */
+#endif
+
+#define VMALLOC_START  ((unsigned long)high_memory + VMALLOC_OFFSET)
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#define CPU_ENTRY_AREA_PAGES           (NR_CPUS * DIV_ROUND_UP(sizeof(struct cpu_entry_area), PAGE_SIZE))
+
+/* The +1 is for the readonly IDT page: */
+#define CPU_ENTRY_AREA_BASE    \
+       ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
+
+#define LDT_BASE_ADDR          \
+       ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
+
+#define LDT_END_ADDR           (LDT_BASE_ADDR + PMD_SIZE)
+
+#define PKMAP_BASE             \
+       ((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
+
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END   (PKMAP_BASE - 2 * PAGE_SIZE)
+#else
+# define VMALLOC_END   (LDT_BASE_ADDR - 2 * PAGE_SIZE)
+#endif
+
+#define MODULES_VADDR  VMALLOC_START
+#define MODULES_END    VMALLOC_END
+#define MODULES_LEN    (MODULES_VADDR - MODULES_END)
+
+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
+#endif /* _ASM_X86_PGTABLE_32_AREAS_H */
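
The layout macros above carve each region downward from the one before it and round down to a PMD boundary so the areas can be mapped with large pages. A toy userspace sketch of the same arithmetic, with entirely made-up numbers (4 MiB PMDs, 100 reserved pages):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(4UL << 20)	/* 4 MiB, the non-PAE value */
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long fixaddr_start = 0xfffff000UL;	/* made-up */
	unsigned long cea   = (fixaddr_start - PAGE_SIZE * 100) & PMD_MASK;
	unsigned long ldt   = (cea - PAGE_SIZE) & PMD_MASK;
	unsigned long pkmap = (ldt - PAGE_SIZE) & PMD_MASK;

	printf("cea=%#lx ldt=%#lx pkmap=%#lx\n", cea, ldt, pkmap);
	/* cea=0xffc00000 ldt=0xff800000 pkmap=0xff400000 */
	return 0;
}
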
index 0416d42e5bdd9e3ee90e05d02792f3fedc0169a8..5356a46b037355007dc1d8031d9dbf96321d82ae 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_PGTABLE_32_DEFS_H
-#define _ASM_X86_PGTABLE_32_DEFS_H
+#ifndef _ASM_X86_PGTABLE_32_TYPES_H
+#define _ASM_X86_PGTABLE_32_TYPES_H
 
 /*
  * The Linux x86 paging architecture is 'compile-time dual-mode', it
 #define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE - 1))
 
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8 * 1024 * 1024)
-
-#ifndef __ASSEMBLY__
-extern bool __vmalloc_start_set; /* set once high_memory is set */
-#endif
-
-#define VMALLOC_START  ((unsigned long)high_memory + VMALLOC_OFFSET)
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-
-/*
- * This is an upper bound on sizeof(struct cpu_entry_area) / PAGE_SIZE.
- * Define this here and validate with BUILD_BUG_ON() in cpu_entry_area.c
- * to avoid include recursion hell.
- */
-#define CPU_ENTRY_AREA_PAGES   (NR_CPUS * 43)
-
-/* The +1 is for the readonly IDT page: */
-#define CPU_ENTRY_AREA_BASE    \
-       ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
-
-#define LDT_BASE_ADDR          \
-       ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
-
-#define LDT_END_ADDR           (LDT_BASE_ADDR + PMD_SIZE)
-
-#define PKMAP_BASE             \
-       ((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
-
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END   (PKMAP_BASE - 2 * PAGE_SIZE)
-#else
-# define VMALLOC_END   (LDT_BASE_ADDR - 2 * PAGE_SIZE)
-#endif
-
-#define MODULES_VADDR  VMALLOC_START
-#define MODULES_END    VMALLOC_END
-#define MODULES_LEN    (MODULES_VADDR - MODULES_END)
-
-#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
-
-#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
+#endif /* _ASM_X86_PGTABLE_32_TYPES_H */
diff --git a/arch/x86/include/asm/pgtable_areas.h b/arch/x86/include/asm/pgtable_areas.h
new file mode 100644 (file)
index 0000000..d34cce1
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_PGTABLE_AREAS_H
+#define _ASM_X86_PGTABLE_AREAS_H
+
+#ifdef CONFIG_X86_32
+# include <asm/pgtable_32_areas.h>
+#endif
+
+/* Single page reserved for the readonly IDT mapping: */
+#define CPU_ENTRY_AREA_RO_IDT          CPU_ENTRY_AREA_BASE
+#define CPU_ENTRY_AREA_PER_CPU         (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
+
+#define CPU_ENTRY_AREA_RO_IDT_VADDR    ((void *)CPU_ENTRY_AREA_RO_IDT)
+
+#define CPU_ENTRY_AREA_MAP_SIZE                (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
+
+#endif /* _ASM_X86_PGTABLE_AREAS_H */
index b5e49e6bac6352e5f5ca792250a478c4875d8e4e..ea7400726d7a7c15bccbb4fd7e96287da40958ef 100644 (file)
 
 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
 
-#define _PAGE_TABLE_NOENC      (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |\
-                                _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE_NOENC    (_PAGE_PRESENT | _PAGE_RW |             \
-                                _PAGE_ACCESSED | _PAGE_DIRTY)
-
 /*
  * Set of bits not changed in pte_modify.  The pte's
  * protection key is treated like _PAGE_RW, for
  */
 #ifndef __ASSEMBLY__
 enum page_cache_mode {
-       _PAGE_CACHE_MODE_WB = 0,
-       _PAGE_CACHE_MODE_WC = 1,
+       _PAGE_CACHE_MODE_WB       = 0,
+       _PAGE_CACHE_MODE_WC       = 1,
        _PAGE_CACHE_MODE_UC_MINUS = 2,
-       _PAGE_CACHE_MODE_UC = 3,
-       _PAGE_CACHE_MODE_WT = 4,
-       _PAGE_CACHE_MODE_WP = 5,
-       _PAGE_CACHE_MODE_NUM = 8
+       _PAGE_CACHE_MODE_UC       = 3,
+       _PAGE_CACHE_MODE_WT       = 4,
+       _PAGE_CACHE_MODE_WP       = 5,
+
+       _PAGE_CACHE_MODE_NUM      = 8
 };
 #endif
 
-#define _PAGE_CACHE_MASK       (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
-#define _PAGE_NOCACHE          (cachemode2protval(_PAGE_CACHE_MODE_UC))
-#define _PAGE_CACHE_WP         (cachemode2protval(_PAGE_CACHE_MODE_WP))
+#define _PAGE_ENC              (_AT(pteval_t, sme_me_mask))
 
-#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
-                                _PAGE_ACCESSED | _PAGE_NX)
-
-#define PAGE_SHARED_EXEC       __pgprot(_PAGE_PRESENT | _PAGE_RW |     \
-                                        _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC       __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
-                                        _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC         __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
-                                        _PAGE_ACCESSED)
-#define PAGE_COPY              PAGE_COPY_NOEXEC
-#define PAGE_READONLY          __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
-                                        _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC     __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
-                                        _PAGE_ACCESSED)
-
-#define __PAGE_KERNEL_EXEC                                             \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
-#define __PAGE_KERNEL          (__PAGE_KERNEL_EXEC | _PAGE_NX)
-
-#define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_RX               (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VVAR             (__PAGE_KERNEL_RO | _PAGE_USER)
-#define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-#define __PAGE_KERNEL_WP               (__PAGE_KERNEL | _PAGE_CACHE_WP)
-
-#define __PAGE_KERNEL_IO               (__PAGE_KERNEL)
-#define __PAGE_KERNEL_IO_NOCACHE       (__PAGE_KERNEL_NOCACHE)
+#define _PAGE_CACHE_MASK       (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
 
-#ifndef __ASSEMBLY__
+#define _PAGE_NOCACHE          (cachemode2protval(_PAGE_CACHE_MODE_UC))
+#define _PAGE_CACHE_WP         (cachemode2protval(_PAGE_CACHE_MODE_WP))
 
-#define _PAGE_ENC      (_AT(pteval_t, sme_me_mask))
+#define __PP _PAGE_PRESENT
+#define __RW _PAGE_RW
+#define _USR _PAGE_USER
+#define ___A _PAGE_ACCESSED
+#define ___D _PAGE_DIRTY
+#define ___G _PAGE_GLOBAL
+#define __NX _PAGE_NX
+
+#define _ENC _PAGE_ENC
+#define __WP _PAGE_CACHE_WP
+#define __NC _PAGE_NOCACHE
+#define _PSE _PAGE_PSE
+
+#define pgprot_val(x)          ((x).pgprot)
+#define __pgprot(x)            ((pgprot_t) { (x) } )
+#define __pg(x)                        __pgprot(x)
+
+#define _PAGE_PAT_LARGE                (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+
+#define PAGE_NONE            __pg(   0|   0|   0|___A|   0|   0|   0|___G)
+#define PAGE_SHARED          __pg(__PP|__RW|_USR|___A|__NX|   0|   0|   0)
+#define PAGE_SHARED_EXEC     __pg(__PP|__RW|_USR|___A|   0|   0|   0|   0)
+#define PAGE_COPY_NOEXEC     __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
+#define PAGE_COPY_EXEC       __pg(__PP|   0|_USR|___A|   0|   0|   0|   0)
+#define PAGE_COPY            __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
+#define PAGE_READONLY        __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
+#define PAGE_READONLY_EXEC   __pg(__PP|   0|_USR|___A|   0|   0|   0|   0)
+
+#define __PAGE_KERNEL           (__PP|__RW|   0|___A|__NX|___D|   0|___G)
+#define __PAGE_KERNEL_EXEC      (__PP|__RW|   0|___A|   0|___D|   0|___G)
+#define _KERNPG_TABLE_NOENC     (__PP|__RW|   0|___A|   0|___D|   0|   0)
+#define _KERNPG_TABLE           (__PP|__RW|   0|___A|   0|___D|   0|   0| _ENC)
+#define _PAGE_TABLE_NOENC       (__PP|__RW|_USR|___A|   0|___D|   0|   0)
+#define _PAGE_TABLE             (__PP|__RW|_USR|___A|   0|___D|   0|   0| _ENC)
+#define __PAGE_KERNEL_RO        (__PP|   0|   0|___A|__NX|___D|   0|___G)
+#define __PAGE_KERNEL_RX        (__PP|   0|   0|___A|   0|___D|   0|___G)
+#define __PAGE_KERNEL_NOCACHE   (__PP|__RW|   0|___A|__NX|___D|   0|___G| __NC)
+#define __PAGE_KERNEL_VVAR      (__PP|   0|_USR|___A|__NX|___D|   0|___G)
+#define __PAGE_KERNEL_LARGE     (__PP|__RW|   0|___A|__NX|___D|_PSE|___G)
+#define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW|   0|___A|   0|___D|_PSE|___G)
+#define __PAGE_KERNEL_WP        (__PP|__RW|   0|___A|__NX|___D|   0|___G| __WP)
+
+
+#define __PAGE_KERNEL_IO               __PAGE_KERNEL
+#define __PAGE_KERNEL_IO_NOCACHE       __PAGE_KERNEL_NOCACHE
 
-#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
-                        _PAGE_DIRTY | _PAGE_ENC)
-#define _PAGE_TABLE    (_KERNPG_TABLE | _PAGE_USER)
 
-#define __PAGE_KERNEL_ENC      (__PAGE_KERNEL | _PAGE_ENC)
-#define __PAGE_KERNEL_ENC_WP   (__PAGE_KERNEL_WP | _PAGE_ENC)
+#ifndef __ASSEMBLY__
 
-#define __PAGE_KERNEL_NOENC    (__PAGE_KERNEL)
-#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP)
+#define __PAGE_KERNEL_ENC      (__PAGE_KERNEL    | _ENC)
+#define __PAGE_KERNEL_ENC_WP   (__PAGE_KERNEL_WP | _ENC)
+#define __PAGE_KERNEL_NOENC    (__PAGE_KERNEL    |    0)
+#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP |    0)
 
-#define default_pgprot(x)      __pgprot((x) & __default_kernel_pte_mask)
+#define __pgprot_mask(x)       __pgprot((x) & __default_kernel_pte_mask)
 
-#define PAGE_KERNEL            default_pgprot(__PAGE_KERNEL | _PAGE_ENC)
-#define PAGE_KERNEL_NOENC      default_pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO         default_pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
-#define PAGE_KERNEL_EXEC       default_pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_EXEC_NOENC default_pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX         default_pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
-#define PAGE_KERNEL_NOCACHE    default_pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
-#define PAGE_KERNEL_LARGE      default_pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
-#define PAGE_KERNEL_LARGE_EXEC default_pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VVAR       default_pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
+#define PAGE_KERNEL            __pgprot_mask(__PAGE_KERNEL            | _ENC)
+#define PAGE_KERNEL_NOENC      __pgprot_mask(__PAGE_KERNEL            |    0)
+#define PAGE_KERNEL_RO         __pgprot_mask(__PAGE_KERNEL_RO         | _ENC)
+#define PAGE_KERNEL_EXEC       __pgprot_mask(__PAGE_KERNEL_EXEC       | _ENC)
+#define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC       |    0)
+#define PAGE_KERNEL_RX         __pgprot_mask(__PAGE_KERNEL_RX         | _ENC)
+#define PAGE_KERNEL_NOCACHE    __pgprot_mask(__PAGE_KERNEL_NOCACHE    | _ENC)
+#define PAGE_KERNEL_LARGE      __pgprot_mask(__PAGE_KERNEL_LARGE      | _ENC)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
+#define PAGE_KERNEL_VVAR       __pgprot_mask(__PAGE_KERNEL_VVAR       | _ENC)
 
-#define PAGE_KERNEL_IO         default_pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE default_pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#define PAGE_KERNEL_IO         __pgprot_mask(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE)
 
 #endif /* __ASSEMBLY__ */
 
@@ -449,9 +457,6 @@ static inline pteval_t pte_flags(pte_t pte)
        return native_pte_val(pte) & PTE_FLAGS_MASK;
 }
 
-#define pgprot_val(x)  ((x).pgprot)
-#define __pgprot(x)    ((pgprot_t) { (x) } )
-
 extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
 extern uint8_t __pte2cachemode_tbl[8];
 
index 5057a8ed100b8234a9e0dddfeae2a712b98b3101..78897a8da01f7c230e860f12962bebdc2b1a2954 100644 (file)
@@ -339,22 +339,6 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
 
 #define ARCH_HAS_USER_SINGLE_STEP_REPORT
 
-/*
- * When hitting ptrace_stop(), we cannot return using SYSRET because
- * that does not restore the full CPU state, only a minimal set.  The
- * ptracer can change arbitrary register values, which is usually okay
- * because the usual ptrace stops run off the signal delivery path which
- * forces IRET; however, ptrace_event() stops happen in arbitrary places
- * in the kernel and don't force IRET path.
- *
- * So force IRET path after a ptrace stop.
- */
-#define arch_ptrace_stop_needed(code, info)                            \
-({                                                                     \
-       force_iret();                                                   \
-       false;                                                          \
-})
-
 struct user_desc;
 extern int do_get_thread_area(struct task_struct *p, int idx,
                              struct user_desc __user *info);
index 2ee8e469dcf5ce88845aca1cb62385b6df4a6dfd..64c3dce374e5aa206ac4e09ef384896c1bac3b50 100644 (file)
@@ -81,8 +81,6 @@ int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 
 extern int kernel_set_to_readonly;
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
 
 #ifdef CONFIG_X86_64
 static inline int set_mce_nospec(unsigned long pfn)
index 23c626a742e87982f321865adad9d64706f727d5..67315fa3956a19fb5a89068b7cc2f67135ac61af 100644 (file)
@@ -25,14 +25,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
  */
 #define POKE_MAX_OPCODE_SIZE   5
 
-struct text_poke_loc {
-       void *addr;
-       int len;
-       s32 rel32;
-       u8 opcode;
-       const u8 text[POKE_MAX_OPCODE_SIZE];
-};
-
 extern void text_poke_early(void *addr, const void *opcode, size_t len);
 
 /*
@@ -50,21 +42,13 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len);
  * an inconsistent instruction while you patch.
  */
 extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void text_poke_sync(void);
 extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
 extern int poke_int3_handler(struct pt_regs *regs);
 extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
-extern void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries);
-extern void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
-                              const void *opcode, size_t len, const void *emulate);
-extern int after_bootmem;
-extern __ro_after_init struct mm_struct *poking_mm;
-extern __ro_after_init unsigned long poking_addr;
 
-#ifndef CONFIG_UML_X86
-static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
-{
-       regs->ip = ip;
-}
+extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
+extern void text_poke_finish(void);
 
 #define INT3_INSN_SIZE         1
 #define INT3_INSN_OPCODE       0xCC
@@ -78,6 +62,67 @@ static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
 #define JMP8_INSN_SIZE         2
 #define JMP8_INSN_OPCODE       0xEB
 
+#define DISP32_SIZE            4
+
+static inline int text_opcode_size(u8 opcode)
+{
+       int size = 0;
+
+#define __CASE(insn)   \
+       case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break
+
+       switch(opcode) {
+       __CASE(INT3);
+       __CASE(CALL);
+       __CASE(JMP32);
+       __CASE(JMP8);
+       }
+
+#undef __CASE
+
+       return size;
+}
+
+union text_poke_insn {
+       u8 text[POKE_MAX_OPCODE_SIZE];
+       struct {
+               u8 opcode;
+               s32 disp;
+       } __attribute__((packed));
+};
+
+static __always_inline
+void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
+{
+       static union text_poke_insn insn; /* per instance */
+       int size = text_opcode_size(opcode);
+
+       insn.opcode = opcode;
+
+       if (size > 1) {
+               insn.disp = (long)dest - (long)(addr + size);
+               if (size == 2) {
+                       /*
+                        * Ensure that for JMP9 the displacement
+                        * Ensure that for JMP8 the displacement
+                        */
+                       BUG_ON((insn.disp >> 31) != (insn.disp >> 7));
+               }
+       }
+
+       return &insn.text;
+}
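
The displacement math in text_gen_insn() is ordinary relative-branch encoding: the target is measured from the end of the instruction, i.e. from addr + size. A standalone sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[16];
	uint8_t *addr = buf;		/* where the JMP32 would live */
	uint8_t *dest = buf + 11;	/* branch target */
	int size = 5;			/* opcode byte + 32-bit displacement */
	int32_t disp = (intptr_t)dest - (intptr_t)(addr + size);

	buf[0] = 0xe9;				/* JMP32_INSN_OPCODE */
	memcpy(&buf[1], &disp, sizeof(disp));	/* little-endian imm32 */
	printf("disp = %d\n", disp);		/* 6 == 11 - 5 */
	return 0;
}
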
+
+extern int after_bootmem;
+extern __ro_after_init struct mm_struct *poking_mm;
+extern __ro_after_init unsigned long poking_addr;
+
+#ifndef CONFIG_UML_X86
+static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+{
+       regs->ip = ip;
+}
+
 static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
 {
        /*
@@ -85,6 +130,9 @@ static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
         * stack where the break point happened, and the saving of
         * pt_regs. We can extend the original stack because of
         * this gap. See the idtentry macro's create_gap option.
+        *
+        * Similarly entry_32.S will have a gap on the stack for (any) hardware
+        * exception and pt_regs; see FIXUP_FRAME.
         */
        regs->sp -= sizeof(unsigned long);
        *(unsigned long *)regs->sp = val;
index d779366ce3f89f161c977e7fad5389659108a1bd..cf4327986e98d5bb6ef8a322d4314d0d76e5c9fd 100644 (file)
@@ -239,15 +239,6 @@ static inline int arch_within_stack_frames(const void * const stack,
                           current_thread_info()->status & TS_COMPAT)
 #endif
 
-/*
- * Force syscall return via IRET by making it look as if there was
- * some work pending. IRET is our most capable (but slowest) syscall
- * return path, which is able to restore modified SS, CS and certain
- * EFLAGS values that other (fast) syscall return instructions
- * are not able to restore properly.
- */
-#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
-
 extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
index 230474e2ddb5b3eb687cdd647a15f750a65e5f32..bbcdc7b8f963c076186fbc706938b67b4bc61823 100644 (file)
@@ -21,6 +21,7 @@ struct vdso_image {
        long sym_vvar_page;
        long sym_pvclock_page;
        long sym_hvclock_page;
+       long sym_timens_page;
        long sym_VDSO32_NOTE_MASK;
        long sym___kernel_sigreturn;
        long sym___kernel_rt_sigreturn;
index e9ee139cf29e05ab861d614209601b13d281cf4c..6ee1f7dba34bf4bfc87299a592ed38f76eb86b35 100644 (file)
@@ -21,6 +21,7 @@
 #include <clocksource/hyperv_timer.h>
 
 #define __vdso_data (VVAR(_vdso_data))
+#define __timens_vdso_data (TIMENS(_vdso_data))
 
 #define VDSO_HAS_TIME 1
 
@@ -56,6 +57,13 @@ extern struct ms_hyperv_tsc_page hvclock_page
        __attribute__((visibility("hidden")));
 #endif
 
+#ifdef CONFIG_TIME_NS
+static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+{
+       return __timens_vdso_data;
+}
+#endif
+
 #ifndef BUILD_VDSO32
 
 static __always_inline
@@ -96,8 +104,6 @@ long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 
 #else
 
-#define VDSO_HAS_32BIT_FALLBACK        1
-
 static __always_inline
 long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 {
diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..2983774
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_VMALLOC_H
+#define _ASM_X86_VMALLOC_H
+
+#include <asm/pgtable_areas.h>
+
+#endif /* _ASM_X86_VMALLOC_H */
index 32f5d9a0b90ed7519747ea9b962d6c547bc5e4e7..183e98e49ab943aeaf9e7ecc183c2f383545887d 100644 (file)
 #ifndef _ASM_X86_VVAR_H
 #define _ASM_X86_VVAR_H
 
-#if defined(__VVAR_KERNEL_LDS)
-
-/* The kernel linker script defines its own magic to put vvars in the
- * right place.
+#ifdef EMIT_VVAR
+/*
+ * EMIT_VVAR() is used by the kernel linker script to put vvars in the
+ * right place. Also, it's used by kernel code to import offset values.
  */
 #define DECLARE_VVAR(offset, type, name) \
        EMIT_VVAR(name, offset)
@@ -33,9 +33,12 @@ extern char __vvar_page;
 
 #define DECLARE_VVAR(offset, type, name)                               \
        extern type vvar_ ## name[CS_BASES]                             \
-       __attribute__((visibility("hidden")));
+       __attribute__((visibility("hidden")));                          \
+       extern type timens_ ## name[CS_BASES]                           \
+       __attribute__((visibility("hidden")));                          \
 
 #define VVAR(name) (vvar_ ## name)
+#define TIMENS(name) (timens_ ## name)
 
 #define DEFINE_VVAR(type, name)                                                \
        type name[CS_BASES]                                             \
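
DECLARE_VVAR() is a single declaration list consumed twice: once with EMIT_VVAR defined (the linker-script path, which places the symbols) and once without (the C path, which declares them), the classic X-macro pattern. A standalone illustration with hypothetical names:

#include <stdio.h>

/* One source of truth, like the DECLARE_VVAR list in vvar.h: */
#define VVAR_LIST(V)	\
	V(0,   alpha)	\
	V(128, beta)

/* Consumer 1: declare the variables (the C-code path). */
#define DECLARE(offset, name) int name;
VVAR_LIST(DECLARE)
#undef DECLARE

/* Consumer 2: emit the offsets (the linker-script-style path). */
#define EMIT(offset, name) printf(#name " @ offset %d\n", offset);

int main(void)
{
	VVAR_LIST(EMIT)
	return 0;
}
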
index ca13851f05701387d5faa8bc57eb04fd9b513a9a..26b7256f590fd78a537aec79e2132d1f184c5487 100644 (file)
@@ -26,6 +26,17 @@ unsigned long acpi_realmode_flags;
 static char temp_stack[4096];
 #endif
 
+/**
+ * acpi_get_wakeup_address - provide physical address for S3 wakeup
+ *
+ * Returns the physical address where the kernel should be resumed after the
+ * system awakes from S3, e.g. for programming into the firmware waking vector.
+ */
+unsigned long acpi_get_wakeup_address(void)
+{
+       return ((unsigned long)(real_mode_header->wakeup_start));
+}
+
 /**
  * x86_acpi_enter_sleep_state - enter sleep state
  * @state: Sleep state to enter.
index fbb60ca4255c68fc89acb2762b252db5113a0943..d06c2079b6c142447beb4e05b2facaff70f7831c 100644 (file)
@@ -3,7 +3,7 @@
  *     Variables and functions used by the code in sleep.c
  */
 
-#include <asm/realmode.h>
+#include <linux/linkage.h>
 
 extern unsigned long saved_video_mode;
 extern long saved_magic;
index 9ec463fe96f2c24954ee4531acdc150e359fd761..34360ca301a2e39fabd0e54cab328c11093a4c84 100644 (file)
@@ -936,44 +936,81 @@ static void do_sync_core(void *info)
        sync_core();
 }
 
-static struct bp_patching_desc {
+void text_poke_sync(void)
+{
+       on_each_cpu(do_sync_core, NULL, 1);
+}
+
+struct text_poke_loc {
+       s32 rel_addr; /* addr := _stext + rel_addr */
+       s32 rel32;
+       u8 opcode;
+       const u8 text[POKE_MAX_OPCODE_SIZE];
+};
+
+struct bp_patching_desc {
        struct text_poke_loc *vec;
        int nr_entries;
-} bp_patching;
+       atomic_t refs;
+};
+
+static struct bp_patching_desc *bp_desc;
+
+static inline struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
+{
+       struct bp_patching_desc *desc = READ_ONCE(*descp); /* rcu_dereference */
+
+       if (!desc || !atomic_inc_not_zero(&desc->refs))
+               return NULL;
+
+       return desc;
+}
+
+static inline void put_desc(struct bp_patching_desc *desc)
+{
+       smp_mb__before_atomic();
+       atomic_dec(&desc->refs);
+}
 
-static int patch_cmp(const void *key, const void *elt)
+static inline void *text_poke_addr(struct text_poke_loc *tp)
+{
+       return _stext + tp->rel_addr;
+}
+
+static int notrace patch_cmp(const void *key, const void *elt)
 {
        struct text_poke_loc *tp = (struct text_poke_loc *) elt;
 
-       if (key < tp->addr)
+       if (key < text_poke_addr(tp))
                return -1;
-       if (key > tp->addr)
+       if (key > text_poke_addr(tp))
                return 1;
        return 0;
 }
 NOKPROBE_SYMBOL(patch_cmp);
 
-int poke_int3_handler(struct pt_regs *regs)
+int notrace poke_int3_handler(struct pt_regs *regs)
 {
+       struct bp_patching_desc *desc;
        struct text_poke_loc *tp;
+       int len, ret = 0;
        void *ip;
 
+       if (user_mode(regs))
+               return 0;
+
        /*
         * Having observed our INT3 instruction, we now must observe
-        * bp_patching.nr_entries.
+        * bp_desc:
         *
-        *      nr_entries != 0                 INT3
+        *      bp_desc = desc                  INT3
         *      WMB                             RMB
-        *      write INT3                      if (nr_entries)
-        *
-        * Idem for other elements in bp_patching.
+        *      write INT3                      if (desc)
         */
        smp_rmb();
 
-       if (likely(!bp_patching.nr_entries))
-               return 0;
-
-       if (user_mode(regs))
+       desc = try_get_desc(&bp_desc);
+       if (!desc)
                return 0;
 
        /*
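
Editor's note: try_get_desc()/put_desc() implement a poor man's RCU around bp_desc. The writer publishes the descriptor with smp_store_release(), readers in the INT3 handler take a reference with atomic_inc_not_zero(), and the writer later waits until all references are gone. The reader side, in digest form:

	struct bp_patching_desc *desc;

	desc = try_get_desc(&bp_desc);	/* NULL: no patching in progress */
	if (!desc)
		return 0;

	/* ... look up regs->ip in desc->vec[0 .. nr_entries) ... */

	put_desc(desc);			/* lets the writer's wait complete */
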
@@ -984,19 +1021,20 @@ int poke_int3_handler(struct pt_regs *regs)
        /*
         * Skip the binary search if there is a single member in the vector.
         */
-       if (unlikely(bp_patching.nr_entries > 1)) {
-               tp = bsearch(ip, bp_patching.vec, bp_patching.nr_entries,
+       if (unlikely(desc->nr_entries > 1)) {
+               tp = bsearch(ip, desc->vec, desc->nr_entries,
                             sizeof(struct text_poke_loc),
                             patch_cmp);
                if (!tp)
-                       return 0;
+                       goto out_put;
        } else {
-               tp = bp_patching.vec;
-               if (tp->addr != ip)
-                       return 0;
+               tp = desc->vec;
+               if (text_poke_addr(tp) != ip)
+                       goto out_put;
        }
 
-       ip += tp->len;
+       len = text_opcode_size(tp->opcode);
+       ip += len;
 
        switch (tp->opcode) {
        case INT3_INSN_OPCODE:
@@ -1004,7 +1042,7 @@ int poke_int3_handler(struct pt_regs *regs)
                 * Someone poked an explicit INT3, they'll want to handle it,
                 * do not consume.
                 */
-               return 0;
+               goto out_put;
 
        case CALL_INSN_OPCODE:
                int3_emulate_call(regs, (long)ip + tp->rel32);
@@ -1019,10 +1057,18 @@ int poke_int3_handler(struct pt_regs *regs)
                BUG();
        }
 
-       return 1;
+       ret = 1;
+
+out_put:
+       put_desc(desc);
+       return ret;
 }
 NOKPROBE_SYMBOL(poke_int3_handler);
 
+#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
+static struct text_poke_loc tp_vec[TP_VEC_MAX];
+static int tp_vec_nr;
+
 /**
  * text_poke_bp_batch() -- update instructions on live kernel on SMP
  * @tp:                        vector of instructions to patch
@@ -1044,16 +1090,20 @@ NOKPROBE_SYMBOL(poke_int3_handler);
  *               replacing opcode
  *     - sync cores
  */
-void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
+static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
 {
+       struct bp_patching_desc desc = {
+               .vec = tp,
+               .nr_entries = nr_entries,
+               .refs = ATOMIC_INIT(1),
+       };
        unsigned char int3 = INT3_INSN_OPCODE;
        unsigned int i;
        int do_sync;
 
        lockdep_assert_held(&text_mutex);
 
-       bp_patching.vec = tp;
-       bp_patching.nr_entries = nr_entries;
+       smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
 
        /*
         * Corresponding read barrier in int3 notifier for making sure the
@@ -1065,18 +1115,20 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
         * First step: add a int3 trap to the address that will be patched.
         */
        for (i = 0; i < nr_entries; i++)
-               text_poke(tp[i].addr, &int3, sizeof(int3));
+               text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
 
-       on_each_cpu(do_sync_core, NULL, 1);
+       text_poke_sync();
 
        /*
         * Second step: update all but the first byte of the patched range.
         */
        for (do_sync = 0, i = 0; i < nr_entries; i++) {
-               if (tp[i].len - sizeof(int3) > 0) {
-                       text_poke((char *)tp[i].addr + sizeof(int3),
-                                 (const char *)tp[i].text + sizeof(int3),
-                                 tp[i].len - sizeof(int3));
+               int len = text_opcode_size(tp[i].opcode);
+
+               if (len - INT3_INSN_SIZE > 0) {
+                       text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+                                 (const char *)tp[i].text + INT3_INSN_SIZE,
+                                 len - INT3_INSN_SIZE);
                        do_sync++;
                }
        }
@@ -1087,7 +1139,7 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
                 * not necessary and we'd be safe even without it. But
                 * better safe than sorry (plus there's not only Intel).
                 */
-               on_each_cpu(do_sync_core, NULL, 1);
+               text_poke_sync();
        }
 
        /*
@@ -1098,19 +1150,20 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
                if (tp[i].text[0] == INT3_INSN_OPCODE)
                        continue;
 
-               text_poke(tp[i].addr, tp[i].text, sizeof(int3));
+               text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
                do_sync++;
        }
 
        if (do_sync)
-               on_each_cpu(do_sync_core, NULL, 1);
+               text_poke_sync();
 
        /*
-        * sync_core() implies an smp_mb() and orders this store against
-        * the writing of the new instruction.
+        * Remove the descriptor and wait for all users to go away, as
+        * synchronize_rcu() would, except we have a very primitive
+        * refcount-based completion.
         */
-       bp_patching.vec = NULL;
-       bp_patching.nr_entries = 0;
+       WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
+       if (!atomic_dec_and_test(&desc.refs))
+               atomic_cond_read_acquire(&desc.refs, !VAL);
 }
 
 void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
@@ -1118,11 +1171,7 @@ void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
 {
        struct insn insn;
 
-       if (!opcode)
-               opcode = (void *)tp->text;
-       else
-               memcpy((void *)tp->text, opcode, len);
-
+       memcpy((void *)tp->text, opcode, len);
        if (!emulate)
                emulate = opcode;
 
@@ -1132,8 +1181,7 @@ void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
        BUG_ON(!insn_complete(&insn));
        BUG_ON(len != insn.length);
 
-       tp->addr = addr;
-       tp->len = len;
+       tp->rel_addr = addr - (void *)_stext;
        tp->opcode = insn.opcode.bytes[0];
 
        switch (tp->opcode) {
@@ -1167,6 +1215,55 @@ void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
        }
 }
 
+/*
+ * We rely hard on tp_vec being ordered; ensure this is so by flushing
+ * it early when needed.
+ */
+static bool tp_order_fail(void *addr)
+{
+       struct text_poke_loc *tp;
+
+       if (!tp_vec_nr)
+               return false;
+
+       if (!addr) /* force */
+               return true;
+
+       tp = &tp_vec[tp_vec_nr - 1];
+       if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
+               return true;
+
+       return false;
+}
+
+static void text_poke_flush(void *addr)
+{
+       if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
+               text_poke_bp_batch(tp_vec, tp_vec_nr);
+               tp_vec_nr = 0;
+       }
+}
+
+void text_poke_finish(void)
+{
+       text_poke_flush(NULL);
+}
+
+void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
+{
+       struct text_poke_loc *tp;
+
+       if (unlikely(system_state == SYSTEM_BOOTING)) {
+               text_poke_early(addr, opcode, len);
+               return;
+       }
+
+       text_poke_flush(addr);
+
+       tp = &tp_vec[tp_vec_nr++];
+       text_poke_loc_init(tp, addr, opcode, len, emulate);
+}
+
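Editor's note: text_poke_queue()/text_poke_finish() give callers a batching front end to text_poke_bp_batch(): sites accumulate in tp_vec and are flushed early if the vector fills up or an out-of-order address arrives. A usage sketch, where site[] is a hypothetical caller-side array and text_mutex is assumed held:

	for (i = 0; i < nr_sites; i++)
		text_poke_queue(site[i].addr, site[i].insn,
				site[i].len, NULL);
	text_poke_finish();	/* flush whatever is still queued */
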
 /**
  * text_poke_bp() -- update instructions on live kernel on SMP
  * @addr:      address to patch
@@ -1178,10 +1275,15 @@ void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
  * dynamically allocated memory. This function should be used when it is
  * not possible to allocate memory.
  */
-void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
+void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
 {
        struct text_poke_loc tp;
 
+       if (unlikely(system_state == SYSTEM_BOOTING)) {
+               text_poke_early(addr, opcode, len);
+               return;
+       }
+
        text_poke_loc_init(&tp, addr, opcode, len, emulate);
        text_poke_bp_batch(&tp, 1);
 }
index 251c795b4eb3cd75991aabd6b1cffa6b3fedb8fe..69aed0ebbdfc962691f367706fe8ddeb7cc51df5 100644 (file)
@@ -22,6 +22,7 @@
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
+#define PCI_DEVICE_ID_AMD_19H_DF_F4    0x1654
 
 /* Protect the PCI config register pairs used for SMN and DF indirect access. */
 static DEFINE_MUTEX(smn_mutex);
@@ -52,6 +53,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
        {}
 };
 EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -66,6 +68,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
 };
index 5da106f84e8407c3e3aed2f356b9b96132520be6..fe698f96617ca7ee9bbdde5107af1abc58fc17e2 100644 (file)
@@ -95,7 +95,7 @@ static inline void apbt_set_mapping(void)
                printk(KERN_WARNING "No timer base from SFI, use default\n");
                apbt_address = APBT_DEFAULT_BASE;
        }
-       apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
+       apbt_virt_address = ioremap(apbt_address, APBT_MMAP_SIZE);
        if (!apbt_virt_address) {
                pr_debug("Failed mapping APBT phy address at %lu\n",\
                         (unsigned long)apbt_address);
index d5b51a740524d6759a1a3c88a0b022a6335abe48..ad53b2abc859fe017676c4cf97d8add35455f42b 100644 (file)
@@ -1493,65 +1493,34 @@ static void check_efi_reboot(void)
 }
 
 /* Setup user proc fs files */
-static int proc_hubbed_show(struct seq_file *file, void *data)
+static int __maybe_unused proc_hubbed_show(struct seq_file *file, void *data)
 {
        seq_printf(file, "0x%x\n", uv_hubbed_system);
        return 0;
 }
 
-static int proc_hubless_show(struct seq_file *file, void *data)
+static int __maybe_unused proc_hubless_show(struct seq_file *file, void *data)
 {
        seq_printf(file, "0x%x\n", uv_hubless_system);
        return 0;
 }
 
-static int proc_oemid_show(struct seq_file *file, void *data)
+static int __maybe_unused proc_oemid_show(struct seq_file *file, void *data)
 {
        seq_printf(file, "%s/%s\n", oem_id, oem_table_id);
        return 0;
 }
 
-static int proc_hubbed_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, proc_hubbed_show, (void *)NULL);
-}
-
-static int proc_hubless_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, proc_hubless_show, (void *)NULL);
-}
-
-static int proc_oemid_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, proc_oemid_show, (void *)NULL);
-}
-
-/* (struct is "non-const" as open function is set at runtime) */
-static struct file_operations proc_version_fops = {
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static const struct file_operations proc_oemid_fops = {
-       .open           = proc_oemid_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
 static __init void uv_setup_proc_files(int hubless)
 {
        struct proc_dir_entry *pde;
-       char *name = hubless ? "hubless" : "hubbed";
 
        pde = proc_mkdir(UV_PROC_NODE, NULL);
-       proc_create("oemid", 0, pde, &proc_oemid_fops);
-       proc_create(name, 0, pde, &proc_version_fops);
+       proc_create_single("oemid", 0, pde, proc_oemid_show);
        if (hubless)
-               proc_version_fops.open = proc_hubless_open;
+               proc_create_single("hubless", 0, pde, proc_hubless_show);
        else
-               proc_version_fops.open = proc_hubbed_open;
+               proc_create_single("hubbed", 0, pde, proc_hubbed_show);
 }
 
 /* Initialize UV hubless systems */
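
Editor's note: the conversion to proc_create_single() drops the open/fops boilerplate (and the runtime-mutable file_operations) entirely; the helper generates the single_open() plumbing from just a show() callback. A minimal sketch of the pattern:

	static int foo_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "example\n");
		return 0;
	}

	/* at init time, with pde being the parent directory */
	proc_create_single("foo", 0, pde, foo_show);
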
index 8a88d3f01476c727d43d3c8e86f1d54da8f4e660..ac83a0fef6285d9901bf44b2b5169ab9fef89698 100644 (file)
@@ -608,9 +608,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
                return;
 
 clear_all:
-               clear_cpu_cap(c, X86_FEATURE_SME);
+               setup_clear_cpu_cap(X86_FEATURE_SME);
 clear_sev:
-               clear_cpu_cap(c, X86_FEATURE_SEV);
+               setup_clear_cpu_cap(X86_FEATURE_SEV);
        }
 }
 
index 6b95f18255fa45d1954025e91f148c79ebb6c9d7..745c40135bd397f35b69297dd2c43d445279cdd3 100644 (file)
@@ -50,7 +50,7 @@
 #include <asm/cpu.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/microcode.h>
 #include <asm/microcode_intel.h>
 #include <asm/intel-family.h>
@@ -1024,6 +1024,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define MSBDS_ONLY             BIT(5)
 #define NO_SWAPGS              BIT(6)
 #define NO_ITLB_MULTIHIT       BIT(7)
+#define NO_SPECTRE_V2          BIT(8)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)   \
        { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1085,6 +1086,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
        VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
        VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+       /* Zhaoxin Family 7 */
+       VULNWL(CENTAUR, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS),
+       VULNWL(ZHAOXIN, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS),
        {}
 };
 
@@ -1117,7 +1122,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                return;
 
        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+       if (!cpu_matches(NO_SPECTRE_V2))
+               setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
        if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
index 5167bd2bb6b14dae63313d1def4d93ed1eaac38d..b3a50d962851cec722df10623dbc90e6a3f16985 100644 (file)
@@ -78,6 +78,7 @@ struct smca_bank_name {
 
 static struct smca_bank_name smca_names[] = {
        [SMCA_LS]       = { "load_store",       "Load Store Unit" },
+       [SMCA_LS_V2]    = { "load_store",       "Load Store Unit" },
        [SMCA_IF]       = { "insn_fetch",       "Instruction Fetch Unit" },
        [SMCA_L2_CACHE] = { "l2_cache",         "L2 Cache" },
        [SMCA_DE]       = { "decode_unit",      "Decode Unit" },
@@ -138,6 +139,7 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
 
        /* ZN Core (HWID=0xB0) MCA types */
        { SMCA_LS,       HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF },
+       { SMCA_LS_V2,    HWID_MCATYPE(0xB0, 0x10), 0xFFFFFF },
        { SMCA_IF,       HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
        { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
        { SMCA_DE,       HWID_MCATYPE(0xB0, 0x3), 0x1FF },
@@ -266,10 +268,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
        smca_set_misc_banks_map(bank, cpu);
 
        /* Return early if this bank was already initialized. */
-       if (smca_banks[bank].hwid)
+       if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
                return;
 
-       if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
+       if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
                pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
                return;
        }
index 5f42f25bac8f8c9643def8bae0b93d513c9fbb8b..2c4f949611e48f616f6d40519347e577613e2257 100644 (file)
@@ -53,8 +53,6 @@
 
 #include "internal.h"
 
-static DEFINE_MUTEX(mce_log_mutex);
-
 /* sysfs synchronization */
 static DEFINE_MUTEX(mce_sysfs_mutex);
 
@@ -156,19 +154,10 @@ void mce_log(struct mce *m)
        if (!mce_gen_pool_add(m))
                irq_work_queue(&mce_irq_work);
 }
-
-void mce_inject_log(struct mce *m)
-{
-       mutex_lock(&mce_log_mutex);
-       mce_log(m);
-       mutex_unlock(&mce_log_mutex);
-}
-EXPORT_SYMBOL_GPL(mce_inject_log);
-
-static struct notifier_block mce_srao_nb;
+EXPORT_SYMBOL_GPL(mce_log);
 
 /*
- * We run the default notifier if we have only the SRAO, the first and the
+ * We run the default notifier if we have only the UC, the first and the
  * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS
  * notifiers registered on the chain.
  */
@@ -594,26 +583,29 @@ static struct notifier_block first_nb = {
        .priority       = MCE_PRIO_FIRST,
 };
 
-static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
-                               void *data)
+static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
+                             void *data)
 {
        struct mce *mce = (struct mce *)data;
        unsigned long pfn;
 
-       if (!mce)
+       if (!mce || !mce_usable_address(mce))
                return NOTIFY_DONE;
 
-       if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
-               pfn = mce->addr >> PAGE_SHIFT;
-               if (!memory_failure(pfn, 0))
-                       set_mce_nospec(pfn);
-       }
+       if (mce->severity != MCE_AO_SEVERITY &&
+           mce->severity != MCE_DEFERRED_SEVERITY)
+               return NOTIFY_DONE;
+
+       pfn = mce->addr >> PAGE_SHIFT;
+       if (!memory_failure(pfn, 0))
+               set_mce_nospec(pfn);
 
        return NOTIFY_OK;
 }
-static struct notifier_block mce_srao_nb = {
-       .notifier_call  = srao_decode_notifier,
-       .priority       = MCE_PRIO_SRAO,
+
+static struct notifier_block mce_uc_nb = {
+       .notifier_call  = uc_decode_notifier,
+       .priority       = MCE_PRIO_UC,
 };
 
 static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
@@ -763,26 +755,22 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 log_it:
                error_seen = true;
 
-               mce_read_aux(&m, i);
+               if (flags & MCP_DONTLOG)
+                       goto clear_it;
 
+               mce_read_aux(&m, i);
                m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
-
                /*
                 * Don't get the IP here because it's unlikely to
                 * have anything to do with the actual error location.
                 */
-               if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
-                       mce_log(&m);
-               else if (mce_usable_address(&m)) {
-                       /*
-                        * Although we skipped logging this, we still want
-                        * to take action. Add to the pool so the registered
-                        * notifiers will see it.
-                        */
-                       if (!mce_gen_pool_add(&m))
-                               mce_schedule_work();
-               }
 
+               if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
+                       goto clear_it;
+
+               mce_log(&m);
+
+clear_it:
                /*
                 * Clear state for this bank.
                 */
@@ -807,7 +795,7 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
-       char *tmp;
+       char *tmp = *msg;
        int i;
 
        for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
@@ -819,8 +807,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                if (quirk_no_way_out)
                        quirk_no_way_out(i, m, regs);
 
+               m->bank = i;
                if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
-                       m->bank = i;
                        mce_read_aux(m, i);
                        *msg = tmp;
                        return 1;
@@ -1232,8 +1220,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
        struct mca_config *cfg = &mca_cfg;
        int cpu = smp_processor_id();
-       char *msg = "Unknown";
        struct mce m, *final;
+       char *msg = NULL;
        int worst = 0;
 
        /*
@@ -1365,7 +1353,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                ist_end_non_atomic();
        } else {
                if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
-                       mce_panic("Failed kernel mode recovery", &m, NULL);
+                       mce_panic("Failed kernel mode recovery", &m, msg);
        }
 
 out_ist:
@@ -2041,7 +2029,7 @@ int __init mcheck_init(void)
 {
        mcheck_intel_therm_init();
        mce_register_decode_chain(&first_nb);
-       mce_register_decode_chain(&mce_srao_nb);
+       mce_register_decode_chain(&mce_uc_nb);
        mce_register_decode_chain(&mce_default_nb);
        mcheck_vendor_init_severity();
 
index 1f30117b24ba7af6220fb8a5001f46b9a536a7db..3413b41b8d55f202a646bb1439ecea262462159c 100644 (file)
@@ -494,7 +494,7 @@ static void do_inject(void)
                i_mce.status |= MCI_STATUS_SYNDV;
 
        if (inj_type == SW_INJ) {
-               mce_inject_log(&i_mce);
+               mce_log(&i_mce);
                return;
        }
 
index 842b273bce3140f843d65e998d297a6a4d41c8fc..b785c0d0b5907a062006ebe3c802c6c1e9b9f53e 100644 (file)
@@ -84,8 +84,6 @@ static inline int apei_clear_mce(u64 record_id)
 }
 #endif
 
-void mce_inject_log(struct mce *m);
-
 /*
  * We consider records to be equivalent if bank+status+addr+misc all match.
  * This is only used when the system is going down because of a fatal error
index b38010b541d6edff1f6b6cf885b6d76ba89847d5..58b4ee3cda7774c096cbbac4841976344bbe79a0 100644 (file)
@@ -235,7 +235,7 @@ static void get_therm_status(int level, bool *proc_hot, u8 *temp)
        *temp = (msr_val >> 16) & 0x7F;
 }
 
-static void throttle_active_work(struct work_struct *work)
+static void __maybe_unused throttle_active_work(struct work_struct *work)
 {
        struct _thermal_state *state = container_of(to_delayed_work(work),
                                                struct _thermal_state, therm_work);
@@ -467,6 +467,7 @@ static int thermal_throttle_online(unsigned int cpu)
 {
        struct thermal_state *state = &per_cpu(thermal_state, cpu);
        struct device *dev = get_cpu_device(cpu);
+       u32 l;
 
        state->package_throttle.level = PACKAGE_LEVEL;
        state->core_throttle.level = CORE_LEVEL;
@@ -474,6 +475,10 @@ static int thermal_throttle_online(unsigned int cpu)
        INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
        INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);
 
+       /* Unmask the thermal vector after the above workqueues are initialized. */
+       l = apic_read(APIC_LVTTHMR);
+       apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+
        return thermal_throttle_add_dev(dev, cpu);
 }
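
Editor's note: moving the unmask of the thermal LVT entry into the hotplug-online callback closes a startup race: the interrupt handler schedules the per-CPU therm_work items, so the vector must stay masked until INIT_DELAYED_WORK() has run. In digest form, the required ordering is:

	INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
	INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);

	/* only now may a thermal interrupt safely fire */
	apic_write(APIC_LVTTHMR, apic_read(APIC_LVTTHMR) & ~APIC_LVT_MASKED);
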
 
@@ -722,10 +727,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
-       /* Unmask the thermal vector: */
-       l = apic_read(APIC_LVTTHMR);
-       apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
-
        pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
                      tm2 ? "TM2" : "TM1");
 
index aa5c064a6a227f5cc71f787b3a4122f56f57acfe..51b9190c628b220b77f5ab386e6b62677ffeb185 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 
 #include "mtrr.h"
 
index 507039c20128a6870e1e660d5f5bb3b470cfb18f..6a80f36b5d598ab51c9ba5600a5933900ae113ea 100644 (file)
@@ -52,7 +52,7 @@
 #include <asm/e820/api.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 
 #include "mtrr.h"
 
index 03eb90d00af0fb693a2298f0e956d05f7116ce04..89049b343c7a8f27b0af635253c911d15f393f2b 100644 (file)
@@ -618,7 +618,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                if (static_branch_unlikely(&rdt_mon_enable_key))
                        rmdir_mondata_subdir_allrdtgrp(r, d->id);
                list_del(&d->list);
-               if (is_mbm_enabled())
+               if (r->mon_capable && is_mbm_enabled())
                        cancel_delayed_work(&d->mbm_over);
                if (is_llc_occupancy_enabled() &&  has_busy_rmid(r, d)) {
                        /*
index e49b77283924a9b8b0d5f3a865483dc3baa4d321..181c992f448c068b4cabf8880eb9cf20716475d6 100644 (file)
@@ -57,6 +57,7 @@ static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
 }
 
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 
 /**
  * struct mon_evt - Entry in the event list of a resource
index 397206f23d14f9aeadf860747fa42662d42baadf..773124b0e18ac0b1b6c48aedb6eed41dccde88cb 100644 (file)
@@ -514,7 +514,7 @@ void mbm_handle_overflow(struct work_struct *work)
 
        mutex_lock(&rdtgroup_mutex);
 
-       if (!static_branch_likely(&rdt_enable_key))
+       if (!static_branch_likely(&rdt_mon_enable_key))
                goto out_unlock;
 
        d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
@@ -543,7 +543,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
        unsigned long delay = msecs_to_jiffies(delay_ms);
        int cpu;
 
-       if (!static_branch_likely(&rdt_enable_key))
+       if (!static_branch_likely(&rdt_mon_enable_key))
                return;
        cpu = cpumask_any(&dom->cpu_mask);
        dom->mbm_work_cpu = cpu;
index 2e3b06d6bbc6df9b311bd6e9b942ea0ea4b1fc67..1504bcabc63c220bf79d082c01df3a4be272d046 100644 (file)
@@ -532,11 +532,15 @@ static void move_myself(struct callback_head *head)
                kfree(rdtgrp);
        }
 
+       if (unlikely(current->flags & PF_EXITING))
+               goto out;
+
        preempt_disable();
        /* update PQR_ASSOC MSR to make resource group go into effect */
        resctrl_sched_in();
        preempt_enable();
 
+out:
        kfree(callback);
 }
 
@@ -725,6 +729,92 @@ static int rdtgroup_tasks_show(struct kernfs_open_file *of,
        return ret;
 }
 
+#ifdef CONFIG_PROC_CPU_RESCTRL
+
+/*
+ * A task can only be part of one resctrl control group and of one monitor
+ * group which is associated to that control group.
+ *
+ * 1)   res:
+ *      mon:
+ *
+ *    resctrl is not available.
+ *
+ * 2)   res:/
+ *      mon:
+ *
+ *    Task is part of the root resctrl control group, and it is not associated
+ *    to any monitor group.
+ *
+ * 3)  res:/
+ *     mon:mon0
+ *
+ *    Task is part of the root resctrl control group and monitor group mon0.
+ *
+ * 4)  res:group0
+ *     mon:
+ *
+ *    Task is part of resctrl control group group0, and it is not associated
+ *    to any monitor group.
+ *
+ * 5) res:group0
+ *    mon:mon1
+ *
+ *    Task is part of resctrl control group group0 and monitor group mon1.
+ */
+int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
+                     struct pid *pid, struct task_struct *tsk)
+{
+       struct rdtgroup *rdtg;
+       int ret = 0;
+
+       mutex_lock(&rdtgroup_mutex);
+
+       /* Return empty if resctrl has not been mounted. */
+       if (!static_branch_unlikely(&rdt_enable_key)) {
+               seq_puts(s, "res:\nmon:\n");
+               goto unlock;
+       }
+
+       list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
+               struct rdtgroup *crg;
+
+               /*
+                * Task information is only relevant for shareable
+                * and exclusive groups.
+                */
+               if (rdtg->mode != RDT_MODE_SHAREABLE &&
+                   rdtg->mode != RDT_MODE_EXCLUSIVE)
+                       continue;
+
+               if (rdtg->closid != tsk->closid)
+                       continue;
+
+               seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
+                          rdtg->kn->name);
+               seq_puts(s, "mon:");
+               list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
+                                   mon.crdtgrp_list) {
+                       if (tsk->rmid != crg->mon.rmid)
+                               continue;
+                       seq_printf(s, "%s", crg->kn->name);
+                       break;
+               }
+               seq_putc(s, '\n');
+               goto unlock;
+       }
+       /*
+        * The above search should succeed. Otherwise return
+        * with an error.
+        */
+       ret = -ENOENT;
+unlock:
+       mutex_unlock(&rdtgroup_mutex);
+
+       return ret;
+}
+#endif
+
 static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
                                    struct seq_file *seq, void *v)
 {
@@ -1741,9 +1831,6 @@ static int set_cache_qos_cfg(int level, bool enable)
        struct rdt_domain *d;
        int cpu;
 
-       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
-               return -ENOMEM;
-
        if (level == RDT_RESOURCE_L3)
                update = l3_qos_cfg_update;
        else if (level == RDT_RESOURCE_L2)
@@ -1751,6 +1838,9 @@ static int set_cache_qos_cfg(int level, bool enable)
        else
                return -EINVAL;
 
+       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+               return -ENOMEM;
+
        r_l = &rdt_resources_all[level];
        list_for_each_entry(d, &r_l->domains, list) {
                /* Pick one CPU from each domain instance to update MSR */
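Editor's note: hoisting the level check above the allocation also fixes a small leak: an invalid level previously returned -EINVAL with the just-allocated cpumask never freed. The resulting shape, condensed, is validation first and allocation last, so the early error path needs no cleanup:

	if (level != RDT_RESOURCE_L3 && level != RDT_RESOURCE_L2)
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;
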
index adf9b71386effa99207724997efd6eff34f3c10b..62b137c3c97a2089eafe530305eb111b16353132 100644 (file)
@@ -4,7 +4,7 @@
  */
 #include <linux/cpu.h>
 
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
 
index ee48c3fc8a65e28049e83c72096208760d8f79c5..d3a0791bc052adee6dd73c46416a9ab8226e4f89 100644 (file)
@@ -7,7 +7,7 @@
 
 #include <linux/cpu.h>
 #include <asm/apic.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/processor.h>
 
 #include "cpu.h"
index 3e20d322bc98b636cde2747394d26fcec8783a0c..032509adf9de9f664e808083955de464a35f9d34 100644 (file)
@@ -115,11 +115,12 @@ void __init tsx_init(void)
                tsx_disable();
 
                /*
-                * tsx_disable() will change the state of the
-                * RTM CPUID bit.  Clear it here since it is now
-                * expected to be not set.
+                * tsx_disable() will change the state of the RTM and HLE CPUID
+                * bits. Clear them here since they are now expected to be not
+                * set.
                 */
                setup_clear_cpu_cap(X86_FEATURE_RTM);
+               setup_clear_cpu_cap(X86_FEATURE_HLE);
        } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
 
                /*
@@ -131,10 +132,10 @@ void __init tsx_init(void)
                tsx_enable();
 
                /*
-                * tsx_enable() will change the state of the
-                * RTM CPUID bit.  Force it here since it is now
-                * expected to be set.
+                * tsx_enable() will change the state of the RTM and HLE CPUID
+                * bits. Force them here since they are now expected to be set.
                 */
                setup_force_cpu_cap(X86_FEATURE_RTM);
+               setup_force_cpu_cap(X86_FEATURE_HLE);
        }
 }
index 4cba91ec80492ab42dda142a6555dd6dcf3dfbc0..2f9ec14be3b11adfb7526b0513196e333078ab7f 100644 (file)
@@ -710,8 +710,12 @@ static struct chipset early_qrk[] __initdata = {
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+       { PCI_VENDOR_ID_INTEL, 0x3e20,
+               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
        { PCI_VENDOR_ID_INTEL, 0x3ec4,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+       { PCI_VENDOR_ID_INTEL, 0x8a12,
+               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
        { PCI_VENDOR_ID_BROADCOM, 0x4331,
          PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
        {}
index 319be936c348ef1f0c09ed99d243e4d04efb8005..fa31470bbf24aa72582b37d56879f16081fe6eaa 100644 (file)
@@ -259,7 +259,7 @@ static void __init setup_xstate_features(void)
                                                   xmm_space);
 
        xstate_offsets[XFEATURE_SSE]    = xstate_sizes[XFEATURE_FP];
-       xstate_sizes[XFEATURE_SSE]      = FIELD_SIZEOF(struct fxregs_state,
+       xstate_sizes[XFEATURE_SSE]      = sizeof_field(struct fxregs_state,
                                                       xmm_space);
 
        for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
index 060a361d9d11b09303d901cbfa7dff896d5af1d2..37a0aeaf89e771b63bccbeab92979469cbd96ecb 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/memory.h>
+#include <linux/vmalloc.h>
 
 #include <trace/syscall.h>
 
@@ -34,6 +35,8 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+static int ftrace_poke_late = 0;
+
 int ftrace_arch_code_modify_prepare(void)
     __acquires(&text_mutex)
 {
@@ -43,84 +46,37 @@ int ftrace_arch_code_modify_prepare(void)
         * ftrace has it set to "read/write".
         */
        mutex_lock(&text_mutex);
-       set_kernel_text_rw();
-       set_all_modules_text_rw();
+       ftrace_poke_late = 1;
        return 0;
 }
 
 int ftrace_arch_code_modify_post_process(void)
     __releases(&text_mutex)
 {
-       set_all_modules_text_ro();
-       set_kernel_text_ro();
+       /*
+        * ftrace_make_{call,nop}() may be called during module load,
+        * and the text_poke_queue() work they start must be finished
+        * here.
+        */
+       text_poke_finish();
+       ftrace_poke_late = 0;
        mutex_unlock(&text_mutex);
        return 0;
 }
 
-union ftrace_code_union {
-       char code[MCOUNT_INSN_SIZE];
-       struct {
-               unsigned char op;
-               int offset;
-       } __attribute__((packed));
-};
-
-static int ftrace_calc_offset(long ip, long addr)
-{
-       return (int)(addr - ip);
-}
-
-static unsigned char *
-ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
+static const char *ftrace_nop_replace(void)
 {
-       static union ftrace_code_union calc;
-
-       calc.op         = op;
-       calc.offset     = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
-
-       return calc.code;
-}
-
-static unsigned char *
-ftrace_call_replace(unsigned long ip, unsigned long addr)
-{
-       return ftrace_text_replace(0xe8, ip, addr);
-}
-
-static inline int
-within(unsigned long addr, unsigned long start, unsigned long end)
-{
-       return addr >= start && addr < end;
+       return ideal_nops[NOP_ATOMIC5];
 }
 
-static unsigned long text_ip_addr(unsigned long ip)
+static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
-       /*
-        * On x86_64, kernel text mappings are mapped read-only, so we use
-        * the kernel identity mapping instead of the kernel text mapping
-        * to modify the kernel text.
-        *
-        * For 32bit kernels, these mappings are same and we can use
-        * kernel identity mapping to modify code.
-        */
-       if (within(ip, (unsigned long)_text, (unsigned long)_etext))
-               ip = (unsigned long)__va(__pa_symbol(ip));
-
-       return ip;
+       return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
 }
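
Editor's note: ftrace_call_replace() now defers to the common text_gen_insn() helper instead of a private union. For CALL_INSN_OPCODE that produces the 5-byte near call, E8 followed by a 32-bit displacement measured from the end of the instruction, roughly:

	s32 disp = (s32)(addr - (ip + CALL_INSN_SIZE));
	/* emitted bytes: { 0xE8, disp[0], disp[1], disp[2], disp[3] } */
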
 
-static const unsigned char *ftrace_nop_replace(void)
+static int ftrace_verify_code(unsigned long ip, const char *old_code)
 {
-       return ideal_nops[NOP_ATOMIC5];
-}
-
-static int
-ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
-                  unsigned const char *new_code)
-{
-       unsigned char replaced[MCOUNT_INSN_SIZE];
-
-       ftrace_expected = old_code;
+       char cur_code[MCOUNT_INSN_SIZE];
 
        /*
         * Note:
@@ -129,31 +85,46 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
         * Carefully read and modify the code with probe_kernel_*(), and make
         * sure what we read is what we expected it to be before modifying it.
         */
-
        /* read the text we want to modify */
-       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+       if (probe_kernel_read(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
+               WARN_ON(1);
                return -EFAULT;
+       }
 
        /* Make sure it is what we expect it to be */
-       if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+       if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
+               WARN_ON(1);
                return -EINVAL;
+       }
 
-       ip = text_ip_addr(ip);
-
-       /* replace the text with the new text */
-       if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
-               return -EPERM;
+       return 0;
+}
 
-       sync_core();
+/*
+ * Marked __ref because it calls text_poke_early() which is .init.text. That is
+ * ok because that call will happen early, during boot, when .init sections are
+ * still present.
+ */
+static int __ref
+ftrace_modify_code_direct(unsigned long ip, const char *old_code,
+                         const char *new_code)
+{
+       int ret = ftrace_verify_code(ip, old_code);
+       if (ret)
+               return ret;
 
+       /* replace the text with the new text */
+       if (ftrace_poke_late)
+               text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
+       else
+               text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
        return 0;
 }
 
-int ftrace_make_nop(struct module *mod,
-                   struct dyn_ftrace *rec, unsigned long addr)
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
 {
-       unsigned const char *new, *old;
        unsigned long ip = rec->ip;
+       const char *new, *old;
 
        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();
@@ -167,19 +138,20 @@ int ftrace_make_nop(struct module *mod,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
-               return ftrace_modify_code_direct(rec->ip, old, new);
+               return ftrace_modify_code_direct(ip, old, new);
 
-       ftrace_expected = NULL;
-
-       /* Normal cases use add_brk_on_nop */
+       /*
+        * x86 overrides ftrace_replace_code -- this function will never be used
+        * in this case.
+        */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-       unsigned const char *new, *old;
        unsigned long ip = rec->ip;
+       const char *new, *old;
 
        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);
@@ -188,43 +160,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        return ftrace_modify_code_direct(rec->ip, old, new);
 }
 
-/*
- * The modifying_ftrace_code is used to tell the breakpoint
- * handler to call ftrace_int3_handler(). If it fails to
- * call this handler for a breakpoint added by ftrace, then
- * the kernel may crash.
- *
- * As atomic_writes on x86 do not need a barrier, we do not
- * need to add smp_mb()s for this to work. It is also considered
- * that we can not read the modifying_ftrace_code before
- * executing the breakpoint. That would be quite remarkable if
- * it could do that. Here's the flow that is required:
- *
- *   CPU-0                          CPU-1
- *
- * atomic_inc(mfc);
- * write int3s
- *                             <trap-int3> // implicit (r)mb
- *                             if (atomic_read(mfc))
- *                                     call ftrace_int3_handler()
- *
- * Then when we are finished:
- *
- * atomic_dec(mfc);
- *
- * If we hit a breakpoint that was not set by ftrace, it does not
- * matter if ftrace_int3_handler() is called or not. It will
- * simply be ignored. But it is crucial that a ftrace nop/caller
- * breakpoint is handled. No other user should ever place a
- * breakpoint on an ftrace nop/caller location. It must only
- * be done by this code.
- */
-atomic_t modifying_ftrace_code __read_mostly;
-
-static int
-ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
-                  unsigned const char *new_code);
-
 /*
  * Should never be called:
  *  As it is only called by __ftrace_replace_code() which is called by
@@ -237,452 +172,84 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                 unsigned long addr)
 {
        WARN_ON(1);
-       ftrace_expected = NULL;
        return -EINVAL;
 }
 
-static unsigned long ftrace_update_func;
-static unsigned long ftrace_update_func_call;
-
-static int update_ftrace_func(unsigned long ip, void *new)
-{
-       unsigned char old[MCOUNT_INSN_SIZE];
-       int ret;
-
-       memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
-
-       ftrace_update_func = ip;
-       /* Make sure the breakpoints see the ftrace_update_func update */
-       smp_wmb();
-
-       /* See comment above by declaration of modifying_ftrace_code */
-       atomic_inc(&modifying_ftrace_code);
-
-       ret = ftrace_modify_code(ip, old, new);
-
-       atomic_dec(&modifying_ftrace_code);
-
-       return ret;
-}
-
 int ftrace_update_ftrace_func(ftrace_func_t func)
-{
-       unsigned long ip = (unsigned long)(&ftrace_call);
-       unsigned char *new;
-       int ret;
-
-       ftrace_update_func_call = (unsigned long)func;
-
-       new = ftrace_call_replace(ip, (unsigned long)func);
-       ret = update_ftrace_func(ip, new);
-
-       /* Also update the regs callback function */
-       if (!ret) {
-               ip = (unsigned long)(&ftrace_regs_call);
-               new = ftrace_call_replace(ip, (unsigned long)func);
-               ret = update_ftrace_func(ip, new);
-       }
-
-       return ret;
-}
-
-static nokprobe_inline int is_ftrace_caller(unsigned long ip)
-{
-       if (ip == ftrace_update_func)
-               return 1;
-
-       return 0;
-}
-
-/*
- * A breakpoint was added to the code address we are about to
- * modify, and this is the handle that will just skip over it.
- * We are either changing a nop into a trace call, or a trace
- * call to a nop. While the change is taking place, we treat
- * it just like it was a nop.
- */
-int ftrace_int3_handler(struct pt_regs *regs)
 {
        unsigned long ip;
+       const char *new;
 
-       if (WARN_ON_ONCE(!regs))
-               return 0;
-
-       ip = regs->ip - INT3_INSN_SIZE;
-
-       if (ftrace_location(ip)) {
-               int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
-               return 1;
-       } else if (is_ftrace_caller(ip)) {
-               if (!ftrace_update_func_call) {
-                       int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
-                       return 1;
-               }
-               int3_emulate_call(regs, ftrace_update_func_call);
-               return 1;
-       }
-
-       return 0;
-}
-NOKPROBE_SYMBOL(ftrace_int3_handler);
-
-static int ftrace_write(unsigned long ip, const char *val, int size)
-{
-       ip = text_ip_addr(ip);
-
-       if (probe_kernel_write((void *)ip, val, size))
-               return -EPERM;
-
-       return 0;
-}
-
-static int add_break(unsigned long ip, const char *old)
-{
-       unsigned char replaced[MCOUNT_INSN_SIZE];
-       unsigned char brk = BREAKPOINT_INSTRUCTION;
-
-       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
-               return -EFAULT;
-
-       ftrace_expected = old;
-
-       /* Make sure it is what we expect it to be */
-       if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
-               return -EINVAL;
-
-       return ftrace_write(ip, &brk, 1);
-}
-
-static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
-{
-       unsigned const char *old;
-       unsigned long ip = rec->ip;
-
-       old = ftrace_call_replace(ip, addr);
-
-       return add_break(rec->ip, old);
-}
-
-
-static int add_brk_on_nop(struct dyn_ftrace *rec)
-{
-       unsigned const char *old;
-
-       old = ftrace_nop_replace();
-
-       return add_break(rec->ip, old);
-}
-
-static int add_breakpoints(struct dyn_ftrace *rec, bool enable)
-{
-       unsigned long ftrace_addr;
-       int ret;
-
-       ftrace_addr = ftrace_get_addr_curr(rec);
-
-       ret = ftrace_test_record(rec, enable);
-
-       switch (ret) {
-       case FTRACE_UPDATE_IGNORE:
-               return 0;
-
-       case FTRACE_UPDATE_MAKE_CALL:
-               /* converting nop to call */
-               return add_brk_on_nop(rec);
-
-       case FTRACE_UPDATE_MODIFY_CALL:
-       case FTRACE_UPDATE_MAKE_NOP:
-               /* converting a call to a nop */
-               return add_brk_on_call(rec, ftrace_addr);
-       }
-       return 0;
-}
-
-/*
- * On error, we need to remove breakpoints. This needs to
- * be done caefully. If the address does not currently have a
- * breakpoint, we know we are done. Otherwise, we look at the
- * remaining 4 bytes of the instruction. If it matches a nop
- * we replace the breakpoint with the nop. Otherwise we replace
- * it with the call instruction.
- */
-static int remove_breakpoint(struct dyn_ftrace *rec)
-{
-       unsigned char ins[MCOUNT_INSN_SIZE];
-       unsigned char brk = BREAKPOINT_INSTRUCTION;
-       const unsigned char *nop;
-       unsigned long ftrace_addr;
-       unsigned long ip = rec->ip;
-
-       /* If we fail the read, just give up */
-       if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
-               return -EFAULT;
-
-       /* If this does not have a breakpoint, we are done */
-       if (ins[0] != brk)
-               return 0;
-
-       nop = ftrace_nop_replace();
-
-       /*
-        * If the last 4 bytes of the instruction do not match
-        * a nop, then we assume that this is a call to ftrace_addr.
-        */
-       if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
-               /*
-                * For extra paranoidism, we check if the breakpoint is on
-                * a call that would actually jump to the ftrace_addr.
-                * If not, don't touch the breakpoint, we make just create
-                * a disaster.
-                */
-               ftrace_addr = ftrace_get_addr_new(rec);
-               nop = ftrace_call_replace(ip, ftrace_addr);
-
-               if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
-                       goto update;
-
-               /* Check both ftrace_addr and ftrace_old_addr */
-               ftrace_addr = ftrace_get_addr_curr(rec);
-               nop = ftrace_call_replace(ip, ftrace_addr);
-
-               ftrace_expected = nop;
-
-               if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
-                       return -EINVAL;
-       }
-
- update:
-       return ftrace_write(ip, nop, 1);
-}
-
-static int add_update_code(unsigned long ip, unsigned const char *new)
-{
-       /* skip breakpoint */
-       ip++;
-       new++;
-       return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
-}
-
-static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
-{
-       unsigned long ip = rec->ip;
-       unsigned const char *new;
-
-       new = ftrace_call_replace(ip, addr);
-       return add_update_code(ip, new);
-}
-
-static int add_update_nop(struct dyn_ftrace *rec)
-{
-       unsigned long ip = rec->ip;
-       unsigned const char *new;
-
-       new = ftrace_nop_replace();
-       return add_update_code(ip, new);
-}
-
-static int add_update(struct dyn_ftrace *rec, bool enable)
-{
-       unsigned long ftrace_addr;
-       int ret;
-
-       ret = ftrace_test_record(rec, enable);
-
-       ftrace_addr  = ftrace_get_addr_new(rec);
-
-       switch (ret) {
-       case FTRACE_UPDATE_IGNORE:
-               return 0;
-
-       case FTRACE_UPDATE_MODIFY_CALL:
-       case FTRACE_UPDATE_MAKE_CALL:
-               /* converting nop to call */
-               return add_update_call(rec, ftrace_addr);
-
-       case FTRACE_UPDATE_MAKE_NOP:
-               /* converting a call to a nop */
-               return add_update_nop(rec);
-       }
-
-       return 0;
-}
-
-static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
-{
-       unsigned long ip = rec->ip;
-       unsigned const char *new;
-
-       new = ftrace_call_replace(ip, addr);
-
-       return ftrace_write(ip, new, 1);
-}
-
-static int finish_update_nop(struct dyn_ftrace *rec)
-{
-       unsigned long ip = rec->ip;
-       unsigned const char *new;
-
-       new = ftrace_nop_replace();
-
-       return ftrace_write(ip, new, 1);
-}
-
-static int finish_update(struct dyn_ftrace *rec, bool enable)
-{
-       unsigned long ftrace_addr;
-       int ret;
-
-       ret = ftrace_update_record(rec, enable);
-
-       ftrace_addr = ftrace_get_addr_new(rec);
-
-       switch (ret) {
-       case FTRACE_UPDATE_IGNORE:
-               return 0;
-
-       case FTRACE_UPDATE_MODIFY_CALL:
-       case FTRACE_UPDATE_MAKE_CALL:
-               /* converting nop to call */
-               return finish_update_call(rec, ftrace_addr);
+       ip = (unsigned long)(&ftrace_call);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
 
-       case FTRACE_UPDATE_MAKE_NOP:
-               /* converting a call to a nop */
-               return finish_update_nop(rec);
-       }
+       ip = (unsigned long)(&ftrace_regs_call);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
 
        return 0;
 }
 
-static void do_sync_core(void *data)
-{
-       sync_core();
-}
-
-static void run_sync(void)
-{
-       int enable_irqs;
-
-       /* No need to sync if there's only one CPU */
-       if (num_online_cpus() == 1)
-               return;
-
-       enable_irqs = irqs_disabled();
-
-       /* We may be called with interrupts disabled (on bootup). */
-       if (enable_irqs)
-               local_irq_enable();
-       on_each_cpu(do_sync_core, NULL, 1);
-       if (enable_irqs)
-               local_irq_disable();
-}
-
 void ftrace_replace_code(int enable)
 {
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
-       const char *report = "adding breakpoints";
-       int count = 0;
+       const char *new, *old;
        int ret;
 
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
 
-               ret = add_breakpoints(rec, enable);
-               if (ret)
-                       goto remove_breakpoints;
-               count++;
-       }
-
-       run_sync();
+               switch (ftrace_test_record(rec, enable)) {
+               case FTRACE_UPDATE_IGNORE:
+               default:
+                       continue;
 
-       report = "updating code";
-       count = 0;
+               case FTRACE_UPDATE_MAKE_CALL:
+                       old = ftrace_nop_replace();
+                       break;
 
-       for_ftrace_rec_iter(iter) {
-               rec = ftrace_rec_iter_record(iter);
+               case FTRACE_UPDATE_MODIFY_CALL:
+               case FTRACE_UPDATE_MAKE_NOP:
+                       old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
+                       break;
+               }
 
-               ret = add_update(rec, enable);
-               if (ret)
-                       goto remove_breakpoints;
-               count++;
+               ret = ftrace_verify_code(rec->ip, old);
+               if (ret) {
+                       ftrace_bug(ret, rec);
+                       return;
+               }
        }
 
-       run_sync();
-
-       report = "removing breakpoints";
-       count = 0;
-
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
 
-               ret = finish_update(rec, enable);
-               if (ret)
-                       goto remove_breakpoints;
-               count++;
-       }
+               switch (ftrace_test_record(rec, enable)) {
+               case FTRACE_UPDATE_IGNORE:
+               default:
+                       continue;
 
-       run_sync();
+               case FTRACE_UPDATE_MAKE_CALL:
+               case FTRACE_UPDATE_MODIFY_CALL:
+                       new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
+                       break;
 
-       return;
+               case FTRACE_UPDATE_MAKE_NOP:
+                       new = ftrace_nop_replace();
+                       break;
+               }
 
- remove_breakpoints:
-       pr_warn("Failed on %s (%d):\n", report, count);
-       ftrace_bug(ret, rec);
-       for_ftrace_rec_iter(iter) {
-               rec = ftrace_rec_iter_record(iter);
-               /*
-                * Breakpoints are handled only when this function is in
-                * progress. The system could not work with them.
-                */
-               if (remove_breakpoint(rec))
-                       BUG();
+               text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
+               ftrace_update_record(rec, enable);
        }
-       run_sync();
-}
-
-static int
-ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
-                  unsigned const char *new_code)
-{
-       int ret;
-
-       ret = add_break(ip, old_code);
-       if (ret)
-               goto out;
-
-       run_sync();
-
-       ret = add_update_code(ip, new_code);
-       if (ret)
-               goto fail_update;
-
-       run_sync();
-
-       ret = ftrace_write(ip, new_code, 1);
-       /*
-        * The breakpoint is handled only when this function is in progress.
-        * The system could not work if we could not remove it.
-        */
-       BUG_ON(ret);
- out:
-       run_sync();
-       return ret;
-
- fail_update:
-       /* Also here the system could not work with the breakpoint */
-       if (ftrace_write(ip, old_code, 1))
-               BUG();
-       goto out;
+       text_poke_finish();
 }
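
Editor's note: with text_poke_bp_batch() doing the INT3 dance, ftrace_replace_code() collapses into two passes over the records: verify that every site still holds the expected bytes, then queue all new instructions and flush. A digest, with old_bytes()/new_bytes() as hypothetical stand-ins for the switch statements above:

	for_ftrace_rec_iter(iter)		/* pass 1: verify */
		ftrace_verify_code(rec->ip, old_bytes(rec));

	for_ftrace_rec_iter(iter) {		/* pass 2: queue */
		text_poke_queue((void *)rec->ip, new_bytes(rec),
				MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
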
 
 void arch_ftrace_update_code(int command)
 {
-       /* See comment above by declaration of modifying_ftrace_code */
-       atomic_inc(&modifying_ftrace_code);
-
        ftrace_modify_all_code(command);
-
-       atomic_dec(&modifying_ftrace_code);
 }
 
 int __init ftrace_dyn_arch_init(void)
@@ -747,6 +314,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long op_offset;
+       unsigned long call_offset;
        unsigned long offset;
        unsigned long npages;
        unsigned long size;
@@ -763,10 +331,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
                start_offset = (unsigned long)ftrace_regs_caller;
                end_offset = (unsigned long)ftrace_regs_caller_end;
                op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
+               call_offset = (unsigned long)ftrace_regs_call;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                end_offset = (unsigned long)ftrace_epilogue;
                op_offset = (unsigned long)ftrace_caller_op_ptr;
+               call_offset = (unsigned long)ftrace_call;
        }
 
        size = end_offset - start_offset;
@@ -823,16 +393,21 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
        /* put in the new offset to the ftrace_ops */
        memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
 
+       /* put in the call to the function */
+       mutex_lock(&text_mutex);
+       call_offset -= start_offset;
+       memcpy(trampoline + call_offset,
+              text_gen_insn(CALL_INSN_OPCODE,
+                            trampoline + call_offset,
+                            ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
+       mutex_unlock(&text_mutex);
+
        /* ALLOC_TRAMP flags lets us know we created it */
        ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
 
        set_vm_flush_reset_perms(trampoline);
 
-       /*
-        * Module allocation needs to be completed by making the page
-        * executable. The page is still writable, which is a security hazard,
-        * but anyhow ftrace breaks W^X completely.
-        */
+       set_memory_ro((unsigned long)trampoline, npages);
        set_memory_x((unsigned long)trampoline, npages);
        return (unsigned long)trampoline;
 fail:
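
One subtlety above: text_gen_insn() hands back a pointer into a static scratch
buffer, which is presumably why the memcpy() runs under text_mutex. A
simplified sketch of the helper for the 5-byte CALL/JMP case (the real version
also handles other opcode sizes; names here are illustrative):

	union text_poke_insn_sketch {
		u8 text[5];
		struct {
			u8 opcode;
			s32 disp;
		} __attribute__((packed));
	};

	static void *text_gen_insn_sketch(u8 opcode, const void *addr,
					  const void *dest)
	{
		static union text_poke_insn_sketch insn;  /* callers serialize */

		insn.opcode = opcode;
		/* rel32 is relative to the end of the 5-byte instruction: */
		insn.disp = (long)dest - ((long)addr + 5);
		return &insn.text;
	}
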
@@ -859,62 +434,54 @@ static unsigned long calc_trampoline_call_offset(bool save_regs)
 void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 {
        ftrace_func_t func;
-       unsigned char *new;
        unsigned long offset;
        unsigned long ip;
        unsigned int size;
-       int ret, npages;
+       const char *new;
 
-       if (ops->trampoline) {
-               /*
-                * The ftrace_ops caller may set up its own trampoline.
-                * In such a case, this code must not modify it.
-                */
-               if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
-                       return;
-               npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
-               set_memory_rw(ops->trampoline, npages);
-       } else {
+       if (!ops->trampoline) {
                ops->trampoline = create_trampoline(ops, &size);
                if (!ops->trampoline)
                        return;
                ops->trampoline_size = size;
-               npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+               return;
        }
 
+       /*
+        * The ftrace_ops caller may set up its own trampoline.
+        * In such a case, this code must not modify it.
+        */
+       if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
+               return;
+
        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        ip = ops->trampoline + offset;
-
        func = ftrace_ops_get_func(ops);
 
-       ftrace_update_func_call = (unsigned long)func;
-
+       mutex_lock(&text_mutex);
        /* Do a safe modify in case the trampoline is executing */
        new = ftrace_call_replace(ip, (unsigned long)func);
-       ret = update_ftrace_func(ip, new);
-       set_memory_ro(ops->trampoline, npages);
-
-       /* The update should never fail */
-       WARN_ON(ret);
+       text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+       mutex_unlock(&text_mutex);
 }
 
 /* Return the address of the function the trampoline calls */
 static void *addr_from_call(void *ptr)
 {
-       union ftrace_code_union calc;
+       union text_poke_insn call;
        int ret;
 
-       ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
+       ret = probe_kernel_read(&call, ptr, CALL_INSN_SIZE);
        if (WARN_ON_ONCE(ret < 0))
                return NULL;
 
        /* Make sure this is a call */
-       if (WARN_ON_ONCE(calc.op != 0xe8)) {
-               pr_warn("Expected e8, got %x\n", calc.op);
+       if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
+               pr_warn("Expected E8, got %x\n", call.opcode);
                return NULL;
        }
 
-       return ptr + MCOUNT_INSN_SIZE + calc.offset;
+       return ptr + CALL_INSN_SIZE + call.disp;
 }
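
The displacement arithmetic above is plain rel32 addressing: a 5-byte CALL
encodes disp = dest - (ip + 5), so the decode side recovers
dest = ip + 5 + disp. A self-contained round-trip check (userspace C with
illustrative addresses, not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint8_t bytes[5] = { 0xe8, 0x2f, 0x12, 0x00, 0x00 };
		uint64_t ip = 0x1000;	/* illustrative address of the CALL */
		int32_t disp;

		if (bytes[0] != 0xe8)	/* CALL_INSN_OPCODE */
			return 1;

		memcpy(&disp, bytes + 1, sizeof(disp));	/* little-endian rel32 */
		printf("target = %#llx\n",		/* 0x1000 + 5 + 0x122f */
		       (unsigned long long)(ip + 5 + disp));
		return 0;
	}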
 
 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
@@ -981,19 +548,18 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern void ftrace_graph_call(void);
 
-static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
+static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
 {
-       return ftrace_text_replace(0xe9, ip, addr);
+       return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
 }
 
 static int ftrace_mod_jmp(unsigned long ip, void *func)
 {
-       unsigned char *new;
+       const char *new;
 
-       ftrace_update_func_call = 0UL;
        new = ftrace_jmp_replace(ip, (unsigned long)func);
-
-       return update_ftrace_func(ip, new);
+       text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
+       return 0;
 }
 
 int ftrace_enable_ftrace_graph_caller(void)
@@ -1019,10 +585,9 @@ int ftrace_disable_ftrace_graph_caller(void)
 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                           unsigned long frame_pointer)
 {
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;
        int faulted;
-       unsigned long return_hooker = (unsigned long)
-                               &return_to_handler;
 
        /*
         * When resuming from suspend-to-ram, this function can be indirectly
@@ -1042,20 +607,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
-       /*
-        * If the return location is actually pointing directly to
-        * the start of a direct trampoline (if we trace the trampoline
-        * it will still be offset by MCOUNT_INSN_SIZE), then the
-        * return address is actually off by one word, and we
-        * need to adjust for that.
-        */
-       if (ftrace_direct_func_count) {
-               if (ftrace_find_direct_func(self_addr + MCOUNT_INSN_SIZE)) {
-                       self_addr = *parent;
-                       parent++;
-               }
-       }
-
        /*
         * Protect against fault, even if it shouldn't
         * happen. This tool is too much intrusive to
index c6f791bc481eb19d5fb4cb34989c2b485c82c038..7a50f0b62a709091dfd17619252efbdf4eb1ad7f 100644 (file)
@@ -84,7 +84,7 @@ static inline void hpet_writel(unsigned int d, unsigned int a)
 
 static inline void hpet_set_mapping(void)
 {
-       hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
+       hpet_virt_address = ioremap(hpet_address, HPET_MMAP_SIZE);
 }
 
 static inline void hpet_clear_mapping(void)
index c1a8b9e714086387691f2b3cfe547eca0c55db6d..9c4498ea0b3c0d212d2d21ab20ca6cae309ef729 100644 (file)
 #include <asm/alternative.h>
 #include <asm/text-patching.h>
 
-union jump_code_union {
-       char code[JUMP_LABEL_NOP_SIZE];
-       struct {
-               char jump;
-               int offset;
-       } __attribute__((packed));
-};
-
-static void bug_at(unsigned char *ip, int line)
+static void bug_at(const void *ip, int line)
 {
        /*
         * The location is not an op that we were expecting.
@@ -35,42 +27,42 @@ static void bug_at(unsigned char *ip, int line)
        BUG();
 }
 
-static void __jump_label_set_jump_code(struct jump_entry *entry,
-                                      enum jump_label_type type,
-                                      union jump_code_union *code,
-                                      int init)
+static const void *
+__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type, int init)
 {
        const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
        const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
-       const void *expect;
+       const void *expect, *code;
+       const void *addr, *dest;
        int line;
 
-       code->jump = 0xe9;
-       code->offset = jump_entry_target(entry) -
-                      (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
+       addr = (void *)jump_entry_code(entry);
+       dest = (void *)jump_entry_target(entry);
+
+       code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
 
        if (init) {
                expect = default_nop; line = __LINE__;
        } else if (type == JUMP_LABEL_JMP) {
                expect = ideal_nop; line = __LINE__;
        } else {
-               expect = code->code; line = __LINE__;
+               expect = code; line = __LINE__;
        }
 
-       if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
-               bug_at((void *)jump_entry_code(entry), line);
+       if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
+               bug_at(addr, line);
 
        if (type == JUMP_LABEL_NOP)
-               memcpy(code, ideal_nop, JUMP_LABEL_NOP_SIZE);
+               code = ideal_nop;
+
+       return code;
 }
 
-static void __ref __jump_label_transform(struct jump_entry *entry,
-                                        enum jump_label_type type,
-                                        int init)
+static inline void __jump_label_transform(struct jump_entry *entry,
+                                         enum jump_label_type type,
+                                         int init)
 {
-       union jump_code_union code;
-
-       __jump_label_set_jump_code(entry, type, &code, init);
+       const void *opcode = __jump_label_set_jump_code(entry, type, init);
 
        /*
         * As long as only a single processor is running and the code is still
@@ -84,31 +76,33 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
         * always nop being the 'currently valid' instruction
         */
        if (init || system_state == SYSTEM_BOOTING) {
-               text_poke_early((void *)jump_entry_code(entry), &code,
+               text_poke_early((void *)jump_entry_code(entry), opcode,
                                JUMP_LABEL_NOP_SIZE);
                return;
        }
 
-       text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE, NULL);
+       text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
 }
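
As a reminder of why text_poke_bp() is safe against concurrent execution, the
sequence it implements is roughly the following (a sketch of the mechanism,
not the exact implementation):

	/*
	 * 1) write INT3 over the first opcode byte,       sync all CPUs;
	 * 2) write the remaining len-1 instruction bytes, sync all CPUs;
	 * 3) replace the INT3 with the final first byte,  sync all CPUs.
	 *
	 * A CPU that executes the temporary INT3 is caught by
	 * poke_int3_handler(), which emulates the new instruction (or
	 * the explicit 'emulate' argument) instead of running torn bytes.
	 */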
 
-void arch_jump_label_transform(struct jump_entry *entry,
-                              enum jump_label_type type)
+static void __ref jump_label_transform(struct jump_entry *entry,
+                                      enum jump_label_type type,
+                                      int init)
 {
        mutex_lock(&text_mutex);
-       __jump_label_transform(entry, type, 0);
+       __jump_label_transform(entry, type, init);
        mutex_unlock(&text_mutex);
 }
 
-#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
-static struct text_poke_loc tp_vec[TP_VEC_MAX];
-static int tp_vec_nr;
+void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+{
+       jump_label_transform(entry, type, 0);
+}
 
 bool arch_jump_label_transform_queue(struct jump_entry *entry,
                                     enum jump_label_type type)
 {
-       struct text_poke_loc *tp;
-       void *entry_code;
+       const void *opcode;
 
        if (system_state == SYSTEM_BOOTING) {
                /*
@@ -118,53 +112,19 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
                return true;
        }
 
-       /*
-        * No more space in the vector, tell upper layer to apply
-        * the queue before continuing.
-        */
-       if (tp_vec_nr == TP_VEC_MAX)
-               return false;
-
-       tp = &tp_vec[tp_vec_nr];
-
-       entry_code = (void *)jump_entry_code(entry);
-
-       /*
-        * The INT3 handler will do a bsearch in the queue, so we need entries
-        * to be sorted. We can survive an unsorted list by rejecting the entry,
-        * forcing the generic jump_label code to apply the queue. Warning once,
-        * to raise the attention to the case of an unsorted entry that is
-        * better not happen, because, in the worst case we will perform in the
-        * same way as we do without batching - with some more overhead.
-        */
-       if (tp_vec_nr > 0) {
-               int prev = tp_vec_nr - 1;
-               struct text_poke_loc *prev_tp = &tp_vec[prev];
-
-               if (WARN_ON_ONCE(prev_tp->addr > entry_code))
-                       return false;
-       }
-
-       __jump_label_set_jump_code(entry, type,
-                                  (union jump_code_union *)&tp->text, 0);
-
-       text_poke_loc_init(tp, entry_code, NULL, JUMP_LABEL_NOP_SIZE, NULL);
-
-       tp_vec_nr++;
-
+       mutex_lock(&text_mutex);
+       opcode = __jump_label_set_jump_code(entry, type, 0);
+       text_poke_queue((void *)jump_entry_code(entry),
+                       opcode, JUMP_LABEL_NOP_SIZE, NULL);
+       mutex_unlock(&text_mutex);
        return true;
 }
 
 void arch_jump_label_transform_apply(void)
 {
-       if (!tp_vec_nr)
-               return;
-
        mutex_lock(&text_mutex);
-       text_poke_bp_batch(tp_vec, tp_vec_nr);
+       text_poke_finish();
        mutex_unlock(&text_mutex);
-
-       tp_vec_nr = 0;
 }
 
 static enum {
@@ -193,5 +153,5 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
                        jlstate = JL_STATE_NO_UPDATE;
        }
        if (jlstate == JL_STATE_UPDATE)
-               __jump_label_transform(entry, type, 1);
+               jump_label_transform(entry, type, 1);
 }
index d2f4e706a428c61090973b43bdaa986f3d4b7558..f293d872602a4db853308ff4426f0804494d5724 100644 (file)
@@ -177,7 +177,7 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
         * acpi_rsdp=<addr> on kernel command line to make second kernel boot
         * without efi.
         */
-       if (efi_enabled(EFI_OLD_MEMMAP))
+       if (efi_have_uv1_memmap())
                return 0;
 
        params->secure_boot = boot_params.secure_boot;
index 4f13af7cbcdb17f169e0a2a02003573f08442da5..4d7022a740ab0b3f8dd6e79644e5b4e1080642ff 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/frame.h>
 #include <linux/kasan.h>
 #include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -119,14 +120,14 @@ __synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
 void synthesize_reljump(void *dest, void *from, void *to)
 {
-       __synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
+       __synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
 }
 NOKPROBE_SYMBOL(synthesize_reljump);
 
 /* Insert a call instruction at address 'from', which calls address 'to'.*/
 void synthesize_relcall(void *dest, void *from, void *to)
 {
-       __synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
+       __synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
 }
 NOKPROBE_SYMBOL(synthesize_relcall);
 
@@ -301,7 +302,7 @@ static int can_probe(unsigned long paddr)
                 * Another debugging subsystem might insert this breakpoint.
                 * In that case, we can't recover it.
                 */
-               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+               if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
                        return 0;
                addr += insn.length;
        }
@@ -356,7 +357,7 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
                return 0;
 
        /* Another subsystem puts a breakpoint, failed to recover */
-       if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+       if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
                return 0;
 
        /* We should not singlestep on the exception masking instructions */
@@ -400,14 +401,14 @@ static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
        int len = insn->length;
 
        if (can_boost(insn, p->addr) &&
-           MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
+           MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
                /*
                 * These instructions can be executed directly if it
                 * jumps back to correct address.
                 */
                synthesize_reljump(buf + len, p->ainsn.insn + len,
                                   p->addr + insn->length);
-               len += RELATIVEJUMP_SIZE;
+               len += JMP32_INSN_SIZE;
                p->ainsn.boostable = true;
        } else {
                p->ainsn.boostable = false;
@@ -501,12 +502,14 @@ int arch_prepare_kprobe(struct kprobe *p)
 
 void arch_arm_kprobe(struct kprobe *p)
 {
-       text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
+       text_poke(p->addr, ((unsigned char []){INT3_INSN_OPCODE}), 1);
+       text_poke_sync();
 }
 
 void arch_disarm_kprobe(struct kprobe *p)
 {
        text_poke(p->addr, &p->opcode, 1);
+       text_poke_sync();
 }
 
 void arch_remove_kprobe(struct kprobe *p)
@@ -609,7 +612,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
        regs->flags |= X86_EFLAGS_TF;
        regs->flags &= ~X86_EFLAGS_IF;
        /* single step inline if the instruction is an int3 */
-       if (p->opcode == BREAKPOINT_INSTRUCTION)
+       if (p->opcode == INT3_INSN_OPCODE)
                regs->ip = (unsigned long)p->addr;
        else
                regs->ip = (unsigned long)p->ainsn.insn;
@@ -695,7 +698,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
                                reset_current_kprobe();
                        return 1;
                }
-       } else if (*addr != BREAKPOINT_INSTRUCTION) {
+       } else if (*addr != INT3_INSN_OPCODE) {
                /*
                 * The breakpoint instruction was removed right
                 * after we hit it.  Another cpu has removed
index 8900329c28a7104f68a4a38c8816e3d9dc6c5edd..3f45b5c43a71c51042250b7354cbfb962019579b 100644 (file)
@@ -38,7 +38,7 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
        long offs;
        int i;
 
-       for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
+       for (i = 0; i < JMP32_INSN_SIZE; i++) {
                kp = get_kprobe((void *)addr - i);
                /* This function only handles jump-optimized kprobe */
                if (kp && kprobe_optimized(kp)) {
@@ -62,10 +62,10 @@ found:
 
        if (addr == (unsigned long)kp->addr) {
                buf[0] = kp->opcode;
-               memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+               memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
        } else {
                offs = addr - (unsigned long)kp->addr - 1;
-               memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
+               memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
        }
 
        return (unsigned long)buf;
@@ -141,8 +141,6 @@ STACK_FRAME_NON_STANDARD(optprobe_template_func);
 #define TMPL_END_IDX \
        ((long)optprobe_template_end - (long)optprobe_template_entry)
 
-#define INT3_SIZE sizeof(kprobe_opcode_t)
-
 /* Optimized kprobe call back function: called from optinsn */
 static void
 optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
@@ -162,7 +160,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
                regs->cs |= get_kernel_rpl();
                regs->gs = 0;
 #endif
-               regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
+               regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
                regs->orig_ax = ~0UL;
 
                __this_cpu_write(current_kprobe, &op->kp);
@@ -179,7 +177,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
        struct insn insn;
        int len = 0, ret;
 
-       while (len < RELATIVEJUMP_SIZE) {
+       while (len < JMP32_INSN_SIZE) {
                ret = __copy_instruction(dest + len, src + len, real + len, &insn);
                if (!ret || !can_boost(&insn, src + len))
                        return -EINVAL;
@@ -271,7 +269,7 @@ static int can_optimize(unsigned long paddr)
                return 0;
 
        /* Check there is enough space for a relative jump. */
-       if (size - offset < RELATIVEJUMP_SIZE)
+       if (size - offset < JMP32_INSN_SIZE)
                return 0;
 
        /* Decode instructions */
@@ -290,15 +288,15 @@ static int can_optimize(unsigned long paddr)
                kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
                insn_get_length(&insn);
                /* Another subsystem puts a breakpoint */
-               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+               if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
                        return 0;
                /* Recover address */
                insn.kaddr = (void *)addr;
                insn.next_byte = (void *)(addr + insn.length);
                /* Check any instructions don't jump into target */
                if (insn_is_indirect_jump(&insn) ||
-                   insn_jump_into_range(&insn, paddr + INT3_SIZE,
-                                        RELATIVE_ADDR_SIZE))
+                   insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
+                                        DISP32_SIZE))
                        return 0;
                addr += insn.length;
        }
@@ -374,7 +372,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
         * Verify if the address gap is in 2GB range, because this uses
         * a relative jump.
         */
-       rel = (long)slot - (long)op->kp.addr + RELATIVEJUMP_SIZE;
+       rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
        if (abs(rel) > 0x7fffffff) {
                ret = -ERANGE;
                goto err;
@@ -401,7 +399,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
        /* Set returning jmp instruction at the tail of out-of-line buffer */
        synthesize_reljump(buf + len, slot + len,
                           (u8 *)op->kp.addr + op->optinsn.size);
-       len += RELATIVEJUMP_SIZE;
+       len += JMP32_INSN_SIZE;
 
        /* We have to use text_poke() for instruction buffer because it is RO */
        text_poke(slot, buf, len);
@@ -416,49 +414,50 @@ err:
 }
 
 /*
- * Replace breakpoints (int3) with relative jumps.
+ * Replace breakpoints (INT3) with relative jumps (JMP.d32).
  * Caller must call with locking kprobe_mutex and text_mutex.
+ *
+ * The caller will have installed a regular kprobe and after that issued
+ * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
+ * the 4 bytes after the INT3 are unused and can now be overwritten.
  */
 void arch_optimize_kprobes(struct list_head *oplist)
 {
        struct optimized_kprobe *op, *tmp;
-       u8 insn_buff[RELATIVEJUMP_SIZE];
+       u8 insn_buff[JMP32_INSN_SIZE];
 
        list_for_each_entry_safe(op, tmp, oplist, list) {
                s32 rel = (s32)((long)op->optinsn.insn -
-                       ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+                       ((long)op->kp.addr + JMP32_INSN_SIZE));
 
                WARN_ON(kprobe_disabled(&op->kp));
 
                /* Backup instructions which will be replaced by jump address */
-               memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
-                      RELATIVE_ADDR_SIZE);
+               memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
+                      DISP32_SIZE);
 
-               insn_buff[0] = RELATIVEJUMP_OPCODE;
+               insn_buff[0] = JMP32_INSN_OPCODE;
                *(s32 *)(&insn_buff[1]) = rel;
 
-               text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE, NULL);
+               text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);
 
                list_del_init(&op->list);
        }
 }
 
-/* Replace a relative jump with a breakpoint (int3).  */
+/*
+ * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
+ *
+ * After that, we can restore the 4 bytes after the INT3 to undo what
+ * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
+ * unused once the INT3 lands.
+ */
 void arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
-       u8 insn_buff[RELATIVEJUMP_SIZE];
-       u8 emulate_buff[RELATIVEJUMP_SIZE];
-
-       /* Set int3 to first byte for kprobes */
-       insn_buff[0] = BREAKPOINT_INSTRUCTION;
-       memcpy(insn_buff + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-
-       emulate_buff[0] = RELATIVEJUMP_OPCODE;
-       *(s32 *)(&emulate_buff[1]) = (s32)((long)op->optinsn.insn -
-                       ((long)op->kp.addr + RELATIVEJUMP_SIZE));
-
-       text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE,
-                    emulate_buff);
+       arch_arm_kprobe(&op->kp);
+       text_poke(op->kp.addr + INT3_INSN_SIZE,
+                 op->optinsn.copied_insn, DISP32_SIZE);
+       text_poke_sync();
 }
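
For reference, the byte layout at the probed address in the two states
(JMP32_INSN_SIZE == 5, INT3_INSN_SIZE == 1, DISP32_SIZE == 4):

	/*
	 * optimized:    E9 <rel32 to op->optinsn.insn>   5-byte JMP.d32
	 * unoptimized:  CC <4 saved original bytes>      INT3 + restored tail
	 *
	 * arch_unoptimize_kprobe() first re-arms the INT3, so the probe
	 * keeps firing, and only then pokes the saved displacement bytes
	 * back; at that point nothing can execute them.
	 */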
 
 /*
index 32ef1ee733b776fa74ced56708181cbb245d20eb..81045aabb6f4595d5eaec7897cd9abf5c181315d 100644 (file)
@@ -245,17 +245,13 @@ NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
 dotraplinkage void
 do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 {
-       enum ctx_state prev_state;
-
        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code, address);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
-               prev_state = exception_enter();
                kvm_async_pf_task_wait((u32)address, !user_mode(regs));
-               exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
index b2463fcb20a8116921203fb246d3b7ffa0ef1e88..c57e1ca70fd100c715926f4f337e6be27645f304 100644 (file)
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
+#include <asm/pgtable_areas.h>
+
+/* This is a multiple of PAGE_SIZE. */
+#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
+
+static inline void *ldt_slot_va(int slot)
+{
+       return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
+}
+
+void load_mm_ldt(struct mm_struct *mm)
+{
+       struct ldt_struct *ldt;
+
+       /* READ_ONCE synchronizes with smp_store_release */
+       ldt = READ_ONCE(mm->context.ldt);
+
+       /*
+        * Any change to mm->context.ldt is followed by an IPI to all
+        * CPUs with the mm active.  The LDT will not be freed until
+        * after the IPI is handled by all such CPUs.  This means that,
+        * if the ldt_struct changes before we return, the values we see
+        * will be safe, and the new values will be loaded before we run
+        * any user code.
+        *
+        * NB: don't try to convert this to use RCU without extreme care.
+        * We would still need IRQs off, because we don't want to change
+        * the local LDT after an IPI loaded a newer value than the one
+        * that we can see.
+        */
+
+       if (unlikely(ldt)) {
+               if (static_cpu_has(X86_FEATURE_PTI)) {
+                       if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
+                               /*
+                                * Whoops -- either the new LDT isn't mapped
+                                * (if slot == -1) or is mapped into a bogus
+                                * slot (if slot > 1).
+                                */
+                               clear_LDT();
+                               return;
+                       }
+
+                       /*
+                        * If page table isolation is enabled, ldt->entries
+                        * will not be mapped in the userspace pagetables.
+                        * Tell the CPU to access the LDT through the alias
+                        * at ldt_slot_va(ldt->slot).
+                        */
+                       set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
+               } else {
+                       set_ldt(ldt->entries, ldt->nr_entries);
+               }
+       } else {
+               clear_LDT();
+       }
+}
+
+void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
+{
+       /*
+        * Load the LDT if either the old or new mm had an LDT.
+        *
+        * An mm will never go from having an LDT to not having an LDT.  Two
+        * mms never share an LDT, so we don't gain anything by checking to
+        * see whether the LDT changed.  There's also no guarantee that
+        * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
+        * then prev->context.ldt will also be non-NULL.
+        *
+        * If we really cared, we could optimize the case where prev == next
+        * and we're exiting lazy mode.  Most of the time, if this happens,
+        * we don't actually need to reload LDTR, but modify_ldt() is mostly
+        * used by legacy code and emulators where we don't need this level of
+        * performance.
+        *
+        * This uses | instead of || because it generates better code.
+        */
+       if (unlikely((unsigned long)prev->context.ldt |
+                    (unsigned long)next->context.ldt))
+               load_mm_ldt(next);
+
+       DEBUG_LOCKS_WARN_ON(preemptible());
+}
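
The '|' versus '||' point in the comment above, in isolation: '||' has
short-circuit semantics and may compile to two test-and-branch sequences,
while '|' ORs the two words and branches once. A trivial illustration
(hypothetical helper):

	/* Equivalent null tests; the bitwise form evaluates both words
	 * unconditionally and tests the OR once. */
	static int either_nonnull(const void *a, const void *b)
	{
		return ((unsigned long)a | (unsigned long)b) != 0;
	}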
 
 static void refresh_ldt_segments(void)
 {
index 323499f48858b87b7dc034574e53f2896b7cc395..5052ced433734be371a619ac31daa71516f539ec 100644 (file)
@@ -124,7 +124,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        regs->flags             = X86_EFLAGS_IF;
-       force_iret();
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
index 506d66830d4d7ea70444a54f5be7aa8ad9c73860..ffd497804dbc3406426eb95589db30c90111d255 100644 (file)
@@ -394,7 +394,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
        regs->cs                = _cs;
        regs->ss                = _ss;
        regs->flags             = X86_EFLAGS_IF;
-       force_iret();
 }
 
 void
index 1daf8f2aa21f515e88243a6178d4b2709f5151eb..896d74cb5081a8c898fcd3327c7a30529a6d4370 100644 (file)
@@ -110,7 +110,7 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
        }
 
        /* use bits 31:14, 16 kB aligned */
-       rcba_base = ioremap_nocache(rcba, 0x4000);
+       rcba_base = ioremap(rcba, 0x4000);
        if (rcba_base == NULL) {
                dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
                        "cannot force enable HPET\n");
index b5ac9932bcf68edd504c1e17bc83e3f6b973b27b..2441b64d061f3ef8c541f9bbb1f0f6873d6af4b8 100644 (file)
 #include <asm/kaslr.h>
 #include <asm/mce.h>
 #include <asm/mtrr.h>
+#include <asm/realmode.h>
 #include <asm/olpc_ofw.h>
 #include <asm/pci-direct.h>
 #include <asm/prom.h>
 #include <asm/proto.h>
 #include <asm/unwind.h>
 #include <asm/vsyscall.h>
+#include <linux/vmalloc.h>
 
 /*
  * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
index 8eb7193e158dd8741beb11eece6e5f11ca28b0dd..8a29573851a3273b2ca9831f8dab87f41d1f1c76 100644 (file)
@@ -151,8 +151,6 @@ static int restore_sigcontext(struct pt_regs *regs,
 
        err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
 
-       force_iret();
-
        return err;
 }
 
index 01f0e2263b86b2b7f87df7e09f03f69229e0db2f..298fc1edd9c9521ca976810aba3775dfa5dbaaf7 100644 (file)
@@ -90,11 +90,11 @@ __init int create_simplefb(const struct screen_info *si,
        if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
                size <<= 16;
        length = mode->height * mode->stride;
-       length = PAGE_ALIGN(length);
        if (length > size) {
                printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
                return -EINVAL;
        }
+       length = PAGE_ALIGN(length);
 
        /* setup IORESOURCE_MEM as framebuffer memory */
        memset(&res, 0, sizeof(res));
index 4c61f07138320617d6dca23a450e82ce1fd622f2..b89f6ac6a0c01f0c4fa1c8ccfa954fc6303f1e39 100644 (file)
@@ -354,7 +354,7 @@ static ssize_t tboot_log_read(struct file *file, char __user *user_buf, size_t c
        void *kbuf;
        int ret = -EFAULT;
 
-       log_base = ioremap_nocache(TBOOT_SERIAL_LOG_ADDR, TBOOT_SERIAL_LOG_SIZE);
+       log_base = ioremap(TBOOT_SERIAL_LOG_ADDR, TBOOT_SERIAL_LOG_SIZE);
        if (!log_base)
                return ret;
 
index 05da6b5b167bc4f72211f9fa98bd19f9aaf1dde2..f19de6f45d48912b4be166830842a3cce5b380a2 100644 (file)
@@ -572,15 +572,6 @@ NOKPROBE_SYMBOL(do_general_protection);
 
 dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 {
-#ifdef CONFIG_DYNAMIC_FTRACE
-       /*
-        * ftrace must be first, everything else may cause a recursive crash.
-        * See note by declaration of modifying_ftrace_code in ftrace.c
-        */
-       if (unlikely(atomic_read(&modifying_ftrace_code)) &&
-           ftrace_int3_handler(regs))
-               return;
-#endif
        if (poke_int3_handler(regs))
                return;
 
index 332ae6530fa8813e5c6dda09f6457e1ded0eb436..e9cc182aa97eed1d6d23858f5a89b2f663b1c6e9 100644 (file)
@@ -187,6 +187,8 @@ static struct orc_entry *orc_find(unsigned long ip)
        return orc_ftrace_find(ip);
 }
 
+#ifdef CONFIG_MODULES
+
 static void orc_sort_swap(void *_a, void *_b, int size)
 {
        struct orc_entry *orc_a, *orc_b;
@@ -229,7 +231,6 @@ static int orc_sort_cmp(const void *_a, const void *_b)
        return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
 }
 
-#ifdef CONFIG_MODULES
 void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
                        void *_orc, size_t orc_size)
 {
@@ -273,9 +274,11 @@ void __init unwind_init(void)
                return;
        }
 
-       /* Sort the .orc_unwind and .orc_unwind_ip tables: */
-       sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp,
-            orc_sort_swap);
+       /*
+        * Note: the .orc_unwind and .orc_unwind_ip tables were already
+        * sorted at build time via the 'sorttable' tool.
+        * They are ready for binary search straight away; no need to sort them.
+        */
 
        /* Initialize the fast lookup table: */
        lookup_num_blocks = orc_lookup_end - orc_lookup;
index a76c12b38e925a378cab92dba4ca5638dcf29645..91d55454e7022aa7b93a1a1da1202cded5dc3e99 100644 (file)
@@ -381,7 +381,6 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
                mark_screen_rdonly(tsk->mm);
 
        memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
-       force_iret();
        return regs->ax;
 }
 
index 3a1a819da1376c6c4e4aa0189bf33b64470112b7..e3296aa028feb766ba79062d2acfe885becfd59b 100644 (file)
@@ -193,12 +193,10 @@ SECTIONS
                __vvar_beginning_hack = .;
 
                /* Place all vvars at the offsets in asm/vvar.h. */
-#define EMIT_VVAR(name, offset)                        \
+#define EMIT_VVAR(name, offset)                                \
                . = __vvar_beginning_hack + offset;     \
                *(.vvar_ ## name)
-#define __VVAR_KERNEL_LDS
 #include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR
 
                /*
index ce89430a7f8011aa5d49e1ee99dcae19a9862cf1..23e25f3034c2d43a7df2ac1971ef51c0973fd9a8 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/irq.h>
 #include <asm/io_apic.h>
 #include <asm/hpet.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/tsc.h>
 #include <asm/iommu.h>
 #include <asm/mach_traps.h>
index cfafa320a8cf75e8437153fc36310a1f5bc3d4b3..cf55629ff0ff642ea85fb5783f2fd4df678b9d7c 100644 (file)
@@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
                        entry->edx |= F(SPEC_CTRL);
                if (boot_cpu_has(X86_FEATURE_STIBP))
                        entry->edx |= F(INTEL_STIBP);
-               if (boot_cpu_has(X86_FEATURE_SSBD))
+               if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                   boot_cpu_has(X86_FEATURE_AMD_SSBD))
                        entry->edx |= F(SPEC_CTRL_SSBD);
                /*
                 * We emulate ARCH_CAPABILITIES in software even
@@ -759,7 +760,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
                        entry->ebx |= F(AMD_IBRS);
                if (boot_cpu_has(X86_FEATURE_STIBP))
                        entry->ebx |= F(AMD_STIBP);
-               if (boot_cpu_has(X86_FEATURE_SSBD))
+               if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                   boot_cpu_has(X86_FEATURE_AMD_SSBD))
                        entry->ebx |= F(AMD_SSBD);
                if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        entry->ebx |= F(AMD_SSB_NO);
index 6f92b40d798cab7d4b9f4a451621319e687e29df..a32b847a8089dd0238f6c6aacdefd26a12b297f8 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/kthread.h>
 
 #include <asm/page.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/cmpxchg.h>
 #include <asm/e820/api.h>
 #include <asm/io.h>
index 337830d7a59c75adfb1fc81be3ee1c51e092b18e..7ff00ea64e4fe558f11a8f4c8961551edf35d84b 100644 (file)
 SYM_FUNC_START_ALIAS(memmove)
 SYM_FUNC_START(__memmove)
 
-       /* Handle more 32 bytes in loop */
        mov %rdi, %rax
-       cmp $0x20, %rdx
-       jb      1f
 
        /* Decide forward/backward copy mode */
        cmp %rdi, %rsi
@@ -42,7 +39,9 @@ SYM_FUNC_START(__memmove)
        cmp %rdi, %r8
        jg 2f
 
+       /* FSRM implies ERMS => no length checks, do the copy directly */
 .Lmemmove_begin_forward:
+       ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
        ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
 
        /*
@@ -114,6 +113,8 @@ SYM_FUNC_START(__memmove)
         */
        .p2align 4
 2:
+       cmp $0x20, %rdx
+       jb 1f
        cmp $680, %rdx
        jb 6f
        cmp %dil, %sil
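
The forward/backward decision made above is the classic memmove overlap rule;
in C it reads roughly as follows (an illustrative reimplementation, not the
kernel's optimized copy):

	#include <stddef.h>

	static void *naive_memmove(void *dst, const void *src, size_t n)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if (d < s || d >= s + n) {
			while (n--)
				*d++ = *s++;	/* no overlap hazard forward */
		} else {
			d += n;
			s += n;
			while (n--)
				*--d = *--s;	/* copy backward to avoid clobber */
		}
		return dst;
	}
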
index 3b89c201ac267d8e1dba0af2acd4b39705c3f563..345848f270e3b445c3d31a0d8cbee9278ffadb82 100644 (file)
@@ -12,8 +12,10 @@ CFLAGS_REMOVE_mem_encrypt.o          = -pg
 CFLAGS_REMOVE_mem_encrypt_identity.o   = -pg
 endif
 
-obj-y  :=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-           pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o
+obj-y                          :=  init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
+                                   pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o
+
+obj-y                          += pat/
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
@@ -23,8 +25,6 @@ CFLAGS_mem_encrypt_identity.o := $(nostackp)
 
 CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
 
-obj-$(CONFIG_X86_PAT)          += pat_interval.o
-
 obj-$(CONFIG_X86_32)           += pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
index 304d31d8cbbc5c6ba18c9b3d55a4df532a245272..fa4ea09593abb0b3f63cee1b910f78f4828789b6 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/efi.h>                   /* efi_recover_from_page_fault()*/
 #include <asm/desc.h>                  /* store_idt(), ...             */
 #include <asm/cpu_entry_area.h>                /* exception stack              */
+#include <asm/pgtable_areas.h>         /* VMALLOC_START, ...           */
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -1486,27 +1487,6 @@ good_area:
 }
 NOKPROBE_SYMBOL(do_user_addr_fault);
 
-/*
- * Explicitly marked noinline such that the function tracer sees this as the
- * page_fault entry point.
- */
-static noinline void
-__do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
-               unsigned long address)
-{
-       prefetchw(&current->mm->mmap_sem);
-
-       if (unlikely(kmmio_fault(regs, address)))
-               return;
-
-       /* Was the fault on kernel-controlled part of the address space? */
-       if (unlikely(fault_in_kernel_space(address)))
-               do_kern_addr_fault(regs, hw_error_code, address);
-       else
-               do_user_addr_fault(regs, hw_error_code, address);
-}
-NOKPROBE_SYMBOL(__do_page_fault);
-
 static __always_inline void
 trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
                         unsigned long address)
@@ -1521,13 +1501,19 @@ trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
 }
 
 dotraplinkage void
-do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
+               unsigned long address)
 {
-       enum ctx_state prev_state;
+       prefetchw(&current->mm->mmap_sem);
+       trace_page_fault_entries(regs, hw_error_code, address);
 
-       prev_state = exception_enter();
-       trace_page_fault_entries(regs, error_code, address);
-       __do_page_fault(regs, error_code, address);
-       exception_exit(prev_state);
+       if (unlikely(kmmio_fault(regs, address)))
+               return;
+
+       /* Was the fault on kernel-controlled part of the address space? */
+       if (unlikely(fault_in_kernel_space(address)))
+               do_kern_addr_fault(regs, hw_error_code, address);
+       else
+               do_user_addr_fault(regs, hw_error_code, address);
 }
 NOKPROBE_SYMBOL(do_page_fault);
index 930edeb41ec33417755d38293f85ad1717cdee90..23df4885bbede4a72763e95b5dbab74bcfb1c3a5 100644 (file)
@@ -52,6 +52,7 @@
 #include <asm/page_types.h>
 #include <asm/cpu_entry_area.h>
 #include <asm/init.h>
+#include <asm/pgtable_areas.h>
 
 #include "mm_internal.h"
 
@@ -865,43 +866,13 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct zone *zone;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
 
 int kernel_set_to_readonly __read_mostly;
 
-void set_kernel_text_rw(void)
-{
-       unsigned long start = PFN_ALIGN(_text);
-       unsigned long size = PFN_ALIGN(_etext) - start;
-
-       if (!kernel_set_to_readonly)
-               return;
-
-       pr_debug("Set kernel text: %lx - %lx for read write\n",
-                start, start+size);
-
-       set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
-}
-
-void set_kernel_text_ro(void)
-{
-       unsigned long start = PFN_ALIGN(_text);
-       unsigned long size = PFN_ALIGN(_etext) - start;
-
-       if (!kernel_set_to_readonly)
-               return;
-
-       pr_debug("Set kernel text: %lx - %lx for read only\n",
-                start, start+size);
-
-       set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-}
-
 static void mark_nxdata_nx(void)
 {
        /*
index dcb9bc961b39c3e2928eb4d15f681c8dad29591e..abbdecb75fad8fe2d7b1f671fefc18b0c27e6e40 100644 (file)
@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
-       struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
-       struct zone *zone = page_zone(page);
 
-       __remove_pages(zone, start_pfn, nr_pages, altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
        kernel_physical_mapping_remove(start, start + size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -1260,42 +1258,6 @@ void __init mem_init(void)
 
 int kernel_set_to_readonly;
 
-void set_kernel_text_rw(void)
-{
-       unsigned long start = PFN_ALIGN(_text);
-       unsigned long end = PFN_ALIGN(_etext);
-
-       if (!kernel_set_to_readonly)
-               return;
-
-       pr_debug("Set kernel text: %lx - %lx for read write\n",
-                start, end);
-
-       /*
-        * Make the kernel identity mapping for text RW. Kernel text
-        * mapping will always be RO. Refer to the comment in
-        * static_protections() in pageattr.c
-        */
-       set_memory_rw(start, (end - start) >> PAGE_SHIFT);
-}
-
-void set_kernel_text_ro(void)
-{
-       unsigned long start = PFN_ALIGN(_text);
-       unsigned long end = PFN_ALIGN(_etext);
-
-       if (!kernel_set_to_readonly)
-               return;
-
-       pr_debug("Set kernel text: %lx - %lx for read only\n",
-                start, end);
-
-       /*
-        * Set the kernel identity mapping for text RO.
-        */
-       set_memory_ro(start, (end - start) >> PAGE_SHIFT);
-}
-
 void mark_rodata_ro(void)
 {
        unsigned long start = PFN_ALIGN(_text);
index 6748b4c2baff32f1cb1029beef1de8906a0d9ede..f60398aeb6445fb4537fb7be3fdac7f12e411f81 100644 (file)
@@ -4,7 +4,7 @@
  */
 
 #include <asm/iomap.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
 
@@ -26,7 +26,7 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
        if (!is_io_mapping_possible(base, size))
                return -EINVAL;
 
-       ret = io_reserve_memtype(base, base + size, &pcm);
+       ret = memtype_reserve_io(base, base + size, &pcm);
        if (ret)
                return ret;
 
@@ -40,7 +40,7 @@ EXPORT_SYMBOL_GPL(iomap_create_wc);
 
 void iomap_free(resource_size_t base, unsigned long size)
 {
-       io_free_memtype(base, base + size);
+       memtype_free_io(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
index b3a2936377b56babf1bf6cdfc62afe0bc85d926d..44e4beb4239f93bb83876bd21ab6f8007460be71 100644 (file)
@@ -24,7 +24,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/setup.h>
 
 #include "physaddr.h"
@@ -196,10 +196,10 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
        phys_addr &= PHYSICAL_PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
-       retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
+       retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
                                                pcm, &new_pcm);
        if (retval) {
-               printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
+               printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
                return NULL;
        }
 
@@ -255,7 +255,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
 
-       if (kernel_map_sync_memtype(phys_addr, size, pcm))
+       if (memtype_kernel_map_sync(phys_addr, size, pcm))
                goto err_free_area;
 
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@ -275,7 +275,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 err_free_area:
        free_vm_area(area);
 err_free_memtype:
-       free_memtype(phys_addr, phys_addr + size);
+       memtype_free(phys_addr, phys_addr + size);
        return NULL;
 }
 
@@ -451,7 +451,7 @@ void iounmap(volatile void __iomem *addr)
                return;
        }
 
-       free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
+       memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));
 
        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
diff --git a/arch/x86/mm/pat/Makefile b/arch/x86/mm/pat/Makefile
new file mode 100644 (file)
index 0000000..ea464c9
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y                          := set_memory.o memtype.o
+
+obj-$(CONFIG_X86_PAT)          += memtype_interval.o
similarity index 84%
rename from arch/x86/mm/pat.c
rename to arch/x86/mm/pat/memtype.c
index 2d758e19ef22d4653386b0a827dfeea7f0695f98..394be8611748ade72487c21b512d0cc26016c7c5 100644 (file)
@@ -1,11 +1,34 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Handle caching attributes in page tables (PAT)
+ * Page Attribute Table (PAT) support: handle memory caching attributes in page tables.
  *
  * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  *          Suresh B Siddha <suresh.b.siddha@intel.com>
  *
  * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
+ *
+ * Basic principles:
+ *
+ * PAT is a CPU feature supported by all modern x86 CPUs, allowing the firmware and
+ * the kernel to set one of a handful of 'caching type' attributes for physical
+ * memory ranges: uncached, write-combining, write-through, write-protected,
+ * and the most commonly used and default attribute: write-back caching.
+ *
+ * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is
+ * a hardware interface to enumerate a limited number of physical memory ranges
+ * and set their caching attributes explicitly, programmed into the CPU via MSRs.
+ * Even modern CPUs have MTRRs enabled - but these are typically not touched
+ * by the kernel or by user-space (such as the X server); we rely on PAT for any
+ * additional cache attribute logic.
+ *
+ * PAT doesn't work via explicit memory ranges, but uses page table entries to add
+ * cache attribute information to the mapped memory range: three bits are used
+ * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), and the 8 possible values are mapped by the
+ * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT).
+ *
+ * ( There's a metric ton of finer details, such as compatibility with CPU quirks
+ *   that only support 4 types of PAT entries, and interaction with MTRRs, see
+ *   below for details. )
  */
 
 #include <linux/seq_file.h>
 #include <asm/mtrr.h>
 #include <asm/page.h>
 #include <asm/msr.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/io.h>
 
-#include "pat_internal.h"
-#include "mm_internal.h"
+#include "memtype.h"
+#include "../mm_internal.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
 
-static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_bp_initialized;
 static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
-static bool __read_mostly pat_initialized;
-static bool __read_mostly init_cm_done;
+static bool __read_mostly pat_bp_enabled;
+static bool __read_mostly pat_cm_initialized;
 
-void pat_disable(const char *reason)
+/*
+ * PAT support is enabled by default, but can be disabled for
+ * various user-requested or hardware-forced reasons:
+ */
+void pat_disable(const char *msg_reason)
 {
        if (pat_disabled)
                return;
 
-       if (boot_cpu_done) {
+       if (pat_bp_initialized) {
                WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
                return;
        }
 
        pat_disabled = true;
-       pr_info("x86/PAT: %s\n", reason);
+       pr_info("x86/PAT: %s\n", msg_reason);
 }
 
 static int __init nopat(char *str)
 {
-       pat_disable("PAT support disabled.");
+       pat_disable("PAT support disabled via boot option.");
        return 0;
 }
 early_param("nopat", nopat);
 
 bool pat_enabled(void)
 {
-       return pat_initialized;
+       return pat_bp_enabled;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -197,6 +224,8 @@ static void __init_cache_modes(u64 pat)
        char pat_msg[33];
        int i;
 
+       WARN_ON_ONCE(pat_cm_initialized);
+
        pat_msg[32] = 0;
        for (i = 7; i >= 0; i--) {
                cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
@@ -205,28 +234,28 @@ static void __init_cache_modes(u64 pat)
        }
        pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
 
-       init_cm_done = true;
+       pat_cm_initialized = true;
 }
 
 #define PAT(x, y)      ((u64)PAT_ ## y << ((x)*8))
 
-static void pat_bsp_init(u64 pat)
+static void pat_bp_init(u64 pat)
 {
        u64 tmp_pat;
 
        if (!boot_cpu_has(X86_FEATURE_PAT)) {
-               pat_disable("PAT not supported by CPU.");
+               pat_disable("PAT not supported by the CPU.");
                return;
        }
 
        rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
        if (!tmp_pat) {
-               pat_disable("PAT MSR is 0, disabled.");
+               pat_disable("PAT support disabled by the firmware.");
                return;
        }
 
        wrmsrl(MSR_IA32_CR_PAT, pat);
-       pat_initialized = true;
+       pat_bp_enabled = true;
 
        __init_cache_modes(pat);
 }
@@ -248,7 +277,7 @@ void init_cache_modes(void)
 {
        u64 pat = 0;
 
-       if (init_cm_done)
+       if (pat_cm_initialized)
                return;
 
        if (boot_cpu_has(X86_FEATURE_PAT)) {
@@ -291,7 +320,7 @@ void init_cache_modes(void)
 }
 
 /**
- * pat_init - Initialize PAT MSR and PAT table
+ * pat_init - Initialize the PAT MSR and PAT table on the current CPU
  *
  * This function initializes PAT MSR and PAT table with an OS-defined value
  * to enable additional cache attributes, WC, WT and WP.
@@ -305,6 +334,10 @@ void pat_init(void)
        u64 pat;
        struct cpuinfo_x86 *c = &boot_cpu_data;
 
+#ifndef CONFIG_X86_PAT
+       pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is not set in the kernel configuration.\n");
+#endif
+
        if (pat_disabled)
                return;
 
@@ -364,9 +397,9 @@ void pat_init(void)
                      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
        }
 
-       if (!boot_cpu_done) {
-               pat_bsp_init(pat);
-               boot_cpu_done = true;
+       if (!pat_bp_initialized) {
+               pat_bp_init(pat);
+               pat_bp_initialized = true;
        } else {
                pat_ap_init(pat);
        }
@@ -542,10 +575,10 @@ static u64 sanitize_phys(u64 address)
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
                    enum page_cache_mode *new_type)
 {
-       struct memtype *new;
+       struct memtype *entry_new;
        enum page_cache_mode actual_type;
        int is_range_ram;
        int err = 0;
@@ -593,22 +626,22 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
                return -EINVAL;
        }
 
-       new  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
-       if (!new)
+       entry_new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
+       if (!entry_new)
                return -ENOMEM;
 
-       new->start      = start;
-       new->end        = end;
-       new->type       = actual_type;
+       entry_new->start = start;
+       entry_new->end   = end;
+       entry_new->type  = actual_type;
 
        spin_lock(&memtype_lock);
 
-       err = memtype_check_insert(new, new_type);
+       err = memtype_check_insert(entry_new, new_type);
        if (err) {
-               pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+               pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
                        start, end - 1,
-                       cattr_name(new->type), cattr_name(req_type));
-               kfree(new);
+                       cattr_name(entry_new->type), cattr_name(req_type));
+               kfree(entry_new);
                spin_unlock(&memtype_lock);
 
                return err;
@@ -616,18 +649,17 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
        spin_unlock(&memtype_lock);
 
-       dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
-               start, end - 1, cattr_name(new->type), cattr_name(req_type),
+       dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+               start, end - 1, cattr_name(entry_new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");
 
        return err;
 }
 
-int free_memtype(u64 start, u64 end)
+int memtype_free(u64 start, u64 end)
 {
-       int err = -EINVAL;
        int is_range_ram;
-       struct memtype *entry;
+       struct memtype *entry_old;
 
        if (!pat_enabled())
                return 0;
@@ -640,28 +672,24 @@ int free_memtype(u64 start, u64 end)
                return 0;
 
        is_range_ram = pat_pagerange_is_ram(start, end);
-       if (is_range_ram == 1) {
-
-               err = free_ram_pages_type(start, end);
-
-               return err;
-       } else if (is_range_ram < 0) {
+       if (is_range_ram == 1)
+               return free_ram_pages_type(start, end);
+       if (is_range_ram < 0)
                return -EINVAL;
-       }
 
        spin_lock(&memtype_lock);
-       entry = memtype_erase(start, end);
+       entry_old = memtype_erase(start, end);
        spin_unlock(&memtype_lock);
 
-       if (IS_ERR(entry)) {
+       if (IS_ERR(entry_old)) {
                pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid, start, end - 1);
                return -EINVAL;
        }
 
-       kfree(entry);
+       kfree(entry_old);
 
-       dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
+       dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1);
 
        return 0;
 }
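
Worth making explicit at this point: memtype_erase() (moved into memtype_interval.c further down) has a three-way return protocol: ERR_PTR(-EINVAL) when no reservation matches, NULL when an entry was merely shrunk (the mremap case), and the detached entry otherwise. A minimal sketch of a caller mirroring the rewritten memtype_free() above; example_drop_reservation() is an illustrative name, not part of this patch. Note that kfree(NULL) is a no-op, which is why the success path needs no NULL check:

static int example_drop_reservation(u64 start, u64 end)
{
        struct memtype *entry_old;

        spin_lock(&memtype_lock);
        entry_old = memtype_erase(start, end);
        spin_unlock(&memtype_lock);

        if (IS_ERR(entry_old))
                return -EINVAL;

        kfree(entry_old);       /* may be NULL after an mremap shrink */

        return 0;
}
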
@@ -700,6 +728,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
                rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
        spin_unlock(&memtype_lock);
+
        return rettype;
 }
 
@@ -723,7 +752,7 @@ bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
 EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);
 
 /**
- * io_reserve_memtype - Request a memory type mapping for a region of memory
+ * memtype_reserve_io - Request a memory type mapping for a region of memory
  * @start: start (physical address) of the region
  * @end: end (physical address) of the region
  * @type: A pointer to memtype, with requested type. On success, requested
@@ -732,7 +761,7 @@ EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);
  * On success, returns 0
  * On failure, returns non-zero
  */
-int io_reserve_memtype(resource_size_t start, resource_size_t end,
+int memtype_reserve_io(resource_size_t start, resource_size_t end,
                        enum page_cache_mode *type)
 {
        resource_size_t size = end - start;
@@ -742,47 +771,47 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 
        WARN_ON_ONCE(iomem_map_sanity_check(start, size));
 
-       ret = reserve_memtype(start, end, req_type, &new_type);
+       ret = memtype_reserve(start, end, req_type, &new_type);
        if (ret)
                goto out_err;
 
        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;
 
-       if (kernel_map_sync_memtype(start, size, new_type) < 0)
+       if (memtype_kernel_map_sync(start, size, new_type) < 0)
                goto out_free;
 
        *type = new_type;
        return 0;
 
 out_free:
-       free_memtype(start, end);
+       memtype_free(start, end);
        ret = -EBUSY;
 out_err:
        return ret;
 }
 
 /**
- * io_free_memtype - Release a memory type mapping for a region of memory
+ * memtype_free_io - Release a memory type mapping for a region of memory
  * @start: start (physical address) of the region
  * @end: end (physical address) of the region
  */
-void io_free_memtype(resource_size_t start, resource_size_t end)
+void memtype_free_io(resource_size_t start, resource_size_t end)
 {
-       free_memtype(start, end);
+       memtype_free(start, end);
 }
 
 int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
 {
        enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
 
-       return io_reserve_memtype(start, start + size, &type);
+       return memtype_reserve_io(start, start + size, &type);
 }
 EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
 
 void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
 {
-       io_free_memtype(start, start + size);
+       memtype_free_io(start, start + size);
 }
 EXPORT_SYMBOL(arch_io_free_memtype_wc);
 
@@ -839,10 +868,10 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 }
 
 /*
- * Change the memory type for the physial address range in kernel identity
+ * Change the memory type for the physical address range in kernel identity
  * mapping space if that range is a part of identity map.
  */
-int kernel_map_sync_memtype(u64 base, unsigned long size,
+int memtype_kernel_map_sync(u64 base, unsigned long size,
                            enum page_cache_mode pcm)
 {
        unsigned long id_sz;
@@ -851,15 +880,14 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
                return 0;
 
        /*
-        * some areas in the middle of the kernel identity range
-        * are not mapped, like the PCI space.
+        * Some areas in the middle of the kernel identity range
+        * are not mapped, for example the PCI space.
         */
        if (!page_is_ram(base >> PAGE_SHIFT))
                return 0;
 
        id_sz = (__pa(high_memory-1) <= base + size) ?
-                               __pa(high_memory) - base :
-                               size;
+                               __pa(high_memory) - base : size;
 
        if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
                pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
@@ -873,7 +901,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
 
 /*
  * Internal interface to reserve a range of physical memory with prot.
- * Reserved non RAM regions only and after successful reserve_memtype,
+ * Reserves non-RAM regions only. After a successful memtype_reserve(),
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
 static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
@@ -910,14 +938,14 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                return 0;
        }
 
-       ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
+       ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm);
        if (ret)
                return ret;
 
        if (pcm != want_pcm) {
                if (strict_prot ||
                    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
-                       free_memtype(paddr, paddr + size);
+                       memtype_free(paddr, paddr + size);
                        pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
                               current->comm, current->pid,
                               cattr_name(want_pcm),
@@ -935,8 +963,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                     cachemode2protval(pcm));
        }
 
-       if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
-               free_memtype(paddr, paddr + size);
+       if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
+               memtype_free(paddr, paddr + size);
                return -EINVAL;
        }
        return 0;
@@ -952,7 +980,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 
        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
-               free_memtype(paddr, paddr + size);
+               memtype_free(paddr, paddr + size);
 }
 
 /*
@@ -1099,25 +1127,30 @@ EXPORT_SYMBOL_GPL(pgprot_writethrough);
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
+/*
+ * We are allocating a temporary printout-entry to be passed
+ * between seq_start()/next() and seq_show():
+ */
 static struct memtype *memtype_get_idx(loff_t pos)
 {
-       struct memtype *print_entry;
+       struct memtype *entry_print;
        int ret;
 
-       print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
-       if (!print_entry)
+       entry_print  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
+       if (!entry_print)
                return NULL;
 
        spin_lock(&memtype_lock);
-       ret = memtype_copy_nth_element(print_entry, pos);
+       ret = memtype_copy_nth_element(entry_print, pos);
        spin_unlock(&memtype_lock);
 
-       if (!ret) {
-               return print_entry;
-       } else {
-               kfree(print_entry);
+       /* Free it on error: */
+       if (ret) {
+               kfree(entry_print);
                return NULL;
        }
+
+       return entry_print;
 }
 
 static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1142,11 +1175,14 @@ static void memtype_seq_stop(struct seq_file *seq, void *v)
 
 static int memtype_seq_show(struct seq_file *seq, void *v)
 {
-       struct memtype *print_entry = (struct memtype *)v;
+       struct memtype *entry_print = (struct memtype *)v;
+
+       seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n",
+                       entry_print->start,
+                       entry_print->end,
+                       cattr_name(entry_print->type));
 
-       seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
-                       print_entry->start, print_entry->end);
-       kfree(print_entry);
+       kfree(entry_print);
 
        return 0;
 }
@@ -1178,7 +1214,6 @@ static int __init pat_memtype_list_init(void)
        }
        return 0;
 }
-
 late_initcall(pat_memtype_list_init);
 
 #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
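
A minimal sketch of how the renamed entry points are consumed by a driver, pairing the exported wrappers above (which now route to memtype_reserve_io()/memtype_free_io()); example_map_bar_wc() and its parameters are illustrative names, not taken from this patch:

#include <linux/io.h>

static void __iomem *example_map_bar_wc(resource_size_t base,
                                        resource_size_t size)
{
        void __iomem *regs;

        if (arch_io_reserve_memtype_wc(base, size))
                return NULL;

        regs = ioremap_wc(base, size);
        if (!regs)
                arch_io_free_memtype_wc(base, size);

        return regs;
}

The reservation must outlive the mapping; teardown is iounmap() followed by arch_io_free_memtype_wc(), in the reverse order.
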
similarity index 81%
rename from arch/x86/mm/pat_internal.h
rename to arch/x86/mm/pat/memtype.h
index 79a06684349e211950b3077279e778a1f0579d3a..cacecdbceb55ecc0fdf28ceec58573f66cc391a9 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PAT_INTERNAL_H_
-#define __PAT_INTERNAL_H_
+#ifndef __MEMTYPE_H_
+#define __MEMTYPE_H_
 
 extern int pat_debug_enable;
 
@@ -29,13 +29,13 @@ static inline char *cattr_name(enum page_cache_mode pcm)
 }
 
 #ifdef CONFIG_X86_PAT
-extern int memtype_check_insert(struct memtype *new,
+extern int memtype_check_insert(struct memtype *entry_new,
                                enum page_cache_mode *new_type);
 extern struct memtype *memtype_erase(u64 start, u64 end);
 extern struct memtype *memtype_lookup(u64 addr);
-extern int memtype_copy_nth_element(struct memtype *out, loff_t pos);
+extern int memtype_copy_nth_element(struct memtype *entry_out, loff_t pos);
 #else
-static inline int memtype_check_insert(struct memtype *new,
+static inline int memtype_check_insert(struct memtype *entry_new,
                                       enum page_cache_mode *new_type)
 { return 0; }
 static inline struct memtype *memtype_erase(u64 start, u64 end)
@@ -46,4 +46,4 @@ static inline int memtype_copy_nth_element(struct memtype *out, loff_t pos)
 { return 0; }
 #endif
 
-#endif /* __PAT_INTERNAL_H_ */
+#endif /* __MEMTYPE_H_ */
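
The renamed header keeps the usual kernel pattern: real prototypes under CONFIG_X86_PAT, inline no-op stubs otherwise, so callers such as memtype.c never need #ifdef guards. A minimal illustration of the idiom, with invented names (CONFIG_EXAMPLE_FEATURE, example_insert()):

#ifdef CONFIG_EXAMPLE_FEATURE
extern int example_insert(struct memtype *entry_new);
#else
static inline int example_insert(struct memtype *entry_new)
{ return 0; }
#endif
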
diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c
new file mode 100644 (file)
index 0000000..a07e488
--- /dev/null
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handle caching attributes in page tables (PAT)
+ *
+ * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *          Suresh B Siddha <suresh.b.siddha@intel.com>
+ *
+ * Interval tree used to store the PAT memory type reservations.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/memtype.h>
+
+#include "memtype.h"
+
+/*
+ * The memtype tree keeps track of memory type for specific
+ * physical memory areas. Without proper tracking, conflicting memory
+ * types in different mappings can cause CPU cache corruption.
+ *
+ * The tree is an interval tree (augmented rbtree) which is ordered
+ * by the starting address. The tree can contain multiple entries for
+ * different regions which overlap. All the aliases have the same
+ * cache attributes of course, as enforced by the PAT logic.
+ *
+ * memtype_lock protects the rbtree.
+ */
+
+static inline u64 interval_start(struct memtype *entry)
+{
+       return entry->start;
+}
+
+static inline u64 interval_end(struct memtype *entry)
+{
+       return entry->end - 1;
+}
+
+INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
+                    interval_start, interval_end,
+                    static, interval)
+
+static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
+
+enum {
+       MEMTYPE_EXACT_MATCH     = 0,
+       MEMTYPE_END_MATCH       = 1
+};
+
+static struct memtype *memtype_match(u64 start, u64 end, int match_type)
+{
+       struct memtype *entry_match;
+
+       entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
+
+       while (entry_match != NULL && entry_match->start < end) {
+               if ((match_type == MEMTYPE_EXACT_MATCH) &&
+                   (entry_match->start == start) && (entry_match->end == end))
+                       return entry_match;
+
+               if ((match_type == MEMTYPE_END_MATCH) &&
+                   (entry_match->start < start) && (entry_match->end == end))
+                       return entry_match;
+
+               entry_match = interval_iter_next(entry_match, start, end-1);
+       }
+
+       return NULL; /* Returns NULL if there is no match */
+}
+
+static int memtype_check_conflict(u64 start, u64 end,
+                                 enum page_cache_mode reqtype,
+                                 enum page_cache_mode *newtype)
+{
+       struct memtype *entry_match;
+       enum page_cache_mode found_type = reqtype;
+
+       entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
+       if (entry_match == NULL)
+               goto success;
+
+       if (entry_match->type != found_type && newtype == NULL)
+               goto failure;
+
+       dprintk("Overlap at 0x%Lx-0x%Lx\n", entry_match->start, entry_match->end);
+       found_type = entry_match->type;
+
+       entry_match = interval_iter_next(entry_match, start, end-1);
+       while (entry_match) {
+               if (entry_match->type != found_type)
+                       goto failure;
+
+               entry_match = interval_iter_next(entry_match, start, end-1);
+       }
+success:
+       if (newtype)
+               *newtype = found_type;
+
+       return 0;
+
+failure:
+       pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+               current->comm, current->pid, start, end,
+               cattr_name(found_type), cattr_name(entry_match->type));
+
+       return -EBUSY;
+}
+
+int memtype_check_insert(struct memtype *entry_new, enum page_cache_mode *ret_type)
+{
+       int err = 0;
+
+       err = memtype_check_conflict(entry_new->start, entry_new->end, entry_new->type, ret_type);
+       if (err)
+               return err;
+
+       if (ret_type)
+               entry_new->type = *ret_type;
+
+       interval_insert(entry_new, &memtype_rbroot);
+       return 0;
+}
+
+struct memtype *memtype_erase(u64 start, u64 end)
+{
+       struct memtype *entry_old;
+
+       /*
+        * Since the memtype_rbroot tree allows overlapping ranges,
+        * memtype_erase() checks with EXACT_MATCH first, i.e. free
+        * a whole node for the munmap case.  If no such entry is found,
+        * it then checks with END_MATCH, i.e. shrink the size of a node
+        * from the end for the mremap case.
+        */
+       entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
+       if (!entry_old) {
+               entry_old = memtype_match(start, end, MEMTYPE_END_MATCH);
+               if (!entry_old)
+                       return ERR_PTR(-EINVAL);
+       }
+
+       if (entry_old->start == start) {
+               /* munmap: erase this node */
+               interval_remove(entry_old, &memtype_rbroot);
+       } else {
+               /* mremap: update the end value of this node */
+               interval_remove(entry_old, &memtype_rbroot);
+               entry_old->end = start;
+               interval_insert(entry_old, &memtype_rbroot);
+
+               return NULL;
+       }
+
+       return entry_old;
+}
+
+struct memtype *memtype_lookup(u64 addr)
+{
+       return interval_iter_first(&memtype_rbroot, addr, addr + PAGE_SIZE-1);
+}
+
+/*
+ * Debugging helper, copy the Nth entry of the tree into a
+ * copy for printout. This allows us to print out the tree
+ * via debugfs, without holding the memtype_lock too long:
+ */
+#ifdef CONFIG_DEBUG_FS
+int memtype_copy_nth_element(struct memtype *entry_out, loff_t pos)
+{
+       struct memtype *entry_match;
+       int i = 1;
+
+       entry_match = interval_iter_first(&memtype_rbroot, 0, ULONG_MAX);
+
+       while (entry_match && pos != i) {
+               entry_match = interval_iter_next(entry_match, 0, ULONG_MAX);
+               i++;
+       }
+
+       if (entry_match) { /* pos == i */
+               *entry_out = *entry_match;
+               return 0;
+       } else {
+               return 1;
+       }
+}
+#endif
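
One convention in the new file is worth spelling out: interval_tree_generic.h operates on closed intervals [start, last], which is why interval_end() returns end - 1 for the half-open [start, end) ranges PAT tracks, and why every iterator call above passes end - 1. A hedged sketch of the same macro on a toy structure, not part of this patch:

#include <linux/types.h>
#include <linux/interval_tree_generic.h>

struct toy_range {
        struct rb_node  rb;
        u64             start;          /* inclusive, like struct memtype */
        u64             end;            /* exclusive, like struct memtype */
        u64             subtree_last;
};

static inline u64 toy_start(struct toy_range *r) { return r->start; }
static inline u64 toy_last(struct toy_range *r)  { return r->end - 1; }

INTERVAL_TREE_DEFINE(struct toy_range, rb, u64, subtree_last,
                     toy_start, toy_last, static, toy)

A query over a half-open window is then toy_iter_first(&root, start, end - 1), mirroring memtype_match() and memtype_lookup() above.
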
similarity index 98%
rename from arch/x86/mm/pageattr.c
rename to arch/x86/mm/pat/set_memory.c
index 1b99ad05b117784415dfe88558431dbf093ffcdd..62a8ebe72a52f817f1aa3ea24ae04937c9d6301b 100644 (file)
 #include <linux/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/set_memory.h>
 
-#include "mm_internal.h"
+#include "../mm_internal.h"
 
 /*
  * The current flushing context - we pass it instead of 5 arguments:
@@ -331,7 +331,7 @@ static void cpa_flush_all(unsigned long cache)
        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-void __cpa_flush_tlb(void *data)
+static void __cpa_flush_tlb(void *data)
 {
        struct cpa_data *cpa = data;
        unsigned int i;
@@ -1801,7 +1801,7 @@ int set_memory_uc(unsigned long addr, int numpages)
        /*
         * for now UC MINUS. see comments in ioremap()
         */
-       ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+       ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                              _PAGE_CACHE_MODE_UC_MINUS, NULL);
        if (ret)
                goto out_err;
@@ -1813,7 +1813,7 @@ int set_memory_uc(unsigned long addr, int numpages)
        return 0;
 
 out_free:
-       free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+       memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 out_err:
        return ret;
 }
@@ -1839,14 +1839,14 @@ int set_memory_wc(unsigned long addr, int numpages)
 {
        int ret;
 
-       ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+       ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                _PAGE_CACHE_MODE_WC, NULL);
        if (ret)
                return ret;
 
        ret = _set_memory_wc(addr, numpages);
        if (ret)
-               free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+               memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 
        return ret;
 }
@@ -1873,7 +1873,7 @@ int set_memory_wb(unsigned long addr, int numpages)
        if (ret)
                return ret;
 
-       free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+       memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
        return 0;
 }
 EXPORT_SYMBOL(set_memory_wb);
@@ -2014,7 +2014,7 @@ static int _set_pages_array(struct page **pages, int numpages,
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
-               if (reserve_memtype(start, end, new_type, NULL))
+               if (memtype_reserve(start, end, new_type, NULL))
                        goto err_out;
        }
 
@@ -2040,7 +2040,7 @@ err_out:
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
-               free_memtype(start, end);
+               memtype_free(start, end);
        }
        return -EINVAL;
 }
@@ -2089,7 +2089,7 @@ int set_pages_array_wb(struct page **pages, int numpages)
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
-               free_memtype(start, end);
+               memtype_free(start, end);
        }
 
        return 0;
@@ -2215,7 +2215,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                .pgd = pgd,
                .numpages = numpages,
                .mask_set = __pgprot(0),
-               .mask_clr = __pgprot(0),
+               .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
                .flags = 0,
        };
 
@@ -2224,12 +2224,6 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
        if (!(__supported_pte_mask & _PAGE_NX))
                goto out;
 
-       if (!(page_flags & _PAGE_NX))
-               cpa.mask_clr = __pgprot(_PAGE_NX);
-
-       if (!(page_flags & _PAGE_RW))
-               cpa.mask_clr = __pgprot(_PAGE_RW);
-
        if (!(page_flags & _PAGE_ENC))
                cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
 
@@ -2281,5 +2275,5 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
  * be exposed to the rest of the kernel. Include these directly here.
  */
 #ifdef CONFIG_CPA_DEBUG
-#include "pageattr-test.c"
+#include "cpa-test.c"
 #endif
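
The kernel_map_pages_in_pgd() hunk above is more than a cleanup: the two removed conditionals both assigned cpa.mask_clr, so a request lacking both _PAGE_NX and _PAGE_RW used to clear only _PAGE_RW, the second assignment overwriting the first. The single expression folds both bits in. A plain-C check of the identity, with stand-in flag values rather than the real _PAGE_* constants:

#include <assert.h>

#define EX_PAGE_NX 0x1UL
#define EX_PAGE_RW 0x2UL

int main(void)
{
        unsigned long page_flags = 0;   /* neither NX nor RW requested */
        unsigned long mask_clr = ~page_flags & (EX_PAGE_NX | EX_PAGE_RW);

        assert(mask_clr == (EX_PAGE_NX | EX_PAGE_RW));

        page_flags = EX_PAGE_RW;        /* RW requested, NX not */
        mask_clr = ~page_flags & (EX_PAGE_NX | EX_PAGE_RW);
        assert(mask_clr == EX_PAGE_NX); /* only NX gets cleared */

        return 0;
}
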
diff --git a/arch/x86/mm/pat_interval.c b/arch/x86/mm/pat_interval.c
deleted file mode 100644 (file)
index 6855362..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Handle caching attributes in page tables (PAT)
- *
- * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- *          Suresh B Siddha <suresh.b.siddha@intel.com>
- *
- * Interval tree used to store the PAT memory type reservations.
- */
-
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/interval_tree_generic.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-
-#include <asm/pgtable.h>
-#include <asm/pat.h>
-
-#include "pat_internal.h"
-
-/*
- * The memtype tree keeps track of memory type for specific
- * physical memory areas. Without proper tracking, conflicting memory
- * types in different mappings can cause CPU cache corruption.
- *
- * The tree is an interval tree (augmented rbtree) with tree ordered
- * on starting address. Tree can contain multiple entries for
- * different regions which overlap. All the aliases have the same
- * cache attributes of course.
- *
- * memtype_lock protects the rbtree.
- */
-static inline u64 memtype_interval_start(struct memtype *memtype)
-{
-       return memtype->start;
-}
-
-static inline u64 memtype_interval_end(struct memtype *memtype)
-{
-       return memtype->end - 1;
-}
-INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
-                    memtype_interval_start, memtype_interval_end,
-                    static, memtype_interval)
-
-static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
-
-enum {
-       MEMTYPE_EXACT_MATCH     = 0,
-       MEMTYPE_END_MATCH       = 1
-};
-
-static struct memtype *memtype_match(u64 start, u64 end, int match_type)
-{
-       struct memtype *match;
-
-       match = memtype_interval_iter_first(&memtype_rbroot, start, end-1);
-       while (match != NULL && match->start < end) {
-               if ((match_type == MEMTYPE_EXACT_MATCH) &&
-                   (match->start == start) && (match->end == end))
-                       return match;
-
-               if ((match_type == MEMTYPE_END_MATCH) &&
-                   (match->start < start) && (match->end == end))
-                       return match;
-
-               match = memtype_interval_iter_next(match, start, end-1);
-       }
-
-       return NULL; /* Returns NULL if there is no match */
-}
-
-static int memtype_check_conflict(u64 start, u64 end,
-                                 enum page_cache_mode reqtype,
-                                 enum page_cache_mode *newtype)
-{
-       struct memtype *match;
-       enum page_cache_mode found_type = reqtype;
-
-       match = memtype_interval_iter_first(&memtype_rbroot, start, end-1);
-       if (match == NULL)
-               goto success;
-
-       if (match->type != found_type && newtype == NULL)
-               goto failure;
-
-       dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
-       found_type = match->type;
-
-       match = memtype_interval_iter_next(match, start, end-1);
-       while (match) {
-               if (match->type != found_type)
-                       goto failure;
-
-               match = memtype_interval_iter_next(match, start, end-1);
-       }
-success:
-       if (newtype)
-               *newtype = found_type;
-
-       return 0;
-
-failure:
-       pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
-               current->comm, current->pid, start, end,
-               cattr_name(found_type), cattr_name(match->type));
-       return -EBUSY;
-}
-
-int memtype_check_insert(struct memtype *new,
-                        enum page_cache_mode *ret_type)
-{
-       int err = 0;
-
-       err = memtype_check_conflict(new->start, new->end, new->type, ret_type);
-       if (err)
-               return err;
-
-       if (ret_type)
-               new->type = *ret_type;
-
-       memtype_interval_insert(new, &memtype_rbroot);
-       return 0;
-}
-
-struct memtype *memtype_erase(u64 start, u64 end)
-{
-       struct memtype *data;
-
-       /*
-        * Since the memtype_rbroot tree allows overlapping ranges,
-        * memtype_erase() checks with EXACT_MATCH first, i.e. free
-        * a whole node for the munmap case.  If no such entry is found,
-        * it then checks with END_MATCH, i.e. shrink the size of a node
-        * from the end for the mremap case.
-        */
-       data = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
-       if (!data) {
-               data = memtype_match(start, end, MEMTYPE_END_MATCH);
-               if (!data)
-                       return ERR_PTR(-EINVAL);
-       }
-
-       if (data->start == start) {
-               /* munmap: erase this node */
-               memtype_interval_remove(data, &memtype_rbroot);
-       } else {
-               /* mremap: update the end value of this node */
-               memtype_interval_remove(data, &memtype_rbroot);
-               data->end = start;
-               memtype_interval_insert(data, &memtype_rbroot);
-               return NULL;
-       }
-
-       return data;
-}
-
-struct memtype *memtype_lookup(u64 addr)
-{
-       return memtype_interval_iter_first(&memtype_rbroot, addr,
-                                          addr + PAGE_SIZE-1);
-}
-
-#if defined(CONFIG_DEBUG_FS)
-int memtype_copy_nth_element(struct memtype *out, loff_t pos)
-{
-       struct memtype *match;
-       int i = 1;
-
-       match = memtype_interval_iter_first(&memtype_rbroot, 0, ULONG_MAX);
-       while (match && pos != i) {
-               match = memtype_interval_iter_next(match, 0, ULONG_MAX);
-               i++;
-       }
-
-       if (match) { /* pos == i */
-               *out = *match;
-               return 0;
-       } else {
-               return 1;
-       }
-}
-#endif
index 9bb7f0ab9fe625b77c64bec89b2a0ff9cac222bf..0e6700eaa4f921a704bb1fa7b137552754eda27d 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
+#include <linux/vmalloc.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
index bdc98150d4db887ecc43515bb60ae7dd19664ae3..fc3f3d3e2ef210fc31b2eb661f4c5f73457d42ab 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/mm.h>
 
 #include <asm/page.h>
+#include <linux/vmalloc.h>
 
 #include "physaddr.h"
 
index 92153d054d6c542b06ecdc87a4ec1e3e0b39786e..bda73cb7a0446b932a60710525b1a605331445cf 100644 (file)
@@ -79,7 +79,7 @@ static void do_read_far_test(void __iomem *p)
 
 static void do_test(unsigned long size)
 {
-       void __iomem *p = ioremap_nocache(mmio_address, size);
+       void __iomem *p = ioremap(mmio_address, size);
        if (!p) {
                pr_err("could not ioremap, aborting.\n");
                return;
@@ -104,7 +104,7 @@ static void do_test_bulk_ioremapping(void)
        int i;
 
        for (i = 0; i < 10; ++i) {
-               p = ioremap_nocache(mmio_address, PAGE_SIZE);
+               p = ioremap(mmio_address, PAGE_SIZE);
                if (p)
                        iounmap(p);
        }
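
The ioremap_nocache() -> ioremap() substitutions here (and throughout this merge) are one-for-one: on x86, ioremap() has always returned an uncached (UC-) mapping, which is exactly what the removed alias provided. A hedged sketch of the post-rename idiom; example_probe_mmio() is an illustrative name:

#include <linux/io.h>

static void example_probe_mmio(unsigned long mmio_address)
{
        void __iomem *p = ioremap(mmio_address, PAGE_SIZE);

        if (!p)
                return;

        (void)readl(p);         /* one UC read through the new mapping */
        iounmap(p);
}
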
index e6a9edc5baaf07b99925519351a895520e2d7949..66f96f21a7b60b2a73832fd291c1f7ae783cb2c5 100644 (file)
@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
                               (void *)info, 1);
        else
                on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-                               (void *)info, 1, GFP_ATOMIC, cpumask);
+                               (void *)info, 1, cpumask);
 }
 
 /*
index 9df652d3d9275bca15c9d5b8ff54de8e8235f161..fa855bbaebaf191530498bbf29487ef61867071b 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/errno.h>
 #include <linux/memblock.h>
 
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/e820/api.h>
 #include <asm/pci_x86.h>
 #include <asm/io_apic.h>
index 887d181b769b0cd2789f817f99efb64eaffc73b7..0c7b6e66c64484d2bfa402033e70a3f5c901a485 100644 (file)
@@ -105,7 +105,7 @@ static void __iomem *mcfg_ioremap(struct pci_mmcfg_region *cfg)
        start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
        num_buses = cfg->end_bus - cfg->start_bus + 1;
        size = PCI_MMCFG_BUS_OFFSET(num_buses);
-       addr = ioremap_nocache(start, size);
+       addr = ioremap(start, size);
        if (addr)
                addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
        return addr;
index fe29f3f5d384f286b03e735350dac2ac95f61ccc..84b09c230cbd5f348bea6edfb860000382aa05d0 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
-OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y
+KASAN_SANITIZE := n
+GCOV_PROFILE := n
 
 obj-$(CONFIG_EFI)              += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
 obj-$(CONFIG_EFI_MIXED)                += efi_thunk_$(BITS).o
index 38d44f36d5ede328a7df74240b62d5cf690014f6..59f7f6d60cf61a8410688514bd5fc9107ccd5d75 100644 (file)
@@ -54,8 +54,8 @@
 #include <asm/x86_init.h>
 #include <asm/uv/uv.h>
 
-static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
+static u64 efi_systab_phys __initdata;
 
 static efi_config_table_type_t arch_tables[] __initdata = {
 #ifdef CONFIG_X86_UV
@@ -97,32 +97,6 @@ static int __init setup_add_efi_memmap(char *arg)
 }
 early_param("add_efi_memmap", setup_add_efi_memmap);
 
-static efi_status_t __init phys_efi_set_virtual_address_map(
-       unsigned long memory_map_size,
-       unsigned long descriptor_size,
-       u32 descriptor_version,
-       efi_memory_desc_t *virtual_map)
-{
-       efi_status_t status;
-       unsigned long flags;
-       pgd_t *save_pgd;
-
-       save_pgd = efi_call_phys_prolog();
-       if (!save_pgd)
-               return EFI_ABORTED;
-
-       /* Disable interrupts around EFI calls: */
-       local_irq_save(flags);
-       status = efi_call_phys(efi_phys.set_virtual_address_map,
-                              memory_map_size, descriptor_size,
-                              descriptor_version, virtual_map);
-       local_irq_restore(flags);
-
-       efi_call_phys_epilog(save_pgd);
-
-       return status;
-}
-
 void __init efi_find_mirror(void)
 {
        efi_memory_desc_t *md;
@@ -330,10 +304,16 @@ static void __init efi_clean_memmap(void)
        }
 
        if (n_removal > 0) {
-               u64 size = efi.memmap.nr_map - n_removal;
+               struct efi_memory_map_data data = {
+                       .phys_map = efi.memmap.phys_map,
+                       .desc_version = efi.memmap.desc_version,
+                       .desc_size = efi.memmap.desc_size,
+                       .size = data.desc_size * (efi.memmap.nr_map - n_removal),
+                       .flags = 0,
+               };
 
                pr_warn("Removing %d invalid memory map entries.\n", n_removal);
-               efi_memmap_install(efi.memmap.phys_map, size);
+               efi_memmap_install(&data);
        }
 }
 
@@ -353,89 +333,90 @@ void __init efi_print_memmap(void)
        }
 }
 
-static int __init efi_systab_init(void *phys)
+static int __init efi_systab_init(u64 phys)
 {
+       int size = efi_enabled(EFI_64BIT) ? sizeof(efi_system_table_64_t)
+                                         : sizeof(efi_system_table_32_t);
+       bool over4g = false;
+       void *p;
+
+       p = early_memremap_ro(phys, size);
+       if (p == NULL) {
+               pr_err("Couldn't map the system table!\n");
+               return -ENOMEM;
+       }
+
        if (efi_enabled(EFI_64BIT)) {
-               efi_system_table_64_t *systab64;
-               struct efi_setup_data *data = NULL;
-               u64 tmp = 0;
+               const efi_system_table_64_t *systab64 = p;
+
+               efi_systab.hdr                  = systab64->hdr;
+               efi_systab.fw_vendor            = systab64->fw_vendor;
+               efi_systab.fw_revision          = systab64->fw_revision;
+               efi_systab.con_in_handle        = systab64->con_in_handle;
+               efi_systab.con_in               = systab64->con_in;
+               efi_systab.con_out_handle       = systab64->con_out_handle;
+               efi_systab.con_out              = (void *)(unsigned long)systab64->con_out;
+               efi_systab.stderr_handle        = systab64->stderr_handle;
+               efi_systab.stderr               = systab64->stderr;
+               efi_systab.runtime              = (void *)(unsigned long)systab64->runtime;
+               efi_systab.boottime             = (void *)(unsigned long)systab64->boottime;
+               efi_systab.nr_tables            = systab64->nr_tables;
+               efi_systab.tables               = systab64->tables;
+
+               over4g = systab64->con_in_handle        > U32_MAX ||
+                        systab64->con_in               > U32_MAX ||
+                        systab64->con_out_handle       > U32_MAX ||
+                        systab64->con_out              > U32_MAX ||
+                        systab64->stderr_handle        > U32_MAX ||
+                        systab64->stderr               > U32_MAX ||
+                        systab64->boottime             > U32_MAX;
 
                if (efi_setup) {
-                       data = early_memremap(efi_setup, sizeof(*data));
-                       if (!data)
+                       struct efi_setup_data *data;
+
+                       data = early_memremap_ro(efi_setup, sizeof(*data));
+                       if (!data) {
+                               early_memunmap(p, size);
                                return -ENOMEM;
-               }
-               systab64 = early_memremap((unsigned long)phys,
-                                        sizeof(*systab64));
-               if (systab64 == NULL) {
-                       pr_err("Couldn't map the system table!\n");
-                       if (data)
-                               early_memunmap(data, sizeof(*data));
-                       return -ENOMEM;
-               }
+                       }
+
+                       efi_systab.fw_vendor    = (unsigned long)data->fw_vendor;
+                       efi_systab.runtime      = (void *)(unsigned long)data->runtime;
+                       efi_systab.tables       = (unsigned long)data->tables;
+
+                       over4g |= data->fw_vendor       > U32_MAX ||
+                                 data->runtime         > U32_MAX ||
+                                 data->tables          > U32_MAX;
 
-               efi_systab.hdr = systab64->hdr;
-               efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
-                                             systab64->fw_vendor;
-               tmp |= data ? data->fw_vendor : systab64->fw_vendor;
-               efi_systab.fw_revision = systab64->fw_revision;
-               efi_systab.con_in_handle = systab64->con_in_handle;
-               tmp |= systab64->con_in_handle;
-               efi_systab.con_in = systab64->con_in;
-               tmp |= systab64->con_in;
-               efi_systab.con_out_handle = systab64->con_out_handle;
-               tmp |= systab64->con_out_handle;
-               efi_systab.con_out = systab64->con_out;
-               tmp |= systab64->con_out;
-               efi_systab.stderr_handle = systab64->stderr_handle;
-               tmp |= systab64->stderr_handle;
-               efi_systab.stderr = systab64->stderr;
-               tmp |= systab64->stderr;
-               efi_systab.runtime = data ?
-                                    (void *)(unsigned long)data->runtime :
-                                    (void *)(unsigned long)systab64->runtime;
-               tmp |= data ? data->runtime : systab64->runtime;
-               efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
-               tmp |= systab64->boottime;
-               efi_systab.nr_tables = systab64->nr_tables;
-               efi_systab.tables = data ? (unsigned long)data->tables :
-                                          systab64->tables;
-               tmp |= data ? data->tables : systab64->tables;
-
-               early_memunmap(systab64, sizeof(*systab64));
-               if (data)
                        early_memunmap(data, sizeof(*data));
-#ifdef CONFIG_X86_32
-               if (tmp >> 32) {
-                       pr_err("EFI data located above 4GB, disabling EFI.\n");
-                       return -EINVAL;
+               } else {
+                       over4g |= systab64->fw_vendor   > U32_MAX ||
+                                 systab64->runtime     > U32_MAX ||
+                                 systab64->tables      > U32_MAX;
                }
-#endif
        } else {
-               efi_system_table_32_t *systab32;
-
-               systab32 = early_memremap((unsigned long)phys,
-                                        sizeof(*systab32));
-               if (systab32 == NULL) {
-                       pr_err("Couldn't map the system table!\n");
-                       return -ENOMEM;
-               }
-
-               efi_systab.hdr = systab32->hdr;
-               efi_systab.fw_vendor = systab32->fw_vendor;
-               efi_systab.fw_revision = systab32->fw_revision;
-               efi_systab.con_in_handle = systab32->con_in_handle;
-               efi_systab.con_in = systab32->con_in;
-               efi_systab.con_out_handle = systab32->con_out_handle;
-               efi_systab.con_out = systab32->con_out;
-               efi_systab.stderr_handle = systab32->stderr_handle;
-               efi_systab.stderr = systab32->stderr;
-               efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
-               efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
-               efi_systab.nr_tables = systab32->nr_tables;
-               efi_systab.tables = systab32->tables;
-
-               early_memunmap(systab32, sizeof(*systab32));
+               const efi_system_table_32_t *systab32 = p;
+
+               efi_systab.hdr                  = systab32->hdr;
+               efi_systab.fw_vendor            = systab32->fw_vendor;
+               efi_systab.fw_revision          = systab32->fw_revision;
+               efi_systab.con_in_handle        = systab32->con_in_handle;
+               efi_systab.con_in               = systab32->con_in;
+               efi_systab.con_out_handle       = systab32->con_out_handle;
+               efi_systab.con_out              = (void *)(unsigned long)systab32->con_out;
+               efi_systab.stderr_handle        = systab32->stderr_handle;
+               efi_systab.stderr               = systab32->stderr;
+               efi_systab.runtime              = (void *)(unsigned long)systab32->runtime;
+               efi_systab.boottime             = (void *)(unsigned long)systab32->boottime;
+               efi_systab.nr_tables            = systab32->nr_tables;
+               efi_systab.tables               = systab32->tables;
+       }
+
+       early_memunmap(p, size);
+
+       if (IS_ENABLED(CONFIG_X86_32) && over4g) {
+               pr_err("EFI data located above 4GB, disabling EFI.\n");
+               return -EINVAL;
        }
 
        efi.systab = &efi_systab;
@@ -455,108 +436,23 @@ static int __init efi_systab_init(void *phys)
        return 0;
 }
 
-static int __init efi_runtime_init32(void)
-{
-       efi_runtime_services_32_t *runtime;
-
-       runtime = early_memremap((unsigned long)efi.systab->runtime,
-                       sizeof(efi_runtime_services_32_t));
-       if (!runtime) {
-               pr_err("Could not map the runtime service table!\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * We will only need *early* access to the SetVirtualAddressMap
-        * EFI runtime service. All other runtime services will be called
-        * via the virtual mapping.
-        */
-       efi_phys.set_virtual_address_map =
-                       (efi_set_virtual_address_map_t *)
-                       (unsigned long)runtime->set_virtual_address_map;
-       early_memunmap(runtime, sizeof(efi_runtime_services_32_t));
-
-       return 0;
-}
-
-static int __init efi_runtime_init64(void)
-{
-       efi_runtime_services_64_t *runtime;
-
-       runtime = early_memremap((unsigned long)efi.systab->runtime,
-                       sizeof(efi_runtime_services_64_t));
-       if (!runtime) {
-               pr_err("Could not map the runtime service table!\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * We will only need *early* access to the SetVirtualAddressMap
-        * EFI runtime service. All other runtime services will be called
-        * via the virtual mapping.
-        */
-       efi_phys.set_virtual_address_map =
-                       (efi_set_virtual_address_map_t *)
-                       (unsigned long)runtime->set_virtual_address_map;
-       early_memunmap(runtime, sizeof(efi_runtime_services_64_t));
-
-       return 0;
-}
-
-static int __init efi_runtime_init(void)
-{
-       int rv;
-
-       /*
-        * Check out the runtime services table. We need to map
-        * the runtime services table so that we can grab the physical
-        * address of several of the EFI runtime functions, needed to
-        * set the firmware into virtual mode.
-        *
-        * When EFI_PARAVIRT is in force then we could not map runtime
-        * service memory region because we do not have direct access to it.
-        * However, runtime services are available through proxy functions
-        * (e.g. in case of Xen dom0 EFI implementation they call special
-        * hypercall which executes relevant EFI functions) and that is why
-        * they are always enabled.
-        */
-
-       if (!efi_enabled(EFI_PARAVIRT)) {
-               if (efi_enabled(EFI_64BIT))
-                       rv = efi_runtime_init64();
-               else
-                       rv = efi_runtime_init32();
-
-               if (rv)
-                       return rv;
-       }
-
-       set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-
-       return 0;
-}
-
 void __init efi_init(void)
 {
        efi_char16_t *c16;
        char vendor[100] = "unknown";
        int i = 0;
-       void *tmp;
 
-#ifdef CONFIG_X86_32
-       if (boot_params.efi_info.efi_systab_hi ||
-           boot_params.efi_info.efi_memmap_hi) {
+       if (IS_ENABLED(CONFIG_X86_32) &&
+           (boot_params.efi_info.efi_systab_hi ||
+            boot_params.efi_info.efi_memmap_hi)) {
                pr_info("Table located above 4GB, disabling EFI.\n");
                return;
        }
-       efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
-#else
-       efi_phys.systab = (efi_system_table_t *)
-                         (boot_params.efi_info.efi_systab |
-                         ((__u64)boot_params.efi_info.efi_systab_hi<<32));
-#endif
 
-       if (efi_systab_init(efi_phys.systab))
+       efi_systab_phys = boot_params.efi_info.efi_systab |
+                         ((__u64)boot_params.efi_info.efi_systab_hi << 32);
+
+       if (efi_systab_init(efi_systab_phys))
                return;
 
        efi.config_table = (unsigned long)efi.systab->tables;
@@ -566,14 +462,16 @@ void __init efi_init(void)
        /*
         * Show what we know for posterity
         */
-       c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
+       c16 = early_memremap_ro(efi.systab->fw_vendor,
+                               sizeof(vendor) * sizeof(efi_char16_t));
        if (c16) {
-               for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
-                       vendor[i] = *c16++;
+               for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
+                       vendor[i] = c16[i];
                vendor[i] = '\0';
-       } else
+               early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
+       } else {
                pr_err("Could not map the firmware vendor!\n");
-       early_memunmap(tmp, 2);
+       }
 
        pr_info("EFI v%u.%.02u by %s\n",
                efi.systab->hdr.revision >> 16,
@@ -592,19 +490,21 @@ void __init efi_init(void)
 
        if (!efi_runtime_supported())
                pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
-       else {
-               if (efi_runtime_disabled() || efi_runtime_init()) {
-                       efi_memmap_unmap();
-                       return;
-               }
+
+       if (!efi_runtime_supported() || efi_runtime_disabled()) {
+               efi_memmap_unmap();
+               return;
        }
 
+       set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
        efi_clean_memmap();
 
        if (efi_enabled(EFI_DBG))
                efi_print_memmap();
 }
 
+#if defined(CONFIG_X86_32) || defined(CONFIG_X86_UV)
+
 void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
 {
        u64 addr, npages;
@@ -669,6 +569,8 @@ void __init old_map_region(efi_memory_desc_t *md)
                       (unsigned long long)md->phys_addr);
 }
 
+#endif
+
 /* Merge contiguous regions of the same type and attribute */
 static void __init efi_merge_regions(void)
 {
@@ -707,7 +609,7 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md)
 
        size = md->num_pages << EFI_PAGE_SHIFT;
        end = md->phys_addr + size;
-       systab = (u64)(unsigned long)efi_phys.systab;
+       systab = efi_systab_phys;
        if (md->phys_addr <= systab && systab < end) {
                systab += md->virt_addr - md->phys_addr;
                efi.systab = (efi_system_table_t *)(unsigned long)systab;
@@ -767,7 +669,7 @@ static inline void *efi_map_next_entry_reverse(void *entry)
  */
 static void *efi_map_next_entry(void *entry)
 {
-       if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+       if (!efi_have_uv1_memmap() && efi_enabled(EFI_64BIT)) {
                /*
                 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
                 * config table feature requires us to map all entries
@@ -828,7 +730,7 @@ static bool should_map_region(efi_memory_desc_t *md)
         * Map all of RAM so that we can access arguments in the 1:1
         * mapping when making EFI runtime calls.
         */
-       if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_is_native()) {
+       if (efi_is_mixed()) {
                if (md->type == EFI_CONVENTIONAL_MEMORY ||
                    md->type == EFI_LOADER_DATA ||
                    md->type == EFI_LOADER_CODE)
@@ -899,11 +801,11 @@ static void __init kexec_enter_virtual_mode(void)
 
        /*
         * We don't do virtual mode, since we don't do runtime services, on
-        * non-native EFI. With efi=old_map, we don't do runtime services in
+        * non-native EFI. With the UV1 memmap, we don't do runtime services in
         * kexec kernel because in the initial boot something else might
         * have been mapped at these virtual addresses.
         */
-       if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
+       if (efi_is_mixed() || efi_have_uv1_memmap()) {
                efi_memmap_unmap();
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
@@ -958,11 +860,6 @@ static void __init kexec_enter_virtual_mode(void)
        efi.runtime_version = efi_systab.hdr.revision;
 
        efi_native_runtime_setup();
-
-       efi.set_virtual_address_map = NULL;
-
-       if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
-               runtime_code_page_mkexec();
 #endif
 }
 
@@ -974,9 +871,9 @@ static void __init kexec_enter_virtual_mode(void)
  *
  * The old method which used to update that memory descriptor with the
  * virtual address obtained from ioremap() is still supported when the
- * kernel is booted with efi=old_map on its command line. Same old
- * method enabled the runtime services to be called without having to
- * thunk back into physical mode for every invocation.
+ * kernel is booted on SG1 UV1 hardware. The same method also enables
+ * the runtime services to be called without having to thunk back into
+ * physical mode for every invocation.
  *
  * The new method does a pagetable switch in a preemption-safe manner
  * so that we're in a different address space when calling a runtime
@@ -999,16 +896,14 @@ static void __init __efi_enter_virtual_mode(void)
 
        if (efi_alloc_page_tables()) {
                pr_err("Failed to allocate EFI page tables\n");
-               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-               return;
+               goto err;
        }
 
        efi_merge_regions();
        new_memmap = efi_map_regions(&count, &pg_shift);
        if (!new_memmap) {
                pr_err("Error reallocating memory, EFI runtime non-functional!\n");
-               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-               return;
+               goto err;
        }
 
        pa = __pa(new_memmap);
@@ -1022,8 +917,7 @@ static void __init __efi_enter_virtual_mode(void)
 
        if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
                pr_err("Failed to remap late EFI memory map\n");
-               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-               return;
+               goto err;
        }
 
        if (efi_enabled(EFI_DBG)) {
@@ -1031,34 +925,22 @@ static void __init __efi_enter_virtual_mode(void)
                efi_print_memmap();
        }
 
-       BUG_ON(!efi.systab);
+       if (WARN_ON(!efi.systab))
+               goto err;
 
-       if (efi_setup_page_tables(pa, 1 << pg_shift)) {
-               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-               return;
-       }
+       if (efi_setup_page_tables(pa, 1 << pg_shift))
+               goto err;
 
        efi_sync_low_kernel_mappings();
 
-       if (efi_is_native()) {
-               status = phys_efi_set_virtual_address_map(
-                               efi.memmap.desc_size * count,
-                               efi.memmap.desc_size,
-                               efi.memmap.desc_version,
-                               (efi_memory_desc_t *)pa);
-       } else {
-               status = efi_thunk_set_virtual_address_map(
-                               efi_phys.set_virtual_address_map,
-                               efi.memmap.desc_size * count,
-                               efi.memmap.desc_size,
-                               efi.memmap.desc_version,
-                               (efi_memory_desc_t *)pa);
-       }
-
+       status = efi_set_virtual_address_map(efi.memmap.desc_size * count,
+                                            efi.memmap.desc_size,
+                                            efi.memmap.desc_version,
+                                            (efi_memory_desc_t *)pa);
        if (status != EFI_SUCCESS) {
-               pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
-                        status);
-               panic("EFI call to SetVirtualAddressMap() failed!");
+               pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
+                      status);
+               goto err;
        }
 
        efi_free_boot_services();
@@ -1071,13 +953,11 @@ static void __init __efi_enter_virtual_mode(void)
         */
        efi.runtime_version = efi_systab.hdr.revision;
 
-       if (efi_is_native())
+       if (!efi_is_mixed())
                efi_native_runtime_setup();
        else
                efi_thunk_runtime_setup();
 
-       efi.set_virtual_address_map = NULL;
-
        /*
         * Apply more restrictive page table mapping attributes now that
         * SVAM() has been called and the firmware has performed all
@@ -1087,6 +967,10 @@ static void __init __efi_enter_virtual_mode(void)
 
        /* clean DUMMY object */
        efi_delete_dummy_variable();
+       return;
+
+err:
+       clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 }
 
 void __init efi_enter_virtual_mode(void)
@@ -1102,20 +986,6 @@ void __init efi_enter_virtual_mode(void)
        efi_dump_pagetable();
 }
 
-static int __init arch_parse_efi_cmdline(char *str)
-{
-       if (!str) {
-               pr_warn("need at least one option\n");
-               return -EINVAL;
-       }
-
-       if (parse_option_str(str, "old_map"))
-               set_bit(EFI_OLD_MEMMAP, &efi.flags);
-
-       return 0;
-}
-early_param("efi", arch_parse_efi_cmdline);
-
 bool efi_is_table_address(unsigned long phys_addr)
 {
        unsigned int i;
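
Among the efi.c changes, the firmware-vendor hunk is a behavioral fix rather than a cleanup: the old code mapped only 2 bytes of the vendor string yet walked up to sizeof(vendor) - 1 UTF-16 characters through that mapping. A hedged restatement as a standalone helper; fw_vendor_phys stands in for efi.systab->fw_vendor and is not a name from this patch:

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <asm/early_ioremap.h>

static void __init example_read_fw_vendor(unsigned long fw_vendor_phys)
{
        char vendor[100] = "unknown";
        efi_char16_t *c16;
        int i = 0;

        /* Map the whole worst-case area read-only, not just 2 bytes: */
        c16 = early_memremap_ro(fw_vendor_phys,
                                sizeof(vendor) * sizeof(efi_char16_t));
        if (c16) {
                /* Bounded by both the buffer and the UTF-16 NUL: */
                for (i = 0; i < sizeof(vendor) - 1 && c16[i]; i++)
                        vendor[i] = c16[i];
                vendor[i] = '\0';
                early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
        }

        pr_info("EFI firmware vendor: %s\n", vendor);
}
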
index 9959657127f476ae3018894ffc31909ee2c6d79e..71dddd1620f98839aa65bdd6c3db56a8eaf7a675 100644 (file)
@@ -66,9 +66,17 @@ void __init efi_map_region(efi_memory_desc_t *md)
 void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
 void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
 
-pgd_t * __init efi_call_phys_prolog(void)
+efi_status_t efi_call_svam(efi_set_virtual_address_map_t *__efiapi *,
+                          u32, u32, u32, void *);
+
+efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
+                                               unsigned long descriptor_size,
+                                               u32 descriptor_version,
+                                               efi_memory_desc_t *virtual_map)
 {
        struct desc_ptr gdt_descr;
+       efi_status_t status;
+       unsigned long flags;
        pgd_t *save_pgd;
 
        /* Current pgd is swapper_pg_dir, we'll restore it later: */
@@ -80,14 +88,18 @@ pgd_t * __init efi_call_phys_prolog(void)
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
 
-       return save_pgd;
-}
+       /* Disable interrupts around EFI calls: */
+       local_irq_save(flags);
+       status = efi_call_svam(&efi.systab->runtime->set_virtual_address_map,
+                              memory_map_size, descriptor_size,
+                              descriptor_version, virtual_map);
+       local_irq_restore(flags);
 
-void __init efi_call_phys_epilog(pgd_t *save_pgd)
-{
        load_fixmap_gdt(0);
        load_cr3(save_pgd);
        __flush_tlb_all();
+
+       return status;
 }
 
 void __init efi_runtime_update_mappings(void)
index 08ce8177c3af154ae76ff7ed424d585ecde51984..e2accfe636bd96c94de7d4f4ef4281c5ac1fe494 100644 (file)
@@ -57,142 +57,6 @@ static u64 efi_va = EFI_VA_START;
 
 struct efi_scratch efi_scratch;
 
-static void __init early_code_mapping_set_exec(int executable)
-{
-       efi_memory_desc_t *md;
-
-       if (!(__supported_pte_mask & _PAGE_NX))
-               return;
-
-       /* Make EFI service code area executable */
-       for_each_efi_memory_desc(md) {
-               if (md->type == EFI_RUNTIME_SERVICES_CODE ||
-                   md->type == EFI_BOOT_SERVICES_CODE)
-                       efi_set_executable(md, executable);
-       }
-}
-
-pgd_t * __init efi_call_phys_prolog(void)
-{
-       unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
-       pgd_t *save_pgd, *pgd_k, *pgd_efi;
-       p4d_t *p4d, *p4d_k, *p4d_efi;
-       pud_t *pud;
-
-       int pgd;
-       int n_pgds, i, j;
-
-       if (!efi_enabled(EFI_OLD_MEMMAP)) {
-               efi_switch_mm(&efi_mm);
-               return efi_mm.pgd;
-       }
-
-       early_code_mapping_set_exec(1);
-
-       n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
-       save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
-       if (!save_pgd)
-               return NULL;
-
-       /*
-        * Build 1:1 identity mapping for efi=old_map usage. Note that
-        * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
-        * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
-        * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
-        * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
-        * This means here we can only reuse the PMD tables of the direct mapping.
-        */
-       for (pgd = 0; pgd < n_pgds; pgd++) {
-               addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
-               vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
-               pgd_efi = pgd_offset_k(addr_pgd);
-               save_pgd[pgd] = *pgd_efi;
-
-               p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
-               if (!p4d) {
-                       pr_err("Failed to allocate p4d table!\n");
-                       goto out;
-               }
-
-               for (i = 0; i < PTRS_PER_P4D; i++) {
-                       addr_p4d = addr_pgd + i * P4D_SIZE;
-                       p4d_efi = p4d + p4d_index(addr_p4d);
-
-                       pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
-                       if (!pud) {
-                               pr_err("Failed to allocate pud table!\n");
-                               goto out;
-                       }
-
-                       for (j = 0; j < PTRS_PER_PUD; j++) {
-                               addr_pud = addr_p4d + j * PUD_SIZE;
-
-                               if (addr_pud > (max_pfn << PAGE_SHIFT))
-                                       break;
-
-                               vaddr = (unsigned long)__va(addr_pud);
-
-                               pgd_k = pgd_offset_k(vaddr);
-                               p4d_k = p4d_offset(pgd_k, vaddr);
-                               pud[j] = *pud_offset(p4d_k, vaddr);
-                       }
-               }
-               pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
-       }
-
-       __flush_tlb_all();
-       return save_pgd;
-out:
-       efi_call_phys_epilog(save_pgd);
-       return NULL;
-}
-
-void __init efi_call_phys_epilog(pgd_t *save_pgd)
-{
-       /*
-        * After the lock is released, the original page table is restored.
-        */
-       int pgd_idx, i;
-       int nr_pgds;
-       pgd_t *pgd;
-       p4d_t *p4d;
-       pud_t *pud;
-
-       if (!efi_enabled(EFI_OLD_MEMMAP)) {
-               efi_switch_mm(efi_scratch.prev_mm);
-               return;
-       }
-
-       nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
-
-       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
-               pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
-               set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
-
-               if (!pgd_present(*pgd))
-                       continue;
-
-               for (i = 0; i < PTRS_PER_P4D; i++) {
-                       p4d = p4d_offset(pgd,
-                                        pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
-
-                       if (!p4d_present(*p4d))
-                               continue;
-
-                       pud = (pud_t *)p4d_page_vaddr(*p4d);
-                       pud_free(&init_mm, pud);
-               }
-
-               p4d = (p4d_t *)pgd_page_vaddr(*pgd);
-               p4d_free(&init_mm, p4d);
-       }
-
-       kfree(save_pgd);
-
-       __flush_tlb_all();
-       early_code_mapping_set_exec(0);
-}
-
 EXPORT_SYMBOL_GPL(efi_mm);
 
 /*
@@ -211,7 +75,7 @@ int __init efi_alloc_page_tables(void)
        pud_t *pud;
        gfp_t gfp_mask;
 
-       if (efi_enabled(EFI_OLD_MEMMAP))
+       if (efi_have_uv1_memmap())
                return 0;
 
        gfp_mask = GFP_KERNEL | __GFP_ZERO;
@@ -252,7 +116,7 @@ void efi_sync_low_kernel_mappings(void)
        pud_t *pud_k, *pud_efi;
        pgd_t *efi_pgd = efi_mm.pgd;
 
-       if (efi_enabled(EFI_OLD_MEMMAP))
+       if (efi_have_uv1_memmap())
                return;
 
        /*
@@ -346,7 +210,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        unsigned npages;
        pgd_t *pgd = efi_mm.pgd;
 
-       if (efi_enabled(EFI_OLD_MEMMAP))
+       if (efi_have_uv1_memmap())
                return 0;
 
        /*
@@ -373,10 +237,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * as trim_bios_range() will reserve the first page and isolate it away
         * from memory allocators anyway.
         */
-       pf = _PAGE_RW;
-       if (sev_active())
-               pf |= _PAGE_ENC;
-
        if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
                pr_err("Failed to create 1:1 mapping for the first page!\n");
                return 1;
@@ -388,21 +248,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * text and allocate a new stack because we can't rely on the
         * stack pointer being < 4GB.
         */
-       if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
+       if (!efi_is_mixed())
                return 0;
 
        page = alloc_page(GFP_KERNEL|__GFP_DMA32);
-       if (!page)
-               panic("Unable to allocate EFI runtime stack < 4GB\n");
+       if (!page) {
+               pr_err("Unable to allocate EFI runtime stack < 4GB\n");
+               return 1;
+       }
 
-       efi_scratch.phys_stack = virt_to_phys(page_address(page));
-       efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */
+       efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
 
-       npages = (_etext - _text) >> PAGE_SHIFT;
+       npages = (__end_rodata_aligned - _text) >> PAGE_SHIFT;
        text = __pa(_text);
        pfn = text >> PAGE_SHIFT;
 
-       pf = _PAGE_RW | _PAGE_ENC;
+       pf = _PAGE_ENC;
        if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
                pr_err("Failed to map kernel text 1:1\n");
                return 1;
@@ -417,6 +278,22 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
        unsigned long pfn;
        pgd_t *pgd = efi_mm.pgd;
 
+       /*
+        * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
+        * executable images in memory that consist of both R-X and
+        * RW- sections, so we cannot apply read-only or non-exec
+        * permissions just yet. However, modern EFI systems provide
+        * a memory attributes table that describes those sections
+        * with the appropriate restricted permissions, which are
+        * applied in efi_runtime_update_mappings() below. All other
+        * regions can be mapped non-executable at this point, with
+        * the exception of boot services code regions, but those will
+        * be unmapped again entirely in efi_free_boot_services().
+        */
+       if (md->type != EFI_BOOT_SERVICES_CODE &&
+           md->type != EFI_RUNTIME_SERVICES_CODE)
+               flags |= _PAGE_NX;
+
        if (!(md->attribute & EFI_MEMORY_WB))
                flags |= _PAGE_PCD;
 
@@ -434,7 +311,7 @@ void __init efi_map_region(efi_memory_desc_t *md)
        unsigned long size = md->num_pages << PAGE_SHIFT;
        u64 pa = md->phys_addr;
 
-       if (efi_enabled(EFI_OLD_MEMMAP))
+       if (efi_have_uv1_memmap())
                return old_map_region(md);
 
        /*
@@ -449,7 +326,7 @@ void __init efi_map_region(efi_memory_desc_t *md)
         * booting in EFI mixed mode, because even though we may be
         * running a 64-bit kernel, the firmware may only be 32-bit.
         */
-       if (!efi_is_native () && IS_ENABLED(CONFIG_EFI_MIXED)) {
+       if (efi_is_mixed()) {
                md->virt_addr = md->phys_addr;
                return;
        }
@@ -491,26 +368,6 @@ void __init efi_map_region_fixed(efi_memory_desc_t *md)
        __map_region(md, md->virt_addr);
 }
 
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-                                u32 type, u64 attribute)
-{
-       unsigned long last_map_pfn;
-
-       if (type == EFI_MEMORY_MAPPED_IO)
-               return ioremap(phys_addr, size);
-
-       last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
-       if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
-               unsigned long top = last_map_pfn << PAGE_SHIFT;
-               efi_ioremap(top, size - (top - phys_addr), type, attribute);
-       }
-
-       if (!(attribute & EFI_MEMORY_WB))
-               efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
-
-       return (void __iomem *)__va(phys_addr);
-}
-
 void __init parse_efi_setup(u64 phys_addr, u32 data_len)
 {
        efi_setup = phys_addr + sizeof(struct setup_data);
@@ -559,7 +416,7 @@ void __init efi_runtime_update_mappings(void)
 {
        efi_memory_desc_t *md;
 
-       if (efi_enabled(EFI_OLD_MEMMAP)) {
+       if (efi_have_uv1_memmap()) {
                if (__supported_pte_mask & _PAGE_NX)
                        runtime_code_page_mkexec();
                return;
@@ -613,7 +470,7 @@ void __init efi_runtime_update_mappings(void)
 void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
-       if (efi_enabled(EFI_OLD_MEMMAP))
+       if (efi_have_uv1_memmap())
                ptdump_walk_pgd_level(NULL, swapper_pg_dir);
        else
                ptdump_walk_pgd_level(NULL, efi_mm.pgd);
@@ -634,63 +491,74 @@ void efi_switch_mm(struct mm_struct *mm)
        switch_mm(efi_scratch.prev_mm, mm, NULL);
 }
 
-#ifdef CONFIG_EFI_MIXED
-extern efi_status_t efi64_thunk(u32, ...);
-
 static DEFINE_SPINLOCK(efi_runtime_lock);
 
-#define runtime_service32(func)                                                 \
-({                                                                      \
-       u32 table = (u32)(unsigned long)efi.systab;                      \
-       u32 *rt, *___f;                                                  \
-                                                                        \
-       rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime));  \
-       ___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
-       *___f;                                                           \
+/*
+ * DS and ES contain user values.  We need to save them.
+ * The 32-bit EFI code needs a valid DS, ES, and SS.  There's no
+ * need to save the old SS: __KERNEL_DS is always acceptable.
+ */
+#define __efi_thunk(func, ...)                                         \
+({                                                                     \
+       efi_runtime_services_32_t *__rt;                                \
+       unsigned short __ds, __es;                                      \
+       efi_status_t ____s;                                             \
+                                                                       \
+       __rt = (void *)(unsigned long)efi.systab->mixed_mode.runtime;   \
+                                                                       \
+       savesegment(ds, __ds);                                          \
+       savesegment(es, __es);                                          \
+                                                                       \
+       loadsegment(ss, __KERNEL_DS);                                   \
+       loadsegment(ds, __KERNEL_DS);                                   \
+       loadsegment(es, __KERNEL_DS);                                   \
+                                                                       \
+       ____s = efi64_thunk(__rt->func, __VA_ARGS__);                   \
+                                                                       \
+       loadsegment(ds, __ds);                                          \
+       loadsegment(es, __es);                                          \
+                                                                       \
+       ____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;       \
+       ____s;                                                          \
 })
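The final XOR above converts a 32-bit EFI status into the kernel's 64-bit efi_status_t: EFI error codes carry their tag in the most significant bit, which is bit 31 for a 32-bit firmware but bit 63 in a 64-bit status word. Worked through (note that << binds tighter than |):

    /*
     * If bit 31 is set (a 32-bit error status):
     *   ____s & BIT(31)              == BIT(31)
     *   (____s & BIT_ULL(31)) << 32  == BIT_ULL(63)
     *   XOR therefore clears bit 31 and sets bit 63.
     * If bit 31 is clear (success or warning), both terms are zero
     * and the status passes through unchanged.
     */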
 
 /*
  * Switch to the EFI page tables early so that we can access the 1:1
  * runtime services mappings which are not mapped in any other page
- * tables. This function must be called before runtime_service32().
+ * tables.
  *
  * Also, disable interrupts because the IDT points to 64-bit handlers,
  * which aren't going to function correctly when we switch to 32-bit.
  */
-#define efi_thunk(f, ...)                                              \
+#define efi_thunk(func...)                                             \
 ({                                                                     \
        efi_status_t __s;                                               \
-       u32 __func;                                                     \
                                                                        \
        arch_efi_call_virt_setup();                                     \
                                                                        \
-       __func = runtime_service32(f);                                  \
-       __s = efi64_thunk(__func, __VA_ARGS__);                         \
+       __s = __efi_thunk(func);                                        \
                                                                        \
        arch_efi_call_virt_teardown();                                  \
                                                                        \
        __s;                                                            \
 })
 
-efi_status_t efi_thunk_set_virtual_address_map(
-       void *phys_set_virtual_address_map,
-       unsigned long memory_map_size,
-       unsigned long descriptor_size,
-       u32 descriptor_version,
-       efi_memory_desc_t *virtual_map)
+static efi_status_t __init __no_sanitize_address
+efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
+                                 unsigned long descriptor_size,
+                                 u32 descriptor_version,
+                                 efi_memory_desc_t *virtual_map)
 {
        efi_status_t status;
        unsigned long flags;
-       u32 func;
 
        efi_sync_low_kernel_mappings();
        local_irq_save(flags);
 
        efi_switch_mm(&efi_mm);
 
-       func = (u32)(unsigned long)phys_set_virtual_address_map;
-       status = efi64_thunk(func, memory_map_size, descriptor_size,
-                            descriptor_version, virtual_map);
+       status = __efi_thunk(set_virtual_address_map, memory_map_size,
+                            descriptor_size, descriptor_version, virtual_map);
 
        efi_switch_mm(efi_scratch.prev_mm);
        local_irq_restore(flags);
@@ -993,8 +861,11 @@ efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
        return EFI_UNSUPPORTED;
 }
 
-void efi_thunk_runtime_setup(void)
+void __init efi_thunk_runtime_setup(void)
 {
+       if (!IS_ENABLED(CONFIG_EFI_MIXED))
+               return;
+
        efi.get_time = efi_thunk_get_time;
        efi.set_time = efi_thunk_set_time;
        efi.get_wakeup_time = efi_thunk_get_wakeup_time;
@@ -1010,4 +881,46 @@ void efi_thunk_runtime_setup(void)
        efi.update_capsule = efi_thunk_update_capsule;
        efi.query_capsule_caps = efi_thunk_query_capsule_caps;
 }
-#endif /* CONFIG_EFI_MIXED */
+
+efi_status_t __init __no_sanitize_address
+efi_set_virtual_address_map(unsigned long memory_map_size,
+                           unsigned long descriptor_size,
+                           u32 descriptor_version,
+                           efi_memory_desc_t *virtual_map)
+{
+       efi_status_t status;
+       unsigned long flags;
+       pgd_t *save_pgd = NULL;
+
+       if (efi_is_mixed())
+               return efi_thunk_set_virtual_address_map(memory_map_size,
+                                                        descriptor_size,
+                                                        descriptor_version,
+                                                        virtual_map);
+
+       if (efi_have_uv1_memmap()) {
+               save_pgd = efi_uv1_memmap_phys_prolog();
+               if (!save_pgd)
+                       return EFI_ABORTED;
+       } else {
+               efi_switch_mm(&efi_mm);
+       }
+
+       kernel_fpu_begin();
+
+       /* Disable interrupts around EFI calls: */
+       local_irq_save(flags);
+       status = efi_call(efi.systab->runtime->set_virtual_address_map,
+                         memory_map_size, descriptor_size,
+                         descriptor_version, virtual_map);
+       local_irq_restore(flags);
+
+       kernel_fpu_end();
+
+       if (save_pgd)
+               efi_uv1_memmap_phys_epilog(save_pgd);
+       else
+               efi_switch_mm(efi_scratch.prev_mm);
+
+       return status;
+}
index eed8b5b441f8c4e46b72ad36c1f84c5a2633afea..75c46e7a809f38ca96bfe5a9ebc473b7c87b5124 100644 (file)
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <asm/page_types.h>
 
-/*
- * efi_call_phys(void *, ...) is a function with variable parameters.
- * All the callers of this function assure that all the parameters are 4-bytes.
- */
-
-/*
- * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save.
- * So we'd better save all of them at the beginning of this function and restore
- * at the end no matter how many we use, because we can not assure EFI runtime
- * service functions will comply with gcc calling convention, too.
- */
+       __INIT
+SYM_FUNC_START(efi_call_svam)
+       push    8(%esp)
+       push    8(%esp)
+       push    %ecx
+       push    %edx
 
-.text
-SYM_FUNC_START(efi_call_phys)
        /*
-        * 0. The function can only be called in Linux kernel. So CS has been
-        * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
-        * the values of these registers are the same. And, the corresponding
-        * GDT entries are identical. So I will do nothing about segment reg
-        * and GDT, but change GDT base register in prolog and epilog.
-        */
-
-       /*
-        * 1. Now I am running with EIP = <physical address> + PAGE_OFFSET.
-        * But to make it smoothly switch from virtual mode to flat mode.
-        * The mapping of lower virtual memory has been created in prolog and
-        * epilog.
+        * Switch to the flat mapped alias of this routine, by jumping to the
+        * address of label '1' after subtracting PAGE_OFFSET from it.
         */
        movl    $1f, %edx
        subl    $__PAGE_OFFSET, %edx
        jmp     *%edx
 1:
 
-       /*
-        * 2. Now on the top of stack is the return
-        * address in the caller of efi_call_phys(), then parameter 1,
-        * parameter 2, ..., param n. To make things easy, we save the return
-        * address of efi_call_phys in a global variable.
-        */
-       popl    %edx
-       movl    %edx, saved_return_addr
-       /* get the function pointer into ECX*/
-       popl    %ecx
-       movl    %ecx, efi_rt_function_ptr
-       movl    $2f, %edx
-       subl    $__PAGE_OFFSET, %edx
-       pushl   %edx
-
-       /*
-        * 3. Clear PG bit in %CR0.
-        */
+       /* disable paging */
        movl    %cr0, %edx
        andl    $0x7fffffff, %edx
        movl    %edx, %cr0
-       jmp     1f
-1:
 
-       /*
-        * 4. Adjust stack pointer.
-        */
+       /* convert the stack pointer to a flat mapped address */
        subl    $__PAGE_OFFSET, %esp
 
-       /*
-        * 5. Call the physical function.
-        */
-       jmp     *%ecx
+       /* call the EFI routine */
+       call    *(%eax)
 
-2:
-       /*
-        * 6. After EFI runtime service returns, control will return to
-        * following instruction. We'd better readjust stack pointer first.
-        */
-       addl    $__PAGE_OFFSET, %esp
+       /* convert ESP back to a kernel VA, and pop the outgoing args */
+       addl    $__PAGE_OFFSET + 16, %esp
 
-       /*
-        * 7. Restore PG bit
-        */
+       /* re-enable paging */
        movl    %cr0, %edx
        orl     $0x80000000, %edx
        movl    %edx, %cr0
-       jmp     1f
-1:
-       /*
-        * 8. Now restore the virtual mode from flat mode by
-        * adding EIP with PAGE_OFFSET.
-        */
-       movl    $1f, %edx
-       jmp     *%edx
-1:
-
-       /*
-        * 9. Balance the stack. And because EAX contain the return value,
-        * we'd better not clobber it.
-        */
-       leal    efi_rt_function_ptr, %edx
-       movl    (%edx), %ecx
-       pushl   %ecx
 
-       /*
-        * 10. Push the saved return address onto the stack and return.
-        */
-       leal    saved_return_addr, %edx
-       movl    (%edx), %ecx
-       pushl   %ecx
        ret
-SYM_FUNC_END(efi_call_phys)
-.previous
-
-.data
-saved_return_addr:
-       .long 0
-efi_rt_function_ptr:
-       .long 0
+SYM_FUNC_END(efi_call_svam)
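The "flat mapped alias" jump works because the prolog installed a 1:1 mapping of low memory, so every direct-mapped kernel virtual address has a live alias at the same physical address. A minimal sketch of the address arithmetic, in C for illustration only:

    /* Sketch: the 1:1 (physical) alias of a direct-mapped kernel VA,
     * valid only while the identity mapping set up by the caller is
     * still installed. */
    static inline unsigned long flat_alias(unsigned long va)
    {
            return va - __PAGE_OFFSET;
    }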
index b1d2313fe3bfb4dc1c75b268c5e9bbe9e323a346..15da118f04f04aec93b1c561b97fc326e279958b 100644 (file)
@@ -8,41 +8,12 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/msr.h>
-#include <asm/processor-flags.h>
-#include <asm/page_types.h>
+#include <asm/nospec-branch.h>
 
-#define SAVE_XMM                       \
-       mov %rsp, %rax;                 \
-       subq $0x70, %rsp;               \
-       and $~0xf, %rsp;                \
-       mov %rax, (%rsp);               \
-       mov %cr0, %rax;                 \
-       clts;                           \
-       mov %rax, 0x8(%rsp);            \
-       movaps %xmm0, 0x60(%rsp);       \
-       movaps %xmm1, 0x50(%rsp);       \
-       movaps %xmm2, 0x40(%rsp);       \
-       movaps %xmm3, 0x30(%rsp);       \
-       movaps %xmm4, 0x20(%rsp);       \
-       movaps %xmm5, 0x10(%rsp)
-
-#define RESTORE_XMM                    \
-       movaps 0x60(%rsp), %xmm0;       \
-       movaps 0x50(%rsp), %xmm1;       \
-       movaps 0x40(%rsp), %xmm2;       \
-       movaps 0x30(%rsp), %xmm3;       \
-       movaps 0x20(%rsp), %xmm4;       \
-       movaps 0x10(%rsp), %xmm5;       \
-       mov 0x8(%rsp), %rsi;            \
-       mov %rsi, %cr0;                 \
-       mov (%rsp), %rsp
-
-SYM_FUNC_START(efi_call)
+SYM_FUNC_START(__efi_call)
        pushq %rbp
        movq %rsp, %rbp
-       SAVE_XMM
+       and $~0xf, %rsp
        mov 16(%rbp), %rax
        subq $48, %rsp
        mov %r9, 32(%rsp)
@@ -50,9 +21,7 @@ SYM_FUNC_START(efi_call)
        mov %r8, %r9
        mov %rcx, %r8
        mov %rsi, %rcx
-       call *%rdi
-       addq $48, %rsp
-       RESTORE_XMM
-       popq %rbp
+       CALL_NOSPEC %rdi
+       leave
        ret
-SYM_FUNC_END(efi_call)
+SYM_FUNC_END(__efi_call)
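__efi_call translates the SysV AMD64 calling convention into the Microsoft x64 convention that UEFI uses; the first SysV argument (%rdi) is the function pointer itself. The register shuffle above, annotated:

    /*
     * SysV (Linux) -> MS x64 (UEFI) argument mapping:
     *
     *   %rdi      -> indirect call target (via CALL_NOSPEC)
     *   %rsi      -> %rcx        (arg 1)
     *   %rdx      -> %rdx        (arg 2, unchanged)
     *   %rcx      -> %r8         (arg 3)
     *   %r8       -> %r9         (arg 4)
     *   %r9       -> 32(%rsp)    (arg 5: first stack slot past the
     *                             32-byte shadow space of the MS ABI)
     *   16(%rbp)  -> 40(%rsp)    (arg 6, taken from the caller's stack)
     */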
index 3189f139470183eb6a00844dc228f27aa0fe71b9..26f0da238c1ca8d7c09626e7f77547056227185d 100644 (file)
 
        .text
        .code64
-SYM_FUNC_START(efi64_thunk)
+SYM_CODE_START(__efi64_thunk)
        push    %rbp
        push    %rbx
 
        /*
         * Switch to 1:1 mapped 32-bit stack pointer.
         */
-       movq    %rsp, efi_saved_sp(%rip)
+       movq    %rsp, %rax
        movq    efi_scratch(%rip), %rsp
+       push    %rax
 
        /*
         * Calculate the physical address of the kernel text.
@@ -41,113 +42,31 @@ SYM_FUNC_START(efi64_thunk)
        movq    $__START_KERNEL_map, %rax
        subq    phys_base(%rip), %rax
 
-       /*
-        * Push some physical addresses onto the stack. This is easier
-        * to do now in a code64 section while the assembler can address
-        * 64-bit values. Note that all the addresses on the stack are
-        * 32-bit.
-        */
-       subq    $16, %rsp
-       leaq    efi_exit32(%rip), %rbx
-       subq    %rax, %rbx
-       movl    %ebx, 8(%rsp)
-
-       leaq    __efi64_thunk(%rip), %rbx
+       leaq    1f(%rip), %rbp
+       leaq    2f(%rip), %rbx
+       subq    %rax, %rbp
        subq    %rax, %rbx
-       call    *%rbx
-
-       movq    efi_saved_sp(%rip), %rsp
-       pop     %rbx
-       pop     %rbp
-       retq
-SYM_FUNC_END(efi64_thunk)
 
-/*
- * We run this function from the 1:1 mapping.
- *
- * This function must be invoked with a 1:1 mapped stack.
- */
-SYM_FUNC_START_LOCAL(__efi64_thunk)
-       movl    %ds, %eax
-       push    %rax
-       movl    %es, %eax
-       push    %rax
-       movl    %ss, %eax
-       push    %rax
-
-       subq    $32, %rsp
-       movl    %esi, 0x0(%rsp)
-       movl    %edx, 0x4(%rsp)
-       movl    %ecx, 0x8(%rsp)
-       movq    %r8, %rsi
-       movl    %esi, 0xc(%rsp)
-       movq    %r9, %rsi
-       movl    %esi,  0x10(%rsp)
-
-       leaq    1f(%rip), %rbx
-       movq    %rbx, func_rt_ptr(%rip)
+       subq    $28, %rsp
+       movl    %ebx, 0x0(%rsp)         /* return address */
+       movl    %esi, 0x4(%rsp)
+       movl    %edx, 0x8(%rsp)
+       movl    %ecx, 0xc(%rsp)
+       movl    %r8d, 0x10(%rsp)
+       movl    %r9d, 0x14(%rsp)
 
        /* Switch to 32-bit descriptor */
        pushq   $__KERNEL32_CS
-       leaq    efi_enter32(%rip), %rax
-       pushq   %rax
+       pushq   %rdi                    /* EFI runtime service address */
        lretq
 
-1:     addq    $32, %rsp
-
+1:     movq    24(%rsp), %rsp
        pop     %rbx
-       movl    %ebx, %ss
-       pop     %rbx
-       movl    %ebx, %es
-       pop     %rbx
-       movl    %ebx, %ds
-
-       /*
-        * Convert 32-bit status code into 64-bit.
-        */
-       test    %rax, %rax
-       jz      1f
-       movl    %eax, %ecx
-       andl    $0x0fffffff, %ecx
-       andl    $0xf0000000, %eax
-       shl     $32, %rax
-       or      %rcx, %rax
-1:
-       ret
-SYM_FUNC_END(__efi64_thunk)
-
-SYM_FUNC_START_LOCAL(efi_exit32)
-       movq    func_rt_ptr(%rip), %rax
-       push    %rax
-       mov     %rdi, %rax
-       ret
-SYM_FUNC_END(efi_exit32)
+       pop     %rbp
+       retq
 
        .code32
-/*
- * EFI service pointer must be in %edi.
- *
- * The stack should represent the 32-bit calling convention.
- */
-SYM_FUNC_START_LOCAL(efi_enter32)
-       movl    $__KERNEL_DS, %eax
-       movl    %eax, %ds
-       movl    %eax, %es
-       movl    %eax, %ss
-
-       call    *%edi
-
-       /* We must preserve return value */
-       movl    %eax, %edi
-
-       movl    72(%esp), %eax
-       pushl   $__KERNEL_CS
-       pushl   %eax
-
+2:     pushl   $__KERNEL_CS
+       pushl   %ebp
        lret
-SYM_FUNC_END(efi_enter32)
-
-       .data
-       .balign 8
-func_rt_ptr:           .quad 0
-efi_saved_sp:          .quad 0
+SYM_CODE_END(__efi64_thunk)
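Before far-returning into the firmware, the rewritten thunk lays out a plain 32-bit call frame on the 1:1 mapped stack. A hypothetical struct, read off the movl stores above purely for illustration:

    /* Illustration only -- not a struct the kernel defines. */
    struct efi32_frame {
            u32 ret;        /* 0x00: physical alias of label '2', the
                             *       trampoline back into 64-bit mode */
            u32 arg1;       /* 0x04: from %esi */
            u32 arg2;       /* 0x08: from %edx */
            u32 arg3;       /* 0x0c: from %ecx */
            u32 arg4;       /* 0x10: from %r8d */
            u32 arg5;       /* 0x14: from %r9d */
    };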
index 7675cf754d9090f067be7c2d1eb24a74d81d7ea3..88d32c06cffafe3c1777d05fc20b7d2871a34ba9 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
 #include <asm/cpu_device_id.h>
+#include <asm/realmode.h>
 #include <asm/reboot.h>
 
 #define EFI_MIN_RESERVE 5120
@@ -243,7 +244,7 @@ EXPORT_SYMBOL_GPL(efi_query_variable_store);
  */
 void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
 {
-       phys_addr_t new_phys, new_size;
+       struct efi_memory_map_data data = { 0 };
        struct efi_mem_range mr;
        efi_memory_desc_t md;
        int num_entries;
@@ -260,10 +261,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
                return;
        }
 
-       /* No need to reserve regions that will never be freed. */
-       if (md.attribute & EFI_MEMORY_RUNTIME)
-               return;
-
        size += addr % EFI_PAGE_SIZE;
        size = round_up(size, EFI_PAGE_SIZE);
        addr = round_down(addr, EFI_PAGE_SIZE);
@@ -275,24 +272,23 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
        num_entries = efi_memmap_split_count(&md, &mr.range);
        num_entries += efi.memmap.nr_map;
 
-       new_size = efi.memmap.desc_size * num_entries;
-
-       new_phys = efi_memmap_alloc(num_entries);
-       if (!new_phys) {
+       if (efi_memmap_alloc(num_entries, &data) != 0) {
                pr_err("Could not allocate boot services memmap\n");
                return;
        }
 
-       new = early_memremap(new_phys, new_size);
+       new = early_memremap(data.phys_map, data.size);
        if (!new) {
                pr_err("Failed to map new boot services memmap\n");
                return;
        }
 
        efi_memmap_insert(&efi.memmap, new, &mr);
-       early_memunmap(new, new_size);
+       early_memunmap(new, data.size);
 
-       efi_memmap_install(new_phys, num_entries);
+       efi_memmap_install(&data);
+       e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
+       e820__update_table(e820_table);
 }
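The new efi_memmap_alloc()/efi_memmap_install() API hands around a descriptor instead of a bare (phys, size) pair. A hedged sketch of its shape, with only the two fields actually used in this hunk and everything else elided:

    /* Inferred shape, for orientation only; the real definition lives
     * in the EFI headers. */
    struct efi_memory_map_data {
            phys_addr_t     phys_map;   /* physical address of the new map */
            unsigned long   size;       /* size of the new map in bytes    */
            /* allocation-method bookkeeping elided */
    };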
 
 /*
@@ -386,10 +382,10 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
 
        /*
         * To Do: Remove this check after adding functionality to unmap EFI boot
-        * services code/data regions from direct mapping area because
-        * "efi=old_map" maps EFI regions in swapper_pg_dir.
+        * services code/data regions from direct mapping area because the UV1
+        * memory map maps EFI regions in swapper_pg_dir.
         */
-       if (efi_enabled(EFI_OLD_MEMMAP))
+       if (efi_have_uv1_memmap())
                return;
 
        /*
@@ -397,7 +393,7 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
         * EFI runtime calls, hence don't unmap EFI boot services code/data
         * regions.
         */
-       if (!efi_is_native())
+       if (efi_is_mixed())
                return;
 
        if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
@@ -409,7 +405,7 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
 
 void __init efi_free_boot_services(void)
 {
-       phys_addr_t new_phys, new_size;
+       struct efi_memory_map_data data = { 0 };
        efi_memory_desc_t *md;
        int num_entries = 0;
        void *new, *new_md;
@@ -464,14 +460,12 @@ void __init efi_free_boot_services(void)
        if (!num_entries)
                return;
 
-       new_size = efi.memmap.desc_size * num_entries;
-       new_phys = efi_memmap_alloc(num_entries);
-       if (!new_phys) {
+       if (efi_memmap_alloc(num_entries, &data) != 0) {
                pr_err("Failed to allocate new EFI memmap\n");
                return;
        }
 
-       new = memremap(new_phys, new_size, MEMREMAP_WB);
+       new = memremap(data.phys_map, data.size, MEMREMAP_WB);
        if (!new) {
                pr_err("Failed to map new EFI memmap\n");
                return;
@@ -495,7 +489,7 @@ void __init efi_free_boot_services(void)
 
        memunmap(new);
 
-       if (efi_memmap_install(new_phys, num_entries)) {
+       if (efi_memmap_install(&data) != 0) {
                pr_err("Could not install new EFI memmap\n");
                return;
        }
@@ -560,7 +554,7 @@ out:
        return ret;
 }
 
-static const struct dmi_system_id sgi_uv1_dmi[] = {
+static const struct dmi_system_id sgi_uv1_dmi[] __initconst = {
        { NULL, "SGI UV1",
                {       DMI_MATCH(DMI_PRODUCT_NAME,     "Stoutland Platform"),
                        DMI_MATCH(DMI_PRODUCT_VERSION,  "1.0"),
@@ -583,8 +577,15 @@ void __init efi_apply_memmap_quirks(void)
        }
 
        /* UV2+ BIOS has a fix for this issue.  UV1 still needs the quirk. */
-       if (dmi_check_system(sgi_uv1_dmi))
-               set_bit(EFI_OLD_MEMMAP, &efi.flags);
+       if (dmi_check_system(sgi_uv1_dmi)) {
+               if (IS_ENABLED(CONFIG_X86_UV)) {
+                       set_bit(EFI_UV1_MEMMAP, &efi.flags);
+               } else {
+                       pr_warn("EFI runtime disabled, needs CONFIG_X86_UV=y on UV1\n");
+                       clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+                       efi_memmap_unmap();
+               }
+       }
 }
 
 /*
@@ -722,7 +723,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
        /*
         * Make sure that an efi runtime service caused the page fault.
         * "efi_mm" cannot be used to check if the page fault had occurred
-        * in the firmware context because efi=old_map doesn't use efi_pgd.
+        * in the firmware context because the UV1 memmap doesn't use efi_pgd.
         */
        if (efi_rts_work.efi_rts_id == EFI_NONE)
                return;
index 6dd25dc5f0279a420aa619662e82e7586ddb22b5..e9d97d52475e15136b1215aba3f95446b7080408 100644 (file)
@@ -29,6 +29,8 @@
 #include <asm/cpu_device_id.h>
 #include <asm/imr.h>
 #include <asm/iosf_mbi.h>
+#include <asm/io.h>
+
 #include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/mm.h>
index 42f879b75f9b9d7536a659c6079ab880cd04daf9..4307830e1b6f027e8dd53c07c463c17aca7d9871 100644 (file)
@@ -14,6 +14,8 @@
 #include <asm-generic/sections.h>
 #include <asm/cpu_device_id.h>
 #include <asm/imr.h>
+#include <asm/io.h>
+
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/types.h>
index ece9cb9c1189bdaf135e1a07375c0334d2f3fc47..607f58147311c7bb763fce47b12fece4c9167458 100644 (file)
@@ -31,13 +31,16 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
                return BIOS_STATUS_UNIMPLEMENTED;
 
        /*
-        * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
+        * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
         * callback method, which uses efi_call() directly, with the kernel page tables:
         */
-       if (unlikely(efi_enabled(EFI_OLD_MEMMAP)))
+       if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
+               kernel_fpu_begin();
                ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
-       else
+               kernel_fpu_end();
+       } else {
                ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+       }
 
        return ret;
 }
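The kernel_fpu_begin()/kernel_fpu_end() bracket is new here because the rewritten __efi_call (see the efi_stub_64.S hunk above) no longer saves and restores XMM registers itself; firmware is free to clobber extended FP/SIMD state, so direct efi_call() users must now protect it:

    kernel_fpu_begin();   /* save task FPU/SIMD state, disable preemption */
    ret = efi_call((void *)__va(tab->function),
                   (u64)which, a1, a2, a3, a4, a5);
    kernel_fpu_end();     /* restore task FPU/SIMD state */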
@@ -214,3 +217,163 @@ int uv_bios_init(void)
        pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
        return 0;
 }
+
+static void __init early_code_mapping_set_exec(int executable)
+{
+       efi_memory_desc_t *md;
+
+       if (!(__supported_pte_mask & _PAGE_NX))
+               return;
+
+       /* Make EFI service code area executable */
+       for_each_efi_memory_desc(md) {
+               if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+                   md->type == EFI_BOOT_SERVICES_CODE)
+                       efi_set_executable(md, executable);
+       }
+}
+
+void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
+{
+       /*
+        * Restore the original page table and free the page tables
+        * allocated by efi_uv1_memmap_phys_prolog():
+        */
+       int pgd_idx, i;
+       int nr_pgds;
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+
+       nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
+
+       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+               pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
+               set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
+
+               if (!pgd_present(*pgd))
+                       continue;
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       p4d = p4d_offset(pgd,
+                                        pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+                       if (!p4d_present(*p4d))
+                               continue;
+
+                       pud = (pud_t *)p4d_page_vaddr(*p4d);
+                       pud_free(&init_mm, pud);
+               }
+
+               p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+               p4d_free(&init_mm, p4d);
+       }
+
+       kfree(save_pgd);
+
+       __flush_tlb_all();
+       early_code_mapping_set_exec(0);
+}
+
+pgd_t * __init efi_uv1_memmap_phys_prolog(void)
+{
+       unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+       pgd_t *save_pgd, *pgd_k, *pgd_efi;
+       p4d_t *p4d, *p4d_k, *p4d_efi;
+       pud_t *pud;
+
+       int pgd;
+       int n_pgds, i, j;
+
+       early_code_mapping_set_exec(1);
+
+       n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
+       save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
+       if (!save_pgd)
+               return NULL;
+
+       /*
+        * Build 1:1 identity mapping for UV1 memmap usage. Note that
+        * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+        * it is PUD_SIZE aligned with KASLR enabled. So for a given physical
+        * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
+        * PUD entry of __va(X) to fill in the PUD entry of X to build the 1:1
+        * mapping. This means we can only reuse the PMD tables of the direct
+        * mapping here.
+        */
+       for (pgd = 0; pgd < n_pgds; pgd++) {
+               addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+               vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+               pgd_efi = pgd_offset_k(addr_pgd);
+               save_pgd[pgd] = *pgd_efi;
+
+               p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+               if (!p4d) {
+                       pr_err("Failed to allocate p4d table!\n");
+                       goto out;
+               }
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       addr_p4d = addr_pgd + i * P4D_SIZE;
+                       p4d_efi = p4d + p4d_index(addr_p4d);
+
+                       pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+                       if (!pud) {
+                               pr_err("Failed to allocate pud table!\n");
+                               goto out;
+                       }
+
+                       for (j = 0; j < PTRS_PER_PUD; j++) {
+                               addr_pud = addr_p4d + j * PUD_SIZE;
+
+                               if (addr_pud > (max_pfn << PAGE_SHIFT))
+                                       break;
+
+                               vaddr = (unsigned long)__va(addr_pud);
+
+                               pgd_k = pgd_offset_k(vaddr);
+                               p4d_k = p4d_offset(pgd_k, vaddr);
+                               pud[j] = *pud_offset(p4d_k, vaddr);
+                       }
+               }
+               pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
+       }
+
+       __flush_tlb_all();
+       return save_pgd;
+out:
+       efi_uv1_memmap_phys_epilog(save_pgd);
+       return NULL;
+}
+
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
+                                u32 type, u64 attribute)
+{
+       unsigned long last_map_pfn;
+
+       if (type == EFI_MEMORY_MAPPED_IO)
+               return ioremap(phys_addr, size);
+
+       last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+       if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
+               unsigned long top = last_map_pfn << PAGE_SHIFT;
+               efi_ioremap(top, size - (top - phys_addr), type, attribute);
+       }
+
+       if (!(attribute & EFI_MEMORY_WB))
+               efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
+
+       return (void __iomem *)__va(phys_addr);
+}
+
+static int __init arch_parse_efi_cmdline(char *str)
+{
+       if (!str) {
+               pr_warn("need at least one option\n");
+               return -EINVAL;
+       }
+
+       if (!efi_is_mixed() && parse_option_str(str, "old_map"))
+               set_bit(EFI_UV1_MEMMAP, &efi.flags);
+
+       return 0;
+}
+early_param("efi", arch_parse_efi_cmdline);
index 5bd949da7a4a5deb659f19e849dcd6036a2843ac..ac8eee093f9cd0fa112fe89e945202eb60203a84 100644 (file)
@@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info,
        return 0;
 }
 
-int arch_copy_tls(struct task_struct *new)
+int arch_set_tls(struct task_struct *new, unsigned long tls)
 {
        struct user_desc info;
        int idx, ret = -EFAULT;
 
-       if (copy_from_user(&info,
-                          (void __user *) UPT_SI(&new->thread.regs.regs),
-                          sizeof(info)))
+       if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
                goto out;
 
        ret = -EINVAL;
index 3a621e0d39253aa30a7d5eadb523cd4a2e1315cb..ebd3855d9b132379b7065222316a948e06b2b72e 100644 (file)
@@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task)
 {
 }
 
-int arch_copy_tls(struct task_struct *t)
+int arch_set_tls(struct task_struct *t, unsigned long tls)
 {
        /*
         * If CLONE_SETTLS is set, we need to save the thread id
-        * (which is argument 5, child_tid, of clone) so it can be set
-        * during context switches.
+        * so it can be set during context switches.
         */
-       t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
+       t->thread.arch.fs = tls;
 
        return 0;
 }
index a04551ee5568b958be51634346054e3b5b7c5c93..1abe455d926a2785eb8049576f9ad345fbff2d8e 100644 (file)
@@ -31,7 +31,7 @@ static efi_system_table_t efi_systab_xen __initdata = {
        .con_in_handle  = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
        .con_in         = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
        .con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
-       .con_out        = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
+       .con_out        = NULL,                   /* Not used under Xen. */
        .stderr_handle  = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
        .stderr         = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
        .runtime        = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR,
index c8dbee62ec2ab1ab11ce668388cb9d86f56c94af..bbba8b17829a187a9efdd3dc1de6cfbc398a7ee6 100644 (file)
@@ -67,7 +67,7 @@
 #include <asm/linkage.h>
 #include <asm/page.h>
 #include <asm/init.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/smp.h>
 #include <asm/tlb.h>
 
index 4a3fa295d8fed4a9427ea8c1eecf9832040d766a..1c645172b4b53f14791a9d4949c8665316772088 100644 (file)
@@ -11,7 +11,7 @@ config XTENSA
        select ARCH_USE_QUEUED_SPINLOCKS
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_IPC_PARSE_VERSION
-       select BUILDTIME_EXTABLE_SORT
+       select BUILDTIME_TABLE_SORT
        select CLONE_BACKWARDS
        select COMMON_CLK
        select DMA_REMAP if MMU
@@ -24,6 +24,7 @@ config XTENSA
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
        select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_COPY_THREAD_TLS
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
        select HAVE_EXIT_THREAD
diff --git a/arch/xtensa/include/asm/vmalloc.h b/arch/xtensa/include/asm/vmalloc.h
new file mode 100644 (file)
index 0000000..0eb94b7
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _ASM_XTENSA_VMALLOC_H
+#define _ASM_XTENSA_VMALLOC_H
+
+#endif /* _ASM_XTENSA_VMALLOC_H */
index be897803834a48a3734ccd021780ba82b992a932..2c9e48566e483cc359cf3a071dd26215d0f82dea 100644 (file)
@@ -520,7 +520,7 @@ common_exception_return:
        call4   schedule        # void schedule (void)
        j       1b
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 6:
        _bbci.l a4, TIF_NEED_RESCHED, 4f
 
index 9e1c49134c07eca6a172e4b0f43e5ffeeef61f50..3edecc41ef8c36ffefb6a6b6ac1be62c1d9a296d 100644 (file)
@@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
  * involved.  Much simpler to just not copy those live frames across.
  */
 
-int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
-               unsigned long thread_fn_arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn,
+               unsigned long thread_fn_arg, struct task_struct *p,
+               unsigned long tls)
 {
        struct pt_regs *childregs = task_pt_regs(p);
 
@@ -266,9 +267,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
 
                childregs->syscall = regs->syscall;
 
-               /* The thread pointer is passed in the '4th argument' (= a5) */
                if (clone_flags & CLONE_SETTLS)
-                       childregs->threadptr = childregs->areg[5];
+                       childregs->threadptr = tls;
        } else {
                p->thread.ra = MAKE_RA_FOR_CALL(
                                (unsigned long)ret_from_kernel_thread, 1);
index 87bd68dd7687cfabec3ea80f46908170099e4224..0976e27b8d5dada29cc15b48a22c12d10e1a7167 100644 (file)
@@ -519,12 +519,15 @@ DEFINE_SPINLOCK(die_lock);
 void die(const char * str, struct pt_regs * regs, long err)
 {
        static int die_counter;
+       const char *pr = "";
+
+       if (IS_ENABLED(CONFIG_PREEMPTION))
+               pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
 
        console_verbose();
        spin_lock_irq(&die_lock);
 
-       pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
-               IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
+       pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
        show_regs(regs);
        if (!user_mode(regs))
                show_stack(NULL, (unsigned long*)regs->areg[1]);
index c23094a14a2becb2b06d1f184487c6e0fe5cedab..3bc76bb113a08c236ef93ecf37fdf410a617ea5f 100644 (file)
@@ -66,7 +66,6 @@ config BLK_DEV_BSGLIB
 
 config BLK_DEV_INTEGRITY
        bool "Block layer data integrity support"
-       select CRC_T10DIF if BLK_DEV_INTEGRITY
        ---help---
        Some storage devices allow extra information to be
        stored/retrieved to help protect the data.  The block layer
@@ -77,6 +76,11 @@ config BLK_DEV_INTEGRITY
        T10/SCSI Data Integrity Field or the T13/ATA External Path
        Protection.  If in doubt, say N.
 
+config BLK_DEV_INTEGRITY_T10
+       tristate
+       depends on BLK_DEV_INTEGRITY
+       select CRC_T10DIF
+
 config BLK_DEV_ZONED
        bool "Zoned block device support"
        select MQ_IOSCHED_DEADLINE
index 205a5f2fef17f7c704a5115ac9ddf5072b5b09a4..f6cef6d4363c672cc18f9a2bd893f9ace6de6573 100644 (file)
@@ -27,7 +27,8 @@ obj-$(CONFIG_IOSCHED_BFQ)     += bfq.o
 
 obj-$(CONFIG_BLOCK_COMPAT)     += compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)       += cmdline-parser.o
-obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY_T10)    += t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)       += blk-mq-pci.o
 obj-$(CONFIG_BLK_MQ_VIRTIO)    += blk-mq-virtio.o
 obj-$(CONFIG_BLK_MQ_RDMA)      += blk-mq-rdma.o
index ad4af4aaf2ced06ef0afd06ea82ac371b1b38546..4686b68b48b4cb7b4539af3ec4cb0090bd654d3d 100644 (file)
@@ -427,7 +427,6 @@ void bfq_schedule_dispatch(struct bfq_data *bfqd)
 }
 
 #define bfq_class_idle(bfqq)   ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define bfq_class_rt(bfqq)     ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
 #define bfq_sample_valid(samples)      ((samples) > 80)
 
index 05f0bf4a1144d79fc26e0024221907f1066182c5..ffe9ce9faa892df287c786e41801e441982bb3d9 100644 (file)
@@ -277,10 +277,7 @@ struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
  */
 static u64 bfq_delta(unsigned long service, unsigned long weight)
 {
-       u64 d = (u64)service << WFQ_SERVICE_SHIFT;
-
-       do_div(d, weight);
-       return d;
+       return div64_ul((u64)service << WFQ_SERVICE_SHIFT, weight);
 }
 
 /**
index 9d54aa37ce6c7074be56e99a14bc60d0bc058edd..94d697217887aa1b9e8b214c7606a6f1e7bf8b42 100644 (file)
@@ -538,6 +538,55 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 }
 EXPORT_SYMBOL(zero_fill_bio_iter);
 
+/**
+ * bio_truncate - truncate the bio down to @new_size
+ * @bio:       the bio to be truncated
+ * @new_size:  new size for truncating the bio
+ *
+ * Description:
+ *   Truncate the bio down to @new_size. If bio_op(bio) is REQ_OP_READ,
+ *   zero the truncated part. This function should only be used for
+ *   handling corner cases, such as clamping a bio at the end of the
+ *   device (EOD).
+ */
+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+       struct bio_vec bv;
+       struct bvec_iter iter;
+       unsigned int done = 0;
+       bool truncated = false;
+
+       if (new_size >= bio->bi_iter.bi_size)
+               return;
+
+       if (bio_op(bio) != REQ_OP_READ)
+               goto exit;
+
+       bio_for_each_segment(bv, bio, iter) {
+               if (done + bv.bv_len > new_size) {
+                       unsigned offset;
+
+                       if (!truncated)
+                               offset = new_size - done;
+                       else
+                               offset = 0;
+                       zero_user(bv.bv_page, offset, bv.bv_len - offset);
+                       truncated = true;
+               }
+               done += bv.bv_len;
+       }
+
+ exit:
+       /*
+        * Don't touch the bvec table here; keep it truly immutable, since
+        * filesystem bio users have to retrieve all pages via
+        * bio_for_each_segment_all in their .bi_end_io() callback.
+        *
+        * Truncating the bio by updating .bi_size is enough, since drivers
+        * can construct a correct bvec from the updated .bi_size.
+        */
+       bio->bi_iter.bi_size = new_size;
+}
+
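A minimal usage sketch, assuming a hypothetical caller that clamps a bio running past the device capacity (the end-of-device case the description mentions); maxsector is an assumed local, not part of this patch:

    /* Hypothetical: clamp a READ bio at the last valid sector. */
    if (bio_end_sector(bio) > maxsector)
            bio_truncate(bio,
                         (maxsector - bio->bi_iter.bi_sector) << SECTOR_SHIFT);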
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
@@ -754,10 +803,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
        if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
                return false;
 
-       if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
+       if (bio->bi_vcnt > 0) {
                struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
                if (page_is_mergeable(bv, page, len, off, same_page)) {
+                       if (bio->bi_iter.bi_size > UINT_MAX - len)
+                               return false;
                        bv->bv_len += len;
                        bio->bi_iter.bi_size += len;
                        return true;
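The added length check is the overflow-safe spelling of "would bi_size + len exceed UINT_MAX?"; written the naive way, the sum itself could wrap before the comparison:

    /* bi_size + len > UINT_MAX can wrap; the rearranged, wrap-free
     * form rejects the merge instead of overflowing the byte count. */
    if (bio->bi_iter.bi_size > UINT_MAX - len)
            return false;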
index 708dea92dac8c6037dd1716e3d1bccadf346abac..a229b94d53908aa35ff3f2d8d1fa3532df4c2899 100644 (file)
@@ -1061,26 +1061,6 @@ err_unlock:
        return PTR_ERR(blkg);
 }
 
-/**
- * blkcg_drain_queue - drain blkcg part of request_queue
- * @q: request_queue to drain
- *
- * Called from blk_drain_queue().  Responsible for draining blkcg part.
- */
-void blkcg_drain_queue(struct request_queue *q)
-{
-       lockdep_assert_held(&q->queue_lock);
-
-       /*
-        * @q could be exiting and already have destroyed all blkgs as
-        * indicated by NULL root_blkg.  If so, don't confuse policies.
-        */
-       if (!q->root_blkg)
-               return;
-
-       blk_throtl_drain(q);
-}
-
 /**
  * blkcg_exit_queue - exit and release blkcg part of request_queue
  * @q: request_queue being released
index a1e228752083f411f114cd990fa4cad546e3ca45..089e890ab208fd01efbda68aea4fdb47aaab1353 100644 (file)
@@ -885,11 +885,14 @@ generic_make_request_checks(struct bio *bio)
        }
 
        /*
-        * For a REQ_NOWAIT based request, return -EOPNOTSUPP
-        * if queue is not a request based queue.
+        * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
+        * with BLK_STS_AGAIN status in order to catch -EAGAIN and give
+        * the caller a chance to retry the request gracefully.
         */
-       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
-               goto not_supported;
+       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
+               status = BLK_STS_AGAIN;
+               goto end_io;
+       }
 
        if (should_fail_bio(bio))
                goto end_io;
@@ -1310,7 +1313,7 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
 void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
-       if (blk_do_io_stat(req)) {
+       if (req->part && blk_do_io_stat(req)) {
                const int sgrp = op_stat_group(req_op(req));
                struct hd_struct *part;
 
@@ -1328,7 +1331,8 @@ void blk_account_io_done(struct request *req, u64 now)
         * normal IO on queueing nor completion.  Accounting the
         * containing request is enough.
         */
-       if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
+       if (req->part && blk_do_io_stat(req) &&
+           !(req->rq_flags & RQF_FLUSH_SEQ)) {
                const int sgrp = op_stat_group(req_op(req));
                struct hd_struct *part;
 
@@ -1792,9 +1796,9 @@ int __init blk_dev_init(void)
 {
        BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
-                       FIELD_SIZEOF(struct request, cmd_flags));
+                       sizeof_field(struct request, cmd_flags));
        BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
-                       FIELD_SIZEOF(struct bio, bi_opf));
+                       sizeof_field(struct bio, bi_opf));
 
        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
index 1777346baf06f23d6c4411b77bcd12e573eea3b6..3f977c517960e61dca951551d437b52abcacd588 100644 (file)
@@ -69,6 +69,7 @@
 #include <linux/blkdev.h>
 #include <linux/gfp.h>
 #include <linux/blk-mq.h>
+#include <linux/lockdep.h>
 
 #include "blk.h"
 #include "blk-mq.h"
@@ -505,6 +506,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
        INIT_LIST_HEAD(&fq->flush_queue[1]);
        INIT_LIST_HEAD(&fq->flush_data_in_flight);
 
+       lockdep_register_key(&fq->key);
+       lockdep_set_class(&fq->mq_flush_lock, &fq->key);
+
        return fq;
 
  fail_rq:
@@ -519,6 +523,7 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
        if (!fq)
                return;
 
+       lockdep_unregister_key(&fq->key);
        kfree(fq->flush_rq);
        kfree(fq);
 }
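These two hunks pair a dynamically registered lockdep key with each flush queue, giving every fq->mq_flush_lock its own lock class instead of one static class shared by all queues. The per-instance idiom, sketched:

    /* One dynamic lockdep class per object instance: */
    lockdep_register_key(&fq->key);                  /* at allocation    */
    lockdep_set_class(&fq->mq_flush_lock, &fq->key);
    /* ... */
    lockdep_unregister_key(&fq->key);                /* before kfree(fq) */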
index e01267f9918390f38424337140778c2c983bb7a4..27ca68621137ad5418e647c859b39b12a588f9c2 100644 (file)
@@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 {
        struct ioc *ioc = iocg->ioc;
        struct blkcg_gq *blkg = iocg_to_blkg(iocg);
@@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
        /* clear or maintain depending on the overage */
        if (time_before_eq64(vtime, now->vnow)) {
                blkcg_clear_delay(blkg);
-               return;
+               return false;
        }
        if (!atomic_read(&blkg->use_delay) &&
            time_before_eq64(vtime, now->vnow + vmargin))
-               return;
+               return false;
 
        /* use delay */
        if (cost) {
@@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
        oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
        if (hrtimer_is_queued(&iocg->delay_timer) &&
            abs(oexpires - expires) <= margin_ns / 4)
-               return;
+               return true;
 
        hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
                               margin_ns / 4, HRTIMER_MODE_ABS);
+       return true;
 }
 
 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
@@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
         */
        if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
                atomic64_add(abs_cost, &iocg->abs_vdebt);
-               iocg_kick_delay(iocg, &now, cost);
+               if (iocg_kick_delay(iocg, &now, cost))
+                       blkcg_schedule_throttle(rqos->q,
+                                       (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                return;
        }
 
index 3a62e471d81bd1b70aae0052d4a72101504fbf46..b0790268ed9d9ad804b8b3d396ae16cdc8e229aa 100644 (file)
@@ -151,7 +151,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
        return 0;
 
 unmap_rq:
-       __blk_rq_unmap_user(bio);
+       blk_rq_unmap_user(bio);
 fail:
        rq->bio = NULL;
        return ret;
index d783bdc4559b4de54b19364579326b9f618ebe15..1534ed736363fd807998525d3ddd828e566004f8 100644 (file)
@@ -157,17 +157,20 @@ static inline unsigned get_max_io_size(struct request_queue *q,
        return sectors & (lbs - 1);
 }
 
-static unsigned get_max_segment_size(const struct request_queue *q,
-                                    unsigned offset)
+static inline unsigned get_max_segment_size(const struct request_queue *q,
+                                           struct page *start_page,
+                                           unsigned long offset)
 {
        unsigned long mask = queue_segment_boundary(q);
 
-       /* default segment boundary mask means no boundary limit */
-       if (mask == BLK_SEG_BOUNDARY_MASK)
-               return queue_max_segment_size(q);
+       offset = mask & (page_to_phys(start_page) + offset);
 
-       return min_t(unsigned long, mask - (mask & offset) + 1,
-                    queue_max_segment_size(q));
+       /*
+        * mask - offset + 1 may overflow to zero when the masked physical
+        * address is zero on a 32-bit arch (e.g. the page at physical
+        * address 0); use the queue's max segment size when that happens.
+        */
+       return min_not_zero(mask - offset + 1,
+                       (unsigned long)queue_max_segment_size(q));
 }
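A worked instance of the overflow the comment above guards against, on a 32-bit arch where BLK_SEG_BOUNDARY_MASK is 0xffffffff:

    /*
     *   mask   = 0xffffffff
     *   offset = mask & (page_to_phys(start_page) + offset) == 0
     *   mask - offset + 1 == 0x100000000  ->  0 in a 32-bit unsigned long
     *
     * min_not_zero() then falls back to queue_max_segment_size(q)
     * instead of returning a zero segment size.
     */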
 
 /**
@@ -201,7 +204,8 @@ static bool bvec_split_segs(const struct request_queue *q,
        unsigned seg_size = 0;
 
        while (len && *nsegs < max_segs) {
-               seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+               seg_size = get_max_segment_size(q, bv->bv_page,
+                                               bv->bv_offset + total_len);
                seg_size = min(seg_size, len);
 
                (*nsegs)++;
@@ -419,7 +423,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 
        while (nbytes > 0) {
                unsigned offset = bvec->bv_offset + total;
-               unsigned len = min(get_max_segment_size(q, offset), nbytes);
+               unsigned len = min(get_max_segment_size(q, bvec->bv_page,
+                                       offset), nbytes);
                struct page *page = bvec->bv_page;
 
                /*
index 323c9cb28066bcc385ea77d43efcb89b36a55820..a12b1763508d3194853b6d33a057a62da62ea0e0 100644 (file)
@@ -641,6 +641,14 @@ bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+/**
+ * blk_mq_start_request - Start processing a request
+ * @rq: Pointer to request to be started
+ *
+ * Function used by device drivers to notify the block layer that a request
+ * is going to be processed now, so the block layer can perform
+ * initializations such as starting the timeout timer.
+ */
 void blk_mq_start_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
@@ -1327,6 +1335,12 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
        return (queued + errors) != 0;
 }
 
+/**
+ * __blk_mq_run_hw_queue - Run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ *
+ * Send pending requests to the hardware.
+ */
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
        int srcu_idx;
@@ -1424,6 +1438,15 @@ select_cpu:
        return next_cpu;
 }
 
+/**
+ * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ * @async: Whether to run the queue asynchronously.
+ * @msecs: Milliseconds of delay to wait before running the queue.
+ *
+ * If !@async, try to run the queue now. Otherwise, run the queue
+ * asynchronously with a delay of @msecs.
+ */
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
                                        unsigned long msecs)
 {
@@ -1445,12 +1468,28 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
                                    msecs_to_jiffies(msecs));
 }
 
+/**
+ * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
+ * @hctx: Pointer to the hardware queue to run.
+ * @msecs: Milliseconds of delay to wait before running the queue.
+ *
+ * Run a hardware queue asynchronously with a delay of @msecs.
+ */
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 {
        __blk_mq_delay_run_hw_queue(hctx, true, msecs);
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
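A hedged sketch of the typical driver-side use of this export (the foo_* names
are hypothetical): when ->queue_rq() hits a transient resource shortage, it
returns BLK_STS_RESOURCE and asks the block layer to retry the hardware queue
shortly.

        static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
        {
                if (!foo_has_resources(hctx)) {         /* hypothetical check */
                        blk_mq_delay_run_hw_queue(hctx, 3);     /* retry in ~3 ms */
                        return BLK_STS_RESOURCE;
                }
                return foo_dispatch(bd->rq);            /* hypothetical issue path */
        }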
 
+/**
+ * blk_mq_run_hw_queue - Start to run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ * @async: Whether to run the queue asynchronously.
+ *
+ * Check that the request queue is not quiesced and that there are pending
+ * requests to be sent. If both hold, run the queue to send the requests to
+ * hardware.
+ */
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
        int srcu_idx;
@@ -1474,6 +1513,11 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
+/**
+ * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
+ * @q: Pointer to the request queue to run.
+ * @async: Whether to run the queues asynchronously.
+ */
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
        struct blk_mq_hw_ctx *hctx;
@@ -1625,7 +1669,11 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
-/*
+/**
+ * blk_mq_request_bypass_insert - Insert a request into the dispatch list.
+ * @rq: Pointer to request to be inserted.
+ * @run_queue: Whether to run the hardware queue after inserting the request.
+ *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
@@ -1668,28 +1716,20 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);
 
-       if (rqa->mq_ctx < rqb->mq_ctx)
-               return -1;
-       else if (rqa->mq_ctx > rqb->mq_ctx)
-               return 1;
-       else if (rqa->mq_hctx < rqb->mq_hctx)
-               return -1;
-       else if (rqa->mq_hctx > rqb->mq_hctx)
-               return 1;
+       if (rqa->mq_ctx != rqb->mq_ctx)
+               return rqa->mq_ctx > rqb->mq_ctx;
+       if (rqa->mq_hctx != rqb->mq_hctx)
+               return rqa->mq_hctx > rqb->mq_hctx;
 
        return blk_rq_pos(rqa) > blk_rq_pos(rqb);
 }
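The bool-style comparator is sufficient because list_sort() only tests whether
cmp() returned a value greater than zero ("a sorts after b"); zero and negative
results are treated alike, so equal keys stay in their original (stable) order.
A tiny userspace check of the equivalence (keys are illustrative):

        static int cmp_bool(int a, int b) { return a > b; }               /* new style */
        static int cmp_3way(int a, int b) { return a < b ? -1 : a > b; }  /* old style */

        /* For any a, b: cmp_bool(a, b) > 0 exactly when cmp_3way(a, b) > 0,
         * so both comparators produce the same ordering under list_sort(). */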
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
-       struct blk_mq_hw_ctx *this_hctx;
-       struct blk_mq_ctx *this_ctx;
-       struct request_queue *this_q;
-       struct request *rq;
        LIST_HEAD(list);
-       LIST_HEAD(rq_list);
-       unsigned int depth;
 
+       if (list_empty(&plug->mq_list))
+               return;
        list_splice_init(&plug->mq_list, &list);
 
        if (plug->rq_count > 2 && plug->multiple_queues)
@@ -1697,42 +1737,27 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
        plug->rq_count = 0;
 
-       this_q = NULL;
-       this_hctx = NULL;
-       this_ctx = NULL;
-       depth = 0;
-
-       while (!list_empty(&list)) {
-               rq = list_entry_rq(list.next);
-               list_del_init(&rq->queuelist);
-               BUG_ON(!rq->q);
-               if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
-                       if (this_hctx) {
-                               trace_block_unplug(this_q, depth, !from_schedule);
-                               blk_mq_sched_insert_requests(this_hctx, this_ctx,
-                                                               &rq_list,
-                                                               from_schedule);
-                       }
-
-                       this_q = rq->q;
-                       this_ctx = rq->mq_ctx;
-                       this_hctx = rq->mq_hctx;
-                       depth = 0;
+       do {
+               struct list_head rq_list;
+               struct request *rq, *head_rq = list_entry_rq(list.next);
+               struct list_head *pos = &head_rq->queuelist; /* skip first */
+               struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
+               struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
+               unsigned int depth = 1;
+
+               list_for_each_continue(pos, &list) {
+                       rq = list_entry_rq(pos);
+                       BUG_ON(!rq->q);
+                       if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
+                               break;
+                       depth++;
                }
 
-               depth++;
-               list_add_tail(&rq->queuelist, &rq_list);
-       }
-
-       /*
-        * If 'this_hctx' is set, we know we have entries to complete
-        * on 'rq_list'. Do those.
-        */
-       if (this_hctx) {
-               trace_block_unplug(this_q, depth, !from_schedule);
+               list_cut_before(&rq_list, &list, pos);
+               trace_block_unplug(head_rq->q, depth, !from_schedule);
                blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
                                                from_schedule);
-       }
+       } while (!list_empty(&list));
 }
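The rewrite replaces per-request accumulation with a "sort, then peel off one
run per iteration" loop. A hedged, generic sketch of the list_cut_before()
idiom (struct item, ->key and process_batch() are hypothetical stand-ins for
the request/ctx/hctx plumbing above):

        do {
                LIST_HEAD(batch);
                struct item *first = list_first_entry(&list, struct item, node);
                struct list_head *pos = &first->node;   /* skip first */

                list_for_each_continue(pos, &list)
                        if (list_entry(pos, struct item, node)->key != first->key)
                                break;

                list_cut_before(&batch, &list, pos);    /* moves [first, pos) */
                process_batch(first->key, &batch);      /* one call per run   */
        } while (!list_empty(&list));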
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
@@ -1828,6 +1853,17 @@ insert:
        return BLK_STS_OK;
 }
 
+/**
+ * blk_mq_try_issue_directly - Try to send a request directly to the device driver.
+ * @hctx: Pointer of the associated hardware queue.
+ * @rq: Pointer to request to be sent.
+ * @cookie: Request queue cookie.
+ *
+ * If the device has enough resources to accept a new request now, send the
+ * request directly to the device driver. Otherwise, insert it into the
+ * hctx->dispatch queue so we can try to send it again later. Requests
+ * inserted into this queue have a higher priority.
+ */
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                struct request *rq, blk_qc_t *cookie)
 {
@@ -1905,6 +1941,22 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
        }
 }
 
+/**
+ * blk_mq_make_request - Create and send a request to block device.
+ * @q: Request queue pointer.
+ * @bio: Bio pointer.
+ *
+ * Builds up a request structure from @q and @bio and sends it to the device.
+ * The request may not be queued directly to hardware if:
+ * * This request can be merged with another one
+ * * We want to place the request in the plug queue for possible future merging
+ * * There is an IO scheduler active on this queue
+ *
+ * It will not queue the request if there is an error with the bio, or during
+ * request creation.
+ *
+ * Returns: Request queue cookie.
+ */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
        const int is_sync = op_is_sync(bio->bi_opf);
@@ -1950,7 +2002,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        plug = blk_mq_plug(q, bio);
        if (unlikely(is_flush_fua)) {
-               /* bypass scheduler for flush rq */
+               /* Bypass scheduler for flush requests */
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(data.hctx, true);
        } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
@@ -1978,6 +2030,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                blk_add_rq_to_plug(plug, rq);
        } else if (q->elevator) {
+               /* Insert the request into the IO scheduler queue */
                blk_mq_sched_insert_request(rq, false, true, true);
        } else if (plug && !blk_queue_nomerges(q)) {
                /*
@@ -2004,8 +2057,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                }
        } else if ((q->nr_hw_queues > 1 && is_sync) ||
                        !data.hctx->dispatch_busy) {
+               /*
+                * There is no scheduler and we can try to send directly
+                * to the hardware.
+                */
                blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
+               /* Default case. */
                blk_mq_sched_insert_request(rq, false, true, true);
        }
 
index 5f6dcc7a47bd92745341feee781cb2f2d3fa58b2..c8eda2e7b91e492453a9a454691865f4b8768a5d 100644 (file)
@@ -328,7 +328,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
  *   storage device can address.  The default of 512 covers most
  *   hardware.
  **/
-void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
 {
        q->limits.logical_block_size = size;
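Widening the parameter from unsigned short keeps large logical block sizes from
being silently truncated. A minimal userspace illustration of the failure mode
the change prevents (a 64 KiB size does not fit in 16 bits):

        #include <stdio.h>

        int main(void)
        {
                unsigned short old_size = 65536;        /* converts modulo 2^16 -> 0 */
                unsigned int   new_size = 65536;

                printf("short: %u, int: %u\n", old_size, new_size); /* 0 vs 65536 */
                return 0;
        }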
 
index d00fcfd71dfeae047fab62b37ab117c4fc467e6b..05741c6f618be9162cf642faf83cbbc420023fe8 100644 (file)
@@ -198,7 +198,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
                        break;
                }
 
-               bio->bi_opf = op;
+               bio->bi_opf = op | REQ_SYNC;
                bio->bi_iter.bi_sector = sector;
                sector += zone_sectors;
 
index 6842f28c033e79fd3571c3514bdc4893d43a3871..0b8884353f6bf43694b23a83158781c79df0f046 100644 (file)
@@ -30,6 +30,7 @@ struct blk_flush_queue {
         * at the same time
         */
        struct request          *orig_rq;
+       struct lock_class_key   key;
        spinlock_t              mq_flush_lock;
 };
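A per-object lock_class_key gives every flush queue's mq_flush_lock its own
lockdep class instead of one shared static class. A hedged sketch of how such
a key is typically wired up (the actual call sites are outside this hunk):

        lockdep_register_key(&fq->key);                         /* at allocation */
        spin_lock_init(&fq->mq_flush_lock);
        lockdep_set_class(&fq->mq_flush_lock, &fq->key);        /* per-queue class */
        /* ... */
        lockdep_unregister_key(&fq->key);                       /* at teardown */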
 
index 347dda16c2f46a48d87cc234a57a4ced250a4646..6cbb7926534cd7985e22ce280c4b29c66a900826 100644 (file)
@@ -266,7 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct request *req = bd->rq;
        struct bsg_set *bset =
                container_of(q->tag_set, struct bsg_set, tag_set);
-       int sts = BLK_STS_IOERR;
+       blk_status_t sts = BLK_STS_IOERR;
        int ret;
 
        blk_mq_start_request(req);
index 6ca015f92766e9052d444f9e81ba3c307846166c..3ed7a0f144a994b28aa2bc269cadece8c6a821d8 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/compat.h>
 #include <linux/elevator.h>
 #include <linux/hdreg.h>
+#include <linux/pr.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/types.h>
@@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
         * but we call blkdev_ioctl, which gets the lock for us
         */
        case BLKRRPART:
+       case BLKREPORTZONE:
+       case BLKRESETZONE:
+       case BLKOPENZONE:
+       case BLKCLOSEZONE:
+       case BLKFINISHZONE:
+       case BLKGETZONESZ:
+       case BLKGETNRZONES:
                return blkdev_ioctl(bdev, mode, cmd,
                                (unsigned long)compat_ptr(arg));
        case BLKBSZSET_32:
@@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case BLKTRACETEARDOWN: /* compatible */
                ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
                return ret;
+       case IOC_PR_REGISTER:
+       case IOC_PR_RESERVE:
+       case IOC_PR_RELEASE:
+       case IOC_PR_PREEMPT:
+       case IOC_PR_PREEMPT_ABORT:
+       case IOC_PR_CLEAR:
+               return blkdev_ioctl(bdev, mode, cmd,
+                               (unsigned long)compat_ptr(arg));
        default:
                if (disk->fops->compat_ioctl)
                        ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
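All of the newly listed commands take pointer arguments whose layout is the
same for 32-bit and 64-bit callers, so the compat path only needs the
compat_ptr() conversion before reusing the native handler. A hedged sketch of
the same pattern for a hypothetical driver (FOO_* and the foo_* functions are
made up):

        static long foo_compat_ioctl(struct file *file, unsigned int cmd,
                                     unsigned long arg)
        {
                switch (cmd) {
                case FOO_GET_INFO:      /* compat-safe struct layout */
                case FOO_RESET:
                        return foo_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
                default:
                        return -ENOIOCTLCMD;
                }
        }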
index 1d20c9cf213fe30db1af28baf883ab96a4f99651..564fae77711df6cb1bc45e7fdd3d0b8e17272e3f 100644 (file)
@@ -321,6 +321,24 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
        const char *dname;
        int err;
 
+       /*
+        * Partitions are not supported on zoned block devices that are used as
+        * such.
+        */
+       switch (disk->queue->limits.zoned) {
+       case BLK_ZONED_HM:
+               pr_warn("%s: partitions not supported on host managed zoned block device\n",
+                       disk->disk_name);
+               return ERR_PTR(-ENXIO);
+       case BLK_ZONED_HA:
+               pr_info("%s: disabling host aware zoned block device support due to partitions\n",
+                       disk->disk_name);
+               disk->queue->limits.zoned = BLK_ZONED_NONE;
+               break;
+       case BLK_ZONED_NONE:
+               break;
+       }
+
        err = disk_expand_part_tbl(disk, partno);
        if (err)
                return ERR_PTR(err);
@@ -501,7 +519,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
 
        part = add_partition(disk, p, from, size, state->parts[p].flags,
                             &state->parts[p].info);
-       if (IS_ERR(part)) {
+       if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
                printk(KERN_ERR " %s: p%d could not be added: %ld\n",
                       disk->disk_name, p, -PTR_ERR(part));
                return true;
@@ -540,10 +558,10 @@ int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
        }
 
        /*
-        * Partitions are not supported on zoned block devices.
+        * Partitions are not supported on host managed zoned block devices.
         */
-       if (bdev_is_zoned(bdev)) {
-               pr_warn("%s: ignoring partition table on zoned block device\n",
+       if (disk->queue->limits.zoned == BLK_ZONED_HM) {
+               pr_warn("%s: ignoring partition table on host managed zoned block device\n",
                        disk->disk_name);
                ret = 0;
                goto out_free_state;
index fe5d970e2e60bb873fe587b7a0163291b4397fa3..a2d97ee1908c9947a38e7ae25ddaf640b04f81c5 100644 (file)
@@ -1233,7 +1233,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
        BUG_ON (!data || !frags);
 
        if (size < 2 * VBLK_SIZE_HEAD) {
-               ldm_error("Value of size is to small.");
+               ldm_error("Value of size is too small.");
                return false;
        }
 
index f4907d941f0345fdead1845f13682a44a4c2a75d..d910534b3a410a62763e2f8fd09a8f2976b62d48 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/t10-pi.h>
 #include <linux/blkdev.h>
 #include <linux/crc-t10dif.h>
+#include <linux/module.h>
 #include <net/checksum.h>
 
 typedef __be16 (csum_fn) (void *, unsigned int);
@@ -280,3 +281,5 @@ const struct blk_integrity_profile t10_pi_type3_ip = {
        .complete_fn            = t10_pi_type3_complete,
 };
 EXPORT_SYMBOL(t10_pi_type3_ip);
+
+MODULE_LICENSE("GPL");
index aded260922684061047a2bceac0aaedc0d5ee8d8..9dc53cf9b1f17fb8fbefe6cb647f8b57f76d4c44 100644 (file)
@@ -436,10 +436,10 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm)
 
        BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
                     sizeof(struct adiantum_request_ctx));
-       subreq_size = max(FIELD_SIZEOF(struct adiantum_request_ctx,
+       subreq_size = max(sizeof_field(struct adiantum_request_ctx,
                                       u.hash_desc) +
                          crypto_shash_descsize(hash),
-                         FIELD_SIZEOF(struct adiantum_request_ctx,
+                         sizeof_field(struct adiantum_request_ctx,
                                       u.streamcipher_req) +
                          crypto_skcipher_reqsize(streamcipher));
 
index d16d893bd1959ca7d69eab2b011faa13c8e410c9..378b18b9bc342a5aa9afb4efb8cf180f0a1edd76 100644 (file)
@@ -470,6 +470,7 @@ static int tpm_key_encrypt(struct tpm_key *tk,
        if (ret < 0)
                goto error_free_tfm;
 
+       ret = -ENOMEM;
        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                goto error_free_tfm;
index 364b9df9d631ff672a8fca0b74ecda0fb5b4ec11..d7f43d4ea925a0dcaa1fbce77dbef3b81fc4bc62 100644 (file)
@@ -184,6 +184,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
+       ret = -ENOMEM;
        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                goto error_free_tfm;
index 808f2b3621068f6db28e7c3083f3176aeb111c09..495a2d1e1460eaaa3bc0e8c45be9660ba8cebd6f 100644 (file)
@@ -347,7 +347,7 @@ static int essiv_aead_init_tfm(struct crypto_aead *tfm)
        if (IS_ERR(aead))
                return PTR_ERR(aead);
 
-       subreq_size = FIELD_SIZEOF(struct essiv_aead_request_ctx, aead_req) +
+       subreq_size = sizeof_field(struct essiv_aead_request_ctx, aead_req) +
                      crypto_aead_reqsize(aead);
 
        tctx->ivoffset = offsetof(struct essiv_aead_request_ctx, aead_req) +
index 002838d23b86672ddf7899ac67dfbaa55f2ebdd6..cc57bab146b5c23dbbb264874879a3d19df54a57 100644 (file)
@@ -241,6 +241,7 @@ config ACPI_CPU_FREQ_PSS
 
 config ACPI_PROCESSOR_CSTATE
        def_bool y
+       depends on ACPI_PROCESSOR
        depends on IA64 || X86
 
 config ACPI_PROCESSOR_IDLE
index 433376e819bbe752d9cd2840e6efd4ad431dfc82..953437a216f63b02b9e9518e9f21e424e47cf835 100644 (file)
@@ -104,7 +104,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
 
        info->gaddr = lpit_native->residency_counter;
        if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-               info->iomem_addr = ioremap_nocache(info->gaddr.address,
+               info->iomem_addr = ioremap(info->gaddr.address,
                                                   info->gaddr.bit_width / 8);
                if (!info->iomem_addr)
                        return;
index 2c4dda0787e849af8034710760393e2b0684e5cd..5379bc3f275d76628742ce2b59e021b9d2535395 100644 (file)
@@ -705,3 +705,185 @@ void __init acpi_processor_init(void)
        acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
        acpi_scan_add_handler(&processor_container_handler);
 }
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+/**
+ * acpi_processor_claim_cst_control - Request _CST control from the platform.
+ */
+bool acpi_processor_claim_cst_control(void)
+{
+       static bool cst_control_claimed;
+       acpi_status status;
+
+       if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
+               return true;
+
+       status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+                                   acpi_gbl_FADT.cst_control, 8);
+       if (ACPI_FAILURE(status)) {
+               pr_warn("ACPI: Failed to claim processor _CST control\n");
+               return false;
+       }
+
+       cst_control_claimed = true;
+       return true;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
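A hedged sketch of the expected call order for a user of these two helpers
(foo_setup_cstates() is hypothetical): claim _CST control from the platform
once, then evaluate _CST for each CPU.

        static int foo_setup_cstates(acpi_handle handle, u32 cpu,
                                     struct acpi_processor_power *power)
        {
                if (!acpi_processor_claim_cst_control())
                        return -ENODEV;         /* SMI handshake failed */

                return acpi_processor_evaluate_cst(handle, cpu, power);
        }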
+
+/**
+ * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
+ * @handle: ACPI handle of the processor object containing the _CST.
+ * @cpu: The numeric ID of the target CPU.
+ * @info: Object to write the C-states information into.
+ *
+ * Extract the C-state information for the given CPU from the output of the _CST
+ * control method under the corresponding ACPI processor object (or processor
+ * device object) and populate @info with it.
+ *
+ * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
+ * acpi_processor_ffh_cstate_probe() to verify them and update the
+ * cpu_cstate_entry data for @cpu.
+ */
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+                               struct acpi_processor_power *info)
+{
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *cst;
+       acpi_status status;
+       u64 count;
+       int last_index = 0;
+       int i, ret = 0;
+
+       status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+       if (ACPI_FAILURE(status)) {
+               acpi_handle_debug(handle, "No _CST\n");
+               return -ENODEV;
+       }
+
+       cst = buffer.pointer;
+
+       /* There must be at least 2 elements. */
+       if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
+               acpi_handle_warn(handle, "Invalid _CST output\n");
+               ret = -EFAULT;
+               goto end;
+       }
+
+       count = cst->package.elements[0].integer.value;
+
+       /* Validate the number of C-states. */
+       if (count < 1 || count != cst->package.count - 1) {
+               acpi_handle_warn(handle, "Inconsistent _CST data\n");
+               ret = -EFAULT;
+               goto end;
+       }
+
+       for (i = 1; i <= count; i++) {
+               union acpi_object *element;
+               union acpi_object *obj;
+               struct acpi_power_register *reg;
+               struct acpi_processor_cx cx;
+
+               /*
+                * If there is not enough space for all C-states, skip the
+                * excess ones and log a warning.
+                */
+               if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
+                       acpi_handle_warn(handle,
+                                        "No room for more idle states (limit: %d)\n",
+                                        ACPI_PROCESSOR_MAX_POWER - 1);
+                       break;
+               }
+
+               memset(&cx, 0, sizeof(cx));
+
+               element = &cst->package.elements[i];
+               if (element->type != ACPI_TYPE_PACKAGE)
+                       continue;
+
+               if (element->package.count != 4)
+                       continue;
+
+               obj = &element->package.elements[0];
+
+               if (obj->type != ACPI_TYPE_BUFFER)
+                       continue;
+
+               reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+               obj = &element->package.elements[1];
+               if (obj->type != ACPI_TYPE_INTEGER)
+                       continue;
+
+               cx.type = obj->integer.value;
+               /*
+                * There are known cases in which the _CST output does not
+                * contain C1, so if the type of the first state found is not
+                * C1, leave an empty slot for C1 to be filled in later.
+                */
+               if (i == 1 && cx.type != ACPI_STATE_C1)
+                       last_index = 1;
+
+               cx.address = reg->address;
+               cx.index = last_index + 1;
+
+               if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+                       if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
+                               /*
+                                * In the majority of cases _CST describes C1 as
+                                * a FIXED_HARDWARE C-state, but if the command
+                                * line forbids using MWAIT, use CSTATE_HALT for
+                                * C1 regardless.
+                                */
+                               if (cx.type == ACPI_STATE_C1 &&
+                                   boot_option_idle_override == IDLE_NOMWAIT) {
+                                       cx.entry_method = ACPI_CSTATE_HALT;
+                                       snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+                               } else {
+                                       cx.entry_method = ACPI_CSTATE_FFH;
+                               }
+                       } else if (cx.type == ACPI_STATE_C1) {
+                               /*
+                                * In the special case of C1, FIXED_HARDWARE can
+                                * be handled by executing the HLT instruction.
+                                */
+                               cx.entry_method = ACPI_CSTATE_HALT;
+                               snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+                       } else {
+                               continue;
+                       }
+               } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+                       cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+                       snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+                                cx.address);
+               } else {
+                       continue;
+               }
+
+               if (cx.type == ACPI_STATE_C1)
+                       cx.valid = 1;
+
+               obj = &element->package.elements[2];
+               if (obj->type != ACPI_TYPE_INTEGER)
+                       continue;
+
+               cx.latency = obj->integer.value;
+
+               obj = &element->package.elements[3];
+               if (obj->type != ACPI_TYPE_INTEGER)
+                       continue;
+
+               memcpy(&info->states[++last_index], &cx, sizeof(cx));
+       }
+
+       acpi_handle_info(handle, "Found %d idle states\n", last_index);
+
+       info->count = last_index;
+
+      end:
+       kfree(buffer.pointer);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
index 2f380e7381d6a67afc7e3c0d8db506092ef6340a..15c5b272e69847c9924ac83b8fc4281874d9d250 100644 (file)
@@ -2187,7 +2187,7 @@ int acpi_video_register(void)
        if (register_count) {
                /*
                 * if the function of acpi_video_register is already called,
-                * don't register the acpi_vide_bus again and return no error.
+                * don't register the acpi_video_bus again and return no error.
                 */
                goto leave;
        }
index 863ade9add6d60232fa6be8434f4992ec60e2949..173447d50acfa7b02c8f3bd5a863408ea9a26ee7 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: acapps - common include for ACPI applications/tools
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
@@ -17,7 +17,7 @@
 /* Common info for tool signons */
 
 #define ACPICA_NAME                 "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT            "Copyright (c) 2000 - 2019 Intel Corporation"
+#define ACPICA_COPYRIGHT            "Copyright (c) 2000 - 2020 Intel Corporation"
 
 #if ACPI_MACHINE_WIDTH == 64
 #define ACPI_WIDTH          " (64-bit version)"
index 54f81eac7ec9d506292cb5ea358171d4117641f5..89101e53324b873d33d00dfcbea6124a882d5e21 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: accommon.h - Common include files for generation of ACPICA source
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d5478cd4a8576f8f3156e3ac94da58bdce35adfd..ede4b9cc9e85b2f64e021849aa9699c61291e683 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: acapps - common include for ACPI applications/tools
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 694cf206fa9a68b5d269b6f6ab0e81442be23c9d..a676daaa2da5093c313f985f855ed1933f3f14d6 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acdebug.h - ACPI/AML debugger
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 82f81501566b0af1f85daed4a42c159441253252..7ba6e308f14645d16d07d519f21fd8c372daa93e 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acdispat.h - dispatcher (parser to interpreter interface)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c8652f91054eb250494076051472b000fb1f3466..79f292687bd61566b0417e1995daf4e361eca40f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acevents.h - Event subcomponent prototypes and defines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index fd3beea9342134e17de3c879909173054ba52252..38ffa2c0a496fca332fb7e732031552af1014faf 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acglobal.h - Declarations for global variables
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index bcf8f7501db748c0b7df22809f081879c157b776..67f282e9e0af17500dde847bc8d4943a51e877be 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: achware.h -- hardware specific interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 20706adbc14883f5b98676f901754f408c3057ef..a6d896cda2a566ef69a74d983f0f6f6c7bd48e35 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acinterp.h - Interpreter subcomponent prototypes and defines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 1ea52576f0a2e490c58cba27108b3eae5f9a8679..af58cd2dc9d379d4862c685b6d634bf89d2d0e4c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: aclocal.h - Internal data types used across the ACPI subsystem
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 283614e82a2028db6dbf2705f3d5cfd1babc09ed..2269e10bc21b41f8a6e4fd8f1c3e10fa077985fc 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acmacros.h - C macros for the entire subsystem.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 7da1864798a0e7fa679c611a68eb7ae75cb6a80b..e618ddfab2fd1aa5b3b3a320a5aa7b81c81a1095 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acnamesp.h - Namespace subcomponent prototypes and defines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 8def0e3d690fdaa69bb58fc78c59229df0412e52..9f0219a8cb985a48897126c667d9f2a99a914037 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acobject.h - Definition of union acpi_operand_object  (Internal object only)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
@@ -260,7 +260,8 @@ struct acpi_object_index_field {
 /* The buffer_field is different in that it is part of a Buffer, not an op_region */
 
 struct acpi_object_buffer_field {
-       ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */
+       ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field;    /* Special case for objects created by create_field() */
+       union acpi_operand_object *buffer_obj;  /* Containing Buffer object */
 };
 
 /******************************************************************************
index 9d78134428e33c348166f6d0dd0d945d2dabb6c2..8825394be9abc5ddc1d515cef39300897acee64f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acopcode.h - AML opcode information for the AML parser and interpreter
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 6e32c97cba6c44176713e13817d07f7ace5b2415..bc00b85c0a8f8cd698953f528098dec3ce5485f2 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: acparser.h - AML Parser subcomponent prototypes and defines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 387163b962a70fcf8a1320cf493bf68604423419..cd0f5df0ea23f1b48c071de96bddf7b28f26cfc4 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acpredef - Information table for ACPI predefined methods and objects
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 422cd8f2b92e9113d579ae88397914b429a86978..6de8a1650d3d6aeaa6008a83c17b314a352945e4 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acresrc.h - Resource Manager function prototypes
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2043dff370b13f1f0920e8d3a1af4fa58b3922b6..4c900c108f3fdbecb0da725be86e2808edf2e366 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acstruct.h - Internal structs
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index dfbf1dbd4033034280db2768c99ca89377f3b16f..734624facda37503ba3f852f649406b33b2c0f43 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: actables.h - ACPI table management
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 5fb50634e08e93d6a7964a086dec6269b07f76b7..7c89b470ec810d9b9a3e2b47b83098d0bb5c86db 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 49e412edd7c6795adeb673375d2c3fbd21014b21..1d541bbac4a3c9e15a203efbdcec3181a61acd4f 100644 (file)
@@ -5,7 +5,7 @@
  *                   Declarations and definitions contained herein are derived
  *                   directly from the ACPI specification.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 7c3bd4ab60fce749bd0f86441e24cb421fd2bf54..e5234e001acfee76de3654fa9f3d305fd3abb179 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: amlresrc.h - AML resource descriptors
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 47d2e50598491964561c302b455c99b19481f8cb..bb9600b867ee860fbc85d4e836cbc9fc2d87438d 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dbhistry - debugger HISTORY command
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index e1632b340182b369ecfb77946f3e863003e52581..aa71f65395d25863e787315137a59b26f7c5bcf8 100644 (file)
@@ -816,7 +816,7 @@ acpi_db_command_dispatch(char *input_buffer,
                if (ACPI_FAILURE(status)
                    || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
                        acpi_os_printf
-                           ("Invalid adress space ID: must be between 0 and %u inclusive\n",
+                           ("Invalid address space ID: must be between 0 and %u inclusive\n",
                             ACPI_NUM_PREDEFINED_REGIONS - 1);
                        return (AE_OK);
                }
index 85b34d02233e3e46cf02c0a86e166eafa4334fe0..ad17f62e51d9957915760ad7a272fd4640d522aa 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: dsargs - Support for execution of dynamic arguments for static
  *                       objects (regions, fields, buffer fields, etc.)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 5034fab9cf69484f70432c93dadcfa79a1ec2688..4b5b6e859f62f9f9abf07bb0904a099f4cfe039a 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: dscontrol - Support for execution control opcodes -
  *                          if/else/while/return
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 0d3e1ced1f5764b1b084be71ba9db0a28c1c1639..63bc5f19fb82c59afe23b324d03b7355a5ea17d6 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dsdebug - Parser/Interpreter interface - debugging
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index faa38a22263ad9438e44c3233924f47a2070fa5e..c901f5aec739abf4b8783fba3c68ac1eb0d04cef 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dsfield - Dispatcher field routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
@@ -243,7 +243,7 @@ cleanup:
  * FUNCTION:    acpi_ds_get_field_names
  *
  * PARAMETERS:  info            - create_field info structure
- *  `           walk_state      - Current method state
+ *              walk_state      - Current method state
  *              arg             - First parser arg for the field name list
  *
  * RETURN:      Status
index a1ffed29903bdb16851676c71e5be1501f13c1f8..9be2a309424cb9ff409c4afd1b2eabb8b79f05f4 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dsinit - Object initialization namespace walk
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index f59b4d944f7fa02c1090c798a40e0ddd92d8000e..cf67caff878abfd415502cf7e1fe3fdaf44ace62 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 179129a2deb174fb2348d521f13f7e2980b83376..c0a14a6a2c203a1c8330910c4649b0391abda6b5 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dsobject - Dispatcher object management routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 10f32b62608eee5df9e26d60bea0f8f0fb6b2520..d9c26e720cb758d4737b23627ef1587fbbf35163 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dsopcode - Dispatcher support for regions and fields
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
@@ -217,6 +217,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
        }
 
        obj_desc->buffer_field.buffer_obj = buffer_desc;
+       obj_desc->buffer_field.is_create_field =
+           aml_opcode == AML_CREATE_FIELD_OP;
 
        /* Reference count for buffer_desc inherits obj_desc count */
 
index 997faa10f615188a66c7df805d4d90f39281830d..d869568d55c23bbe1ba0193a09ba98c05865a386 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dspkginit - Completion of deferred package initialization
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d75aae3045958dfa0f96048fe38f36a08fe3fab9..5e81a1ae44cff9bb52e8f65244e318858e3bcd60 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: dswexec - Dispatcher method execution callbacks;
  *                        dispatch to interpreter.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c88fd31208a5ba9ef9057de5f5b8d32c1148012e..697974e37edfbc1fda6b031d111828dd8c2774bf 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dswload - Dispatcher first pass namespace load callbacks
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
@@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
        ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
                          walk_state));
 
+       /*
+        * Disassembler: handle create field operators here.
+        *
+        * create_buffer_field is a deferred op that is typically processed in load
+        * pass 2. However, disassembly of control method contents walks the parse
+        * tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are processed
+        * in a later walk. This is a problem when there is a control method that
+        * has the same name as the AML_CREATE object. In this case, any use of the
+        * name segment will be detected as a method call rather than a reference
+        * to a buffer field.
+        *
+        * This earlier creation during disassembly solves this issue by inserting
+        * the named object in the ACPI namespace so that references to this name
+        * would be a name string rather than a method call.
+        */
+       if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
+           (walk_state->op_info->flags & AML_CREATE)) {
+               status = acpi_ds_create_buffer_field(op, walk_state);
+               return_ACPI_STATUS(status);
+       }
+
        /* We are only interested in opcodes that have an associated name */
 
        if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
index 935a8e2623e4bfd89b2afb747a9c9346ae699029..b31457ca926cc8ec0a4a0eaab9cff87ef0d3e9d3 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dswload2 - Dispatcher second pass namespace load callbacks
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 39acf7b286da83977363e070fc4ddde84b4eff4d..9c397642fed7335e1600a2e83050899c34ef7187 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dswscope - Scope stack manipulation
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index de79f835a373759bbe115d0a48ba62856b9176e3..809a0c0536b5936d99087708e3b05393039716e0 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: dswstate - Dispatcher parse tree walk management routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 9e2f5a05c066eb2b88ce6cf922bb0f422c8952df..8c83d8c620dc3deb10dc9a5a61b75bd6b2b0251b 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evevent - Fixed Event handling and dispatch
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 5c77bee5d31fbd6a84d3c0093f03410fbc9fa6d2..0ced84ae13e4cd1919497485ec3fdf766511b54d 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evglock - Global Lock support
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 344feba290635e02a588fc9316b49b4aaf16aeae..3e39907fedd9f44235f4cab66c4d624db6d3006d 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evgpe - General Purpose Event handling and dispatch
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 9c7adaa7b582296bf37bbe361040714f31471a5d..132adff1e13161242b8a7421d0c6889e5f8e0627 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evgpeblk - GPE block creation and initialization.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 70d21d5ec5f370bb8dcf386d3b852852e2c7d4aa..6effd8076dcc89e13038ab2a8005eea1c93aae44 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evgpeinit - System GPE initialization and update
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 917892227e09a14f5a0b9ebbaafe2a964cd2f8f3..738873e876ca0f4485f201d02c323b81c16bfa92 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evgpeutil - GPE utilities
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 3ef4e27995f0d12ebccc3a58e99a274c08e11fa9..5884eba047f735004a57ce3aaa684ead06b19550 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evhandler - Support for Address Space handlers
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index aa98fe07cd1b11ac76e6e0f26e2e069ece200264..ce1eda6beb845d32cb89f94bea78b12c88da303f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evmisc - Miscellaneous event manager support functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 1ff126460007d99ca3941919e84e24eee5baafa0..738d4b231f34ae8a4356ce25f5288d5e29364548 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evregion - Operation Region support
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index aee09640d71046525fd9d52627083c7505043dbe..aefc0145e583d76aa06205d6ef1adb83efae0b47 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evrgnini- ACPI address_space (op_region) init
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 279ef0557aa38e29f56ed16a0993c342dc0af2fa..e4e012297eee11fd3fe343e79c3a18ac2c6b3f9a 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evxface - External interfaces for ACPI events
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index e528fe56b75579fed1f41a8a5ceb59351426fb2c..1a15b0087379874259784e7b7262f72f6e9aa8cc 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 04a40d563dd6771edb5bcd7ad12c14eba8f3259a..2c39ff2a7406900ea593f6d7f886253410444379 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 47265b073e6ff48e6519469eebcfe31f0c10a64e..da97fd0c6b51e2a291a186b287b15374ca1b6111 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
  *                         Address Spaces.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c7af07566b7b177e12e9e8c902f390432f15b312..43711412722f35f8b0889851ced3ae803394af63 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exconcat - Concatenate-type AML operators
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 46a8baf28bd0f3ae268cef25cf1ea9dcc00e99c2..68efd704e2dcc7d945968404d09418490e6606d2 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index ca2966bacb50eeb1eeecbf9c2ee5aea6b137320d..50c7aad2e86d46b7a452b7e358ff858b9c8495d2 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exconvrt - Object conversion routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index f376fc00064e0c8cfa6f961da08906e225f19b63..a17482428b463134aeb010504b708e7af827e789 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: excreate - Named object creation
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index b1aeec8cac55090cb952aa3da676a8ff6d3ceca8..a5223dcaee709755641b99d421dbe6ca05f5afd8 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exdebug - Support for stores to the AML Debug Object
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index a9bc938a3b55743803241e8d0fe15c26ed177e95..47a4d9a40d6b72cc5ef932bf62f152c0d3f544a5 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exdump - Interpreter debug output routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d3d2dbfba680c18d533caf861eb26a910dbc0db1..e85eb31e50754c31fa96bdd5e29235c835d4b945 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exfield - AML execution - field_unit read/write
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
@@ -96,7 +96,8 @@ acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length)
  * RETURN:      Status
  *
  * DESCRIPTION: Read from a named field. Returns either an Integer or a
- *              Buffer, depending on the size of the field.
+ *              Buffer, depending on the size of the field and on whether the
+ *              field was created by the create_field() operator.
  *
  ******************************************************************************/
 
@@ -154,12 +155,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
         * the use of arithmetic operators on the returned value if the
         * field size is equal or smaller than an Integer.
         *
+        * However, all buffer fields created by the create_field operator need
+        * to remain buffers to match other AML interpreter implementations.
+        *
         * Note: Field.length is in bits.
         */
        buffer_length =
            (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
 
-       if (buffer_length > acpi_gbl_integer_byte_width) {
+       if (buffer_length > acpi_gbl_integer_byte_width ||
+           (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
+            obj_desc->buffer_field.is_create_field)) {
 
                /* Field is too large for an Integer, create a Buffer instead */
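A worked userspace example of the widened predicate (simplified: it assumes the
object is already known to be a buffer field; names mirror the hunk and values
are illustrative): a 2-byte field read on a 64-bit interpreter normally comes
back as an Integer, but now stays a Buffer when it was made by CreateField().

        #include <stdbool.h>
        #include <stdio.h>

        static bool returns_buffer(unsigned buffer_length, unsigned integer_width,
                                   bool is_create_field)
        {
                return buffer_length > integer_width || is_create_field;
        }

        int main(void)
        {
                printf("%d %d\n", returns_buffer(2, 8, false),  /* 0: Integer */
                                  returns_buffer(2, 8, true));  /* 1: Buffer  */
                return 0;
        }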
 
index 95a0dcb4f7b91accdf9b4f333e1b8e12d246452e..ade35ff1c7ba5626055866a2e01ba2ae578e3ed9 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exfldio - Aml Field I/O
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 60e854965af9862775a5dcbfd57b3839b16b15ab..717e3998fd77f6f0759333f3cb6de54b3ae82c47 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 775cd62af5b3d2fc21d42aa8ed42277e15ae67c0..9ff247cba571bc0dfbce75acf5b07c9eda346564 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exmutex - ASL Mutex Acquire/Release functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 6b76be5212a470e8a5822b66cfadd7acc49d578d..74f8b0d0452bc5c68a781414e5b4f180610d390f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exnames - interpreter/scanner name load/execute
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 06e35ea0982345fe54a2e3161aa6bdd19366198b..a46d685a3ffcf0888571e89cbdde6e0263197adc 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exoparg1 - AML execution - opcodes with 1 argument
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 5e4a31a11df4f65198173d4557a870ff899fed26..03241d18ac1d77b56c0823d815251be16044e83f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exoparg2 - AML execution - opcodes with 2 arguments
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index a4ebce4179308c6fbd8e97da5413a93c109abed4..c8d0d75fc4505722c690bb1047ab22b4bd50dee8 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exoparg3 - AML execution - opcodes with 3 arguments
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 31385a0b2dab9af77ecf3cc5c5b33dd42272139d..55d0fa056fe7a27965b6be0b7fef607ac75c9bed 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exoparg6 - AML execution - opcodes with 6 arguments
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 728d752f7adcd0e99c0c7f575829470e2284f9bb..a4e306690a21bcd09a13a85005ddbf75736e8cac 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exprep - ACPI AML field prep utilities
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c08521194b297f2d210bf654e3abf5886d83f9fa..d15a66de26c07232e4bf9a24d828e3b3e5eae9a5 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exregion - ACPI default op_region (address space) handlers
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index b223d01e6bf8ef84df483a3e4b6f54968866636d..3e4018678c0933f35fffc598126fb873d234573b 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exresnte - AML Interpreter object resolution
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 36da5c0ef69cf060db4205e30f55a750243bb6c8..912a078c60a43e554169133252663708abe9c229 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exresolv - AML Interpreter object resolution
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index bdfe4d33b48370f3027febc93fcb01b09c70c53b..4d1b22971d58daf4150ac10a2249d741c00ab082 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exresop - AML Interpreter operand/object resolution
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c5aa4b0deb70b488f394bfdb9e1017de40c5615e..760bc7cef55afc70d88720c7d2f739a0e69cdd73 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exserial - field_unit support for serial address spaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 7f3c3571c292bba5810df849dcbe74caf821ca3f..3adc0a29d890b259cb86043bc81bf6df5248815f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exstore - AML Interpreter object store support
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 4e43c8277f0709249962bd35a940b8583060a316..8c34f4e2ab8fb25e220f18e40cdac532986863cb 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: exstoren - AML Interpreter object store support,
  *                        Store to Node (namespace object)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index dc9e2b1c1ad95bb476f161d7048e9883fe50d84f..dc66696080a561d4883588fc28547f96804c6687 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exstorob - AML object store support, store to object
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index a538f7799b7872cb7ac0c69ee9fc981a165f88fc..f329b01672bb67f332bb0731bf1056391ff03ecc 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exsystem - Interface to OS services
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index db7f93ca539f665499ad07bb2c6d5d256324a742..832a47885b99d7b415481ef23407c42472f59bb1 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: extrace - Support for interpreter execution tracing
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 75380be1c2ef1e2159f46cb8c951c0fb6514154e..8fefa6feac2f98bd9316feefe2f893a7d0a62103 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: exutils - interpreter/scanner utilities
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 926f7e080f22134dd4859576ea47f0dc950704ea..9b9aac27ff7e8bad6da824928d4bfd8d342fb31d 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index dee3affaca491114b3f393c81ce1d848f8fa9fd8..d9be5d0545d4c854caf0ff78f2e9eb46b853220e 100644 (file)
@@ -4,7 +4,7 @@
  * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
  *                    extended FADT-V5 sleep registers.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 565bd3f29f313ce0b8b67293ab8cb414807ca434..1b4252bdcd0b1a6346a1dd1d375c3405ca7266e8 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: hwgpe - Low level GPE enable/disable/clear functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index b62db8ec446fa503e1cf62cdd090032b7cb93b74..243a25add28f0360f4853477a1c9d70ead8b4295 100644 (file)
@@ -4,7 +4,7 @@
  * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
  *                   original/legacy sleep/PM registers.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2fb9f75d71c54226da6308228c3b880b3af18df8..07473ddfa9a9492fb4e65c3acf79186415abf3b4 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: hwtimer.c - ACPI Power Management Timer Interface
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index cd576153257c603c3c9886deeef137defc1313de..4d94861e60934c9fd07327d27b8871bbede58500 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: hwvalid - I/O request validation
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c4fd971040249cc82bb44af6bf21843aa1b1cabd..134dbfadcd155c6e4e6591f1edb165464a9e94a5 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: hwxface - Public ACPICA hardware interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2919746c904185537c0516a75d327d176feccb03..a4b66f4b2714123f5b30fac0c94a490321125b18 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 0e97ed38973ff7be9efcd5ec91efeab2bf61e21c..d5e8405e9d8f0ad9f4d916d0679b7cd8089824ce 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nsarguments - Validation of args for ACPI predefined methods
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c86d0770ed6e83c0dd3f226e41cb3a66c3a1cfae..c86c82939ebb88d3687749ba48663639fac22a59 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: nsconvert - Object conversions for objects returned by
  *                          predefined methods
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 9ad340f644a1115f85bd5085acb6d999ee961343..994f0b556c6043378caf72724ad634276bf7e859 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nsdump - table dumping routines for debug
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 73e5c83c8c9f8706400b828a7493345a33ffdbba..b691fe20e384ec76ac9a7e16d6d8967bc9636c49 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nsdump - table dumping routines for debug
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 61e9dfc9fe8c05a1f875e6ccec00527b809c50a8..e16f6a0c2c3f17a935af387e4d4dae72701d94db 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nsinit - namespace initialization
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d7c4d6e8e21e62dcffc7186db03f8b44e3609cc7..9ba17891edb652a543fd8b88a4755b679e2f8377 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nsload - namespace loading/expanding/contracting procedures
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index f16cf5e4742cc1585999a73478fbfccf9b174c60..7e74a765e7850ad7d5c9c9b850cbc745b5daef53 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nsparse - namespace interface to AML parser
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2f9d93122d0c2ea59b3103919115c50393b88954..0cea9c363acee958dae66b64fa7d8b3fdb64fdf6 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nspredef - Validation of ACPI predefined methods and objects
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 9a80e3b23496bcff8be5f8c12b83bbf965aa9160..237b3ddeb075705968331b400d025538c023dd44 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nsprepkg - Validation of package objects for predefined names
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index be86fea8e4d48aa08bc0032f582749a1a40ba771..90db2d85e7f5c5c0b3b12e8e5dab9243f9ae6c30 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nsrepair - Repair for objects returned by predefined methods
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 663d85e0adba92fd2cd6f77f7608a9c01dca6bd6..125143c41bb8100bb4f5953ba1070bf400c22414 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: nsrepair2 - Repair for objects returned by specific
  *                          predefined methods
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index b8d007c84d329e86b06ee7c127c26941b6fd087c..e66abdab8f31c2a3e5e2a6e7d170499325e491fe 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
  *                        parents and siblings and Scope manipulation
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index ceea6af79d12135cd00670dcb5cad533beeb3e68..b7f3e8603ad841f8af776d88415a960087a5ca1d 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: nswalk - Functions for walking the ACPI namespace
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 161e60ddfb6937f80d3aee27952b056ffbf78a16..984129dcaa0c1efc46b550dc07077f129fc244c5 100644 (file)
@@ -4,7 +4,7 @@
  * Module Name: nsxfname - Public interfaces to the ACPI subsystem
  *                         ACPI Namespace oriented interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index e62c7897fdf18f7483bd2c4334dc81fe50bf3582..3b40db4ad9f3ec670109542970f5a16a46fccd70 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psargs - Parse AML opcode arguments
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 207805047bc4dcf11fef3eb44b7de8f467f070c2..3cf0687b99157b4b111f32154719a7ae76e16838 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psloop - Main AML parse loop
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index ded2779fc8ea3bde7057a11c04341c70b83c710d..2480c26c517106056bc045cc27b91c20fa36af2f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psobject - Support for parse objects
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 43775c5ce17c535c3f2addb89ec5464be18a7a8b..28af49263ebfae5808a04e569de988d483aab2b5 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psopcode - Parser/Interpreter opcode information table
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 15e7563829f160dff12af9edb2307637420b9d91..ab9327f6a63c6d35889ce4efb5930f820195f174 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psopinfo - AML opcode information functions and dispatch tables
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 9b386530ffbec290c0b2b018fba983f3435c0ea2..c780046bf294146d9baf891b3104cc57b375bacc 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psparse - Parser top level AML parse routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index f153ca804740ee0363915f71aec8705a2c312730..fceb311995e9b06b2289e90068126bc4f52a1042 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psscope - Parser scope stack management routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 22d8a2becdd008a47036dd5efa44aa4d61149634..c8aef069486451122bd3823ffa489404352a9cfd 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: pstree - Parser op tree manipulation/traversal/search
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2512f584fa3c268620036d46e855edeedd4e20c1..00efae2f95ba8872f5f70d0f7d807bd6664b2f34 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psutils - Parser miscellaneous utilities (Parser only)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index cf91841297c2649561be04673b80c09d859e2a07..0fe3adf6b0e547aa59c9942cdb3dd5a9b1060f40 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: pswalk - Parser routines to walk parsed op tree(s)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index ee2ee2c858f2eb3735eb136d85cfb225953f56fb..1bbfc8def3885085755a39712f36b35932215a9d 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: psxface - Parser external interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2cf36451e46ff5f4d6c2c1b21aac852725db3db2..523b1e9b98d4587590cb283723583559478e54a1 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbdata - Table manager data structure functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 0041bfba9abcfac2bcf280927220198244cde5d1..907edc5edba71735c4587c8c48f23ce6cdc1b3de 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbfadt   - FADT table utilities
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index b2abb40023a6588c715f0130a1984c3be0ad3853..56d81e490a5cc14f7753242824bb7c3b83b74387 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbfind   - find table
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index ef1ffd36ab3ff68a3a88bd50736161e38b223d3f..0bb15add2245f908f16617a3ec10d1c88ffe5291 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbinstal - ACPI table installation and removal
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 4764f849cb782fe99dd1073e43dba1fc0b56245d..0b3494ad9a70440eb5d4900a702542bb7cdb8dda 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbprint - Table output utilities
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c5f0b8ec70cc4d788392ece09b9dcf83ae6ad2cd..dfe1ac3ae34a899307a549f8ae2d5ca00511b75f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbutils - ACPI Table utilities
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 1640685bf4ae8fc80ab115f04c00d309ab9eb1ab..f8403d480318adf1d3fbdbcac6cc3a30b60ba786 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbxface - ACPI table-oriented external interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 0782acf85722faf0a4e00c32f2e7554babfb1c75..bcba993d4dacffff8196a4701ee599849961c6a2 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbxfload - Table load/unload external interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index e2859d09ca2e35e490dc820c5b6c4ad91d1f50c7..0edc6ef5d46daaaaa8584220ee2cdafda5ec8e28 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: tbxfroot - Find the root ACPI table (RSDT)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index bb260376bd59573b7b7cbe0ba70e2807d10c0fb7..99fa48722cf686e45f38e2228ddccc4dace8b7f3 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utaddress - op_region address range check
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d64da4d9e8d06225b73344ba1eeffb6db2460354..303ab51b4fcfe92837459b205c47b6b62c6b1c48 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utalloc - local memory allocation routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index f6cd7d4f698b3fff1ea1724a64f8ad39d42a5522..d78656d960e8375a7b89ddc741658b2e34026bc0 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utascii - Utility ascii functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index db897af1de05e6e965fa7552ed8f1a6f10ce739a..f2ec427f4e29213fb00ac536e22d0b9f66daaa0a 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utbuffer - Buffer dump routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 8533fce7fa937fbfd76ebbe800a53477e4be1b17..1b03a2747401e8e781c338fd20c17d5d01e0fac1 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utcache - local cache allocation routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 1fb8327f3c3be0853cb23a8860a0d974193025e9..41bdd0278dd8e756e6cfe511f421d24e9d3907f5 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utcopy - Internal to external object translation utilities
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 5b169b5f0f1a3a098c2061f788bb1718364619c7..0c8cb0612414422ced38cae310fe49905d89e37b 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utdebug - Debug print/trace routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 65beaa23766929974d2576d7ef4d025af202fe1a..befdd13b403b7c0091aaf97cde1d0f7832ac0d6f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utdecode - Utility decoding routines (value-to-string)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 558a9f3b0678228a0e7431ef9410b8b8efa281af..8180d1a458f5b87ff7763adf0cb22c6500df8adf 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: uteval - Object evaluation
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index b0622ec4bb85edfc5fb8aa70b25f189fbae7b96b..e6dcbdc3fc6ec8e3d58f19ba892ae8ab5a709545 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utglobal - Global variables for the ACPI subsystem
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index b6da135d5f41a8b9c1d15e676bff312662853c02..0e02f12513dcb19aeb95a7f89df4d4b948aba282 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: uthex -- Hex/ASCII support functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 30198c828ab6749b192cac4c1b4e0c310650e205..3bb06935a2ad36da6969e81391c9e203016a8e83 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 6f33e7c723276885350836407f102a0237839ea0..fdbc397c038d9a74f5e9f17d793c7ecf2bf17f05 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utinit - Common ACPI subsystem initialization
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 8b4ff11d617abf91a01244ce63392f8346f6f3a2..46be549539e732b3b6a1543883026af5ce1e257a 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utlock - Reader/Writer lock interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index eee97a9026964c2f748ad418ec7f3f9ecf4e3c40..3e60bdac2200648090a22ef023b5f4ab090ef701 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utobject - ACPI object create/delete/size/cache routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index ad2b218039d0f9eee89225e014dbe921afac601d..0a01c08dad8a12064e1e75df35fadb4a4ef0ba56 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utosi - Support for the _OSI predefined control method
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 1b0f68f5ed8c59e49d42524a3af1ca154d4b2124..05fe3470fb93095282087bc89d8b5404a3e95146 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utpredef - support functions for predefined names
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 5839f2fa7400e19ead38b6f7d1eb23387bffe7fe..a874dac7db5cc9ec46b97b422826d8506c15be62 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utprint - Formatted printing routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 14de4d15e6188acca6d0e777208a97ca93039e9e..d366be431a8460382ba6591b7f985bda30132f32 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: uttrack - Memory allocation tracking routines (debug only)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 0a7cf800764313cf508c19e25ac00b5a3c2ac9dd..b8039954b0d1d215a9c12d423fb06b3d190fcbf9 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utuuid -- UUID support functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index f497c4b30e6539c0a83d39c335598f7b6c50de89..ca7c9f0144efdc3e0d3202584e148a0687294cfb 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utxface - External interfaces, miscellaneous utility functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index cf769e94fe0f1dcbb82f38114d702f95bf8e3c1d..653e3bb20036f05f8097d300a7b9d2185c143988 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: utxfinit - External interfaces for ACPICA initialization
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 8906c80175e6841b2b763934f2a003fbe20fd93b..103acbbfcf9a513ff219404e0fe8894df1f563db 100644 (file)
@@ -1180,7 +1180,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
 
        switch (generic->notify.type) {
        case ACPI_HEST_NOTIFY_POLLED:
-               timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE);
+               timer_setup(&ghes->timer, ghes_poll_func, 0);
                ghes_add_timer(ghes);
                break;
        case ACPI_HEST_NOTIFY_EXTERNAL:
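
The flag change above matters because a deferrable timer is allowed to be
serviced late while the CPU is idle, which can delay the detection of polled
hardware errors considerably; a regular timer fires at its programmed expiry.
A minimal sketch of the distinction (kernel timer API):

    /* Regular timer: fires at its programmed expiry even on an idle CPU. */
    timer_setup(&ghes->timer, ghes_poll_func, 0);

    /* Old, deferrable variant: may slip to the next non-deferrable wakeup. */
    timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE);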
index 33f71983e001758ab0c4be5c2caf40f24c9b293d..6078064684c6ca2f64922f3d7357b2314385c8c3 100644 (file)
@@ -298,6 +298,59 @@ out:
        return status;
 }
 
+struct iort_workaround_oem_info {
+       char oem_id[ACPI_OEM_ID_SIZE + 1];
+       char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+       u32 oem_revision;
+};
+
+static bool apply_id_count_workaround;
+
+static struct iort_workaround_oem_info wa_info[] __initdata = {
+       {
+               .oem_id         = "HISI  ",
+               .oem_table_id   = "HIP07   ",
+               .oem_revision   = 0,
+       }, {
+               .oem_id         = "HISI  ",
+               .oem_table_id   = "HIP08   ",
+               .oem_revision   = 0,
+       }
+};
+
+static void __init
+iort_check_id_count_workaround(struct acpi_table_header *tbl)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+               if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+                   !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+                   wa_info[i].oem_revision == tbl->oem_revision) {
+                       apply_id_count_workaround = true;
+                       pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
+                       break;
+               }
+       }
+}
+
+static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
+{
+       u32 map_max = map->input_base + map->id_count;
+
+       /*
+        * The IORT specification revision D (Section 3, table 4, page 9)
+        * defines "Number of IDs" as the number of IDs in the range minus
+        * one, but the IORT code ignored the "minus one", and some firmware
+        * did too, so apply a workaround here to stay compatible with both
+        * spec-compliant and non-compliant firmware.
+        */
+       if (apply_id_count_workaround)
+               map_max--;
+
+       return map_max;
+}
+
 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
                       u32 *rid_out)
 {
@@ -314,8 +367,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
                return -ENXIO;
        }
 
-       if (rid_in < map->input_base ||
-           (rid_in >= map->input_base + map->id_count))
+       if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
                return -ENXIO;
 
        *rid_out = map->output_base + (rid_in - map->input_base);
@@ -1631,5 +1683,6 @@ void __init acpi_iort_init(void)
                return;
        }
 
+       iort_check_id_count_workaround(iort_table);
        iort_init_platform_devices();
 }
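
A standalone worked example (plain C, not kernel code) of the off-by-one the
workaround addresses. Per the spec, id_count holds "number of IDs minus one",
so input_base = 0x1000 with id_count = 0xFF covers 0x1000..0x10FF; firmware
that wrongly stores the full count (0x100) would make the unpatched check
accept 0x1100 as well:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool in_range(uint32_t rid, uint32_t base, uint32_t count,
                         bool workaround)
    {
        uint32_t max = base + count;    /* mirrors iort_get_map_max() */

        if (workaround)
            max--;
        return rid >= base && rid <= max;
    }

    int main(void)
    {
        printf("%d\n", in_range(0x1100, 0x1000, 0x100, false)); /* 1: bogus accept */
        printf("%d\n", in_range(0x1100, 0x1000, 0x100, true));  /* 0: workaround */
        return 0;
    }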
index 8f0e0c8d8c3d902586736b66e5ff7544ffc78582..15cc7d5a6185e09339167df16d451443371801fe 100644 (file)
@@ -38,6 +38,8 @@
 #define PREFIX "ACPI: "
 
 #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
+#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
+       ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
 
 #define ACPI_BATTERY_DEVICE_NAME       "Battery"
 
@@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery)
 
 static bool acpi_battery_is_degraded(struct acpi_battery *battery)
 {
-       return battery->full_charge_capacity && battery->design_capacity &&
+       return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+               ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) &&
                battery->full_charge_capacity < battery->design_capacity;
 }
 
@@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
                                     enum power_supply_property psp,
                                     union power_supply_propval *val)
 {
-       int ret = 0;
+       int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0;
        struct acpi_battery *battery = to_acpi_battery(psy);
 
        if (acpi_battery_present(battery)) {
@@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy,
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
        case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
-               if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+               if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
                        ret = -ENODEV;
                else
                        val->intval = battery->design_capacity * 1000;
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL:
        case POWER_SUPPLY_PROP_ENERGY_FULL:
-               if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+               if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
                        ret = -ENODEV;
                else
                        val->intval = battery->full_charge_capacity * 1000;
@@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy,
                        val->intval = battery->capacity_now * 1000;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
-               if (battery->capacity_now && battery->full_charge_capacity)
-                       val->intval = battery->capacity_now * 100/
-                                       battery->full_charge_capacity;
+               if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
+                       full_capacity = battery->full_charge_capacity;
+               else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+                       full_capacity = battery->design_capacity;
+
+               if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
+                   full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+                       ret = -ENODEV;
                else
-                       val->intval = 0;
+                       val->intval = battery->capacity_now * 100 /
+                                       full_capacity;
                break;
        case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
                if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
@@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = {
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
+static enum power_supply_property charge_battery_full_cap_broken_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
+       POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+       POWER_SUPPLY_PROP_MODEL_NAME,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+       POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
 static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_PRESENT,
@@ -794,20 +817,34 @@ static void __exit battery_hook_exit(void)
 static int sysfs_add_battery(struct acpi_battery *battery)
 {
        struct power_supply_config psy_cfg = { .drv_data = battery, };
+       bool full_cap_broken = false;
+
+       if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+           !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+               full_cap_broken = true;
 
        if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
-               battery->bat_desc.properties = charge_battery_props;
-               battery->bat_desc.num_properties =
-                       ARRAY_SIZE(charge_battery_props);
-       } else if (battery->full_charge_capacity == 0) {
-               battery->bat_desc.properties =
-                       energy_battery_full_cap_broken_props;
-               battery->bat_desc.num_properties =
-                       ARRAY_SIZE(energy_battery_full_cap_broken_props);
+               if (full_cap_broken) {
+                       battery->bat_desc.properties =
+                           charge_battery_full_cap_broken_props;
+                       battery->bat_desc.num_properties =
+                           ARRAY_SIZE(charge_battery_full_cap_broken_props);
+               } else {
+                       battery->bat_desc.properties = charge_battery_props;
+                       battery->bat_desc.num_properties =
+                           ARRAY_SIZE(charge_battery_props);
+               }
        } else {
-               battery->bat_desc.properties = energy_battery_props;
-               battery->bat_desc.num_properties =
-                       ARRAY_SIZE(energy_battery_props);
+               if (full_cap_broken) {
+                       battery->bat_desc.properties =
+                           energy_battery_full_cap_broken_props;
+                       battery->bat_desc.num_properties =
+                           ARRAY_SIZE(energy_battery_full_cap_broken_props);
+               } else {
+                       battery->bat_desc.properties = energy_battery_props;
+                       battery->bat_desc.num_properties =
+                           ARRAY_SIZE(energy_battery_props);
+               }
        }
 
        battery->bat_desc.name = acpi_device_bid(battery->device);
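
A standalone sketch (plain C, not driver code) of the new percentage
computation: prefer the full-charge capacity, fall back to the design
capacity, and fail (the driver returns -ENODEV) when neither that nor the
current charge is usable:

    #include <stdint.h>
    #include <stdio.h>

    #define VALUE_UNKNOWN 0xFFFFFFFFu
    #define CAPACITY_VALID(c) ((c) != 0 && (c) != VALUE_UNKNOWN)

    static int capacity_percent(uint32_t now, uint32_t full, uint32_t design,
                                int *pct)
    {
        uint32_t full_capacity = VALUE_UNKNOWN;

        if (CAPACITY_VALID(full))
            full_capacity = full;
        else if (CAPACITY_VALID(design))
            full_capacity = design;

        if (now == VALUE_UNKNOWN || full_capacity == VALUE_UNKNOWN)
            return -1;

        *pct = now * 100 / full_capacity;
        return 0;
    }

    int main(void)
    {
        int pct;

        if (!capacity_percent(2500, VALUE_UNKNOWN, 5000, &pct))
            printf("capacity: %d%%\n", pct); /* 50, via the design fallback */
        return 0;
    }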
index b758b45737f50ff5ce1d9a816f7a7164ed8525b1..f6925f16c4a2ab27cf5c518d6ca4380872079f43 100644 (file)
@@ -122,6 +122,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
                },
                .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
        },
+       {
+               /*
+                * Razer Blade Stealth 13 late 2019: the LID device only
+                * notifies on close, not on open, and _LID always returns
+                * closed.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"),
+               },
+               .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+       },
        {}
 };
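
For context, a hedged sketch of how a quirk table like this is typically
consumed (the exact variable names in button.c may differ): dmi_first_match()
returns the first matching entry, and its driver_data selects the lid init
mode.

    const struct dmi_system_id *match = dmi_first_match(dmi_lid_quirks);

    if (match)
            lid_init_state = (long)match->driver_data; /* ACPI_BUTTON_LID_INIT_OPEN here */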
 
index 08bb9f2f2d2310367c9fde629aaa62e8955bf0e9..b64c62bfcea566be3a8435760d17e0af56632cde 100644 (file)
@@ -1314,9 +1314,20 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off)
  */
 int acpi_dev_pm_attach(struct device *dev, bool power_on)
 {
+       /*
+        * Skip devices whose ACPI companions match the device IDs below,
+        * because they require special power management handling incompatible
+        * with the generic ACPI PM domain.
+        */
+       static const struct acpi_device_id special_pm_ids[] = {
+               {"PNP0C0B", }, /* Generic ACPI fan */
+               {"INT1044", }, /* Fan for Tiger Lake generation */
+               {"INT3404", }, /* Fan */
+               {}
+       };
        struct acpi_device *adev = ACPI_COMPANION(dev);
 
-       if (!adev)
+       if (!adev || !acpi_match_device_ids(adev, special_pm_ids))
                return 0;
 
        /*
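
The double negative in the new check is easy to misread: acpi_match_device_ids()
returns 0 on a match and a negative error code otherwise, so the condition
reads "no ACPI companion, or the companion is one of the special IDs":

    if (!adev || !acpi_match_device_ids(adev, special_pm_ids))
            return 0;       /* do not attach the generic ACPI PM domain */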
index eb58fc475a033aca861c4900824cc6885d0c32a7..387f27ef3368b1f9e591e0b143331428f36c1241 100644 (file)
@@ -97,6 +97,7 @@ static int dptf_power_remove(struct platform_device *pdev)
 }
 
 static const struct acpi_device_id int3407_device_ids[] = {
+       {"INT1047", 0},
        {"INT3407", 0},
        {"", 0},
 };
index 5c7a90186e3c5d27e4bae3ef36c2b31a5efb5866..1ec7b6900662cadac2bf498b6a9874bd99b92354 100644 (file)
 
 #define INT3401_DEVICE 0X01
 static const struct acpi_device_id int340x_thermal_device_ids[] = {
+       {"INT1040"},
+       {"INT1043"},
+       {"INT1044"},
+       {"INT1047"},
        {"INT3400"},
        {"INT3401", INT3401_DEVICE},
        {"INT3402"},
index d05be13c1022c240b58da328da71b946e0854397..08bc9751fe6620f6e19f4356b7596c4a235feba6 100644 (file)
@@ -1052,29 +1052,21 @@ void acpi_ec_unblock_transactions(void)
 /* --------------------------------------------------------------------------
                                 Event Management
    -------------------------------------------------------------------------- */
-static struct acpi_ec_query_handler *
-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
-{
-       if (handler)
-               kref_get(&handler->kref);
-       return handler;
-}
-
 static struct acpi_ec_query_handler *
 acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
 {
        struct acpi_ec_query_handler *handler;
-       bool found = false;
 
        mutex_lock(&ec->mutex);
        list_for_each_entry(handler, &ec->list, node) {
                if (value == handler->query_bit) {
-                       found = true;
-                       break;
+                       kref_get(&handler->kref);
+                       mutex_unlock(&ec->mutex);
+                       return handler;
                }
        }
        mutex_unlock(&ec->mutex);
-       return found ? acpi_ec_get_query_handler(handler) : NULL;
+       return NULL;
 }
 
 static void acpi_ec_query_handler_release(struct kref *kref)
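
The point of folding acpi_ec_get_query_handler() into the lookup is lifetime:
previously kref_get() ran only after mutex_unlock(), leaving a window in which
the handler could be unregistered and freed. The general shape of the fix,
with hypothetical names:

    /* Take the reference while the lock protecting the list is held. */
    mutex_lock(&lock);
    list_for_each_entry(obj, &objs, node) {
            if (obj->key == key) {
                    kref_get(&obj->ref);    /* safe: still under the lock */
                    mutex_unlock(&lock);
                    return obj;
            }
    }
    mutex_unlock(&lock);
    return NULL;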
index 816b0803f7fb778c9165a34d2009284f6114ddb1..aaf4e8f348cf301cf481ced72a81f8c0644c93e0 100644 (file)
@@ -25,6 +25,7 @@ static int acpi_fan_remove(struct platform_device *pdev);
 
 static const struct acpi_device_id fan_device_ids[] = {
        {"PNP0C0B", 0},
+       {"INT1044", 0},
        {"INT3404", 0},
        {"", 0},
 };
@@ -44,12 +45,16 @@ static const struct dev_pm_ops acpi_fan_pm = {
 #define FAN_PM_OPS_PTR NULL
 #endif
 
+#define ACPI_FPS_NAME_LEN      20
+
 struct acpi_fan_fps {
        u64 control;
        u64 trip_point;
        u64 speed;
        u64 noise_level;
        u64 power;
+       char name[ACPI_FPS_NAME_LEN];
+       struct device_attribute dev_attr;
 };
 
 struct acpi_fan_fif {
@@ -265,6 +270,39 @@ static int acpi_fan_speed_cmp(const void *a, const void *b)
        return fps1->speed - fps2->speed;
 }
 
+static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
+       int count;
+
+       if (fps->control == 0xFFFFFFFF || fps->control > 100)
+               count = snprintf(buf, PAGE_SIZE, "not-defined:");
+       else
+               count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+
+       if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
+               count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+       else
+               count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->trip_point);
+
+       if (fps->speed == 0xFFFFFFFF)
+               count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+       else
+               count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->speed);
+
+       if (fps->noise_level == 0xFFFFFFFF)
+               count += snprintf(&buf[count], PAGE_SIZE, "not-defined:");
+       else
+               count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->noise_level * 100);
+
+       if (fps->power == 0xFFFFFFFF)
+               count += snprintf(&buf[count], PAGE_SIZE, "not-defined\n");
+       else
+               count += snprintf(&buf[count], PAGE_SIZE, "%lld\n", fps->power);
+
+       return count;
+}
+
 static int acpi_fan_get_fps(struct acpi_device *device)
 {
        struct acpi_fan *fan = acpi_driver_data(device);
@@ -295,12 +333,13 @@ static int acpi_fan_get_fps(struct acpi_device *device)
        }
        for (i = 0; i < fan->fps_count; i++) {
                struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
-               struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] };
+               struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name),
+                                               &fan->fps[i] };
                status = acpi_extract_package(&obj->package.elements[i + 1],
                                              &format, &fps);
                if (ACPI_FAILURE(status)) {
                        dev_err(&device->dev, "Invalid _FPS element\n");
-                       break;
+                       goto err;
                }
        }
 
@@ -308,6 +347,24 @@ static int acpi_fan_get_fps(struct acpi_device *device)
        sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
             acpi_fan_speed_cmp, NULL);
 
+       for (i = 0; i < fan->fps_count; ++i) {
+               struct acpi_fan_fps *fps = &fan->fps[i];
+
+               snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
+               fps->dev_attr.show = show_state;
+               fps->dev_attr.store = NULL;
+               fps->dev_attr.attr.name = fps->name;
+               fps->dev_attr.attr.mode = 0444;
+               status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
+               if (status) {
+                       int j;
+
+                       for (j = 0; j < i; ++j)
+                               sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
+                       break;
+               }
+       }
+
 err:
        kfree(obj);
        return status;
@@ -330,14 +387,20 @@ static int acpi_fan_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, fan);
 
        if (acpi_fan_is_acpi4(device)) {
-               if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device))
-                       goto end;
+               result = acpi_fan_get_fif(device);
+               if (result)
+                       return result;
+
+               result = acpi_fan_get_fps(device);
+               if (result)
+                       return result;
+
                fan->acpi4 = true;
        } else {
                result = acpi_device_update_power(device, NULL);
                if (result) {
                        dev_err(&device->dev, "Failed to set initial power state\n");
-                       goto end;
+                       goto err_end;
                }
        }
 
@@ -350,7 +413,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
                                                &fan_cooling_ops);
        if (IS_ERR(cdev)) {
                result = PTR_ERR(cdev);
-               goto end;
+               goto err_end;
        }
 
        dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id);
@@ -365,10 +428,21 @@ static int acpi_fan_probe(struct platform_device *pdev)
        result = sysfs_create_link(&cdev->device.kobj,
                                   &pdev->dev.kobj,
                                   "device");
-       if (result)
+       if (result) {
                dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
+               goto err_end;
+       }
+
+       return 0;
+
+err_end:
+       if (fan->acpi4) {
+               int i;
+
+               for (i = 0; i < fan->fps_count; ++i)
+                       sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+       }
 
-end:
        return result;
 }
 
@@ -376,6 +450,13 @@ static int acpi_fan_remove(struct platform_device *pdev)
 {
        struct acpi_fan *fan = platform_get_drvdata(pdev);
 
+       if (fan->acpi4) {
+               struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+               int i;
+
+               for (i = 0; i < fan->fps_count; ++i)
+                       sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+       }
        sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
        sysfs_remove_link(&fan->cdev->device.kobj, "device");
        thermal_cooling_device_unregister(fan->cdev);
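
Each _FPS entry now becomes a read-only sysfs file named "stateN" whose
show_state() output is "control:trip_point:speed:noise_level:power", with
"not-defined" substituted for fields that are 0xFFFFFFFF (or, for control and
trip_point, out of range). A standalone sketch of that formatting with made-up
values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* One hypothetical performance state; noise_level is undefined. */
        uint64_t control = 25, trip = 0, speed = 300;
        uint64_t noise = 0xFFFFFFFF, power = 1500;
        char buf[128];
        int n = 0;

        n += snprintf(buf + n, sizeof(buf) - n, "%llu:%llu:%llu:",
                      (unsigned long long)control, (unsigned long long)trip,
                      (unsigned long long)speed);
        if (noise == 0xFFFFFFFFu)
            n += snprintf(buf + n, sizeof(buf) - n, "not-defined:");
        else
            n += snprintf(buf + n, sizeof(buf) - n, "%llu:",
                          (unsigned long long)(noise * 100));
        snprintf(buf + n, sizeof(buf) - n, "%llu\n", (unsigned long long)power);

        fputs(buf, stdout); /* prints "25:0:300:not-defined:1500" */
        return 0;
    }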
index f31544d3656e3dfb400ce86d51ea46679eafde3b..4ae93350b70dec24ef5a3e44dd35ebdee874c897 100644 (file)
@@ -98,11 +98,11 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
  *
  * Return: The cache structure and the level we terminated with.
  */
-static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
-                               int local_level,
-                               struct acpi_subtable_header *res,
-                               struct acpi_pptt_cache **found,
-                               int level, int type)
+static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
+                                        unsigned int local_level,
+                                        struct acpi_subtable_header *res,
+                                        struct acpi_pptt_cache **found,
+                                        unsigned int level, int type)
 {
        struct acpi_pptt_cache *cache;
 
@@ -119,7 +119,7 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
                        if (*found != NULL && cache != *found)
                                pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
 
-                       pr_debug("Found cache @ level %d\n", level);
+                       pr_debug("Found cache @ level %u\n", level);
                        *found = cache;
                        /*
                         * continue looking at this node's resource list
@@ -132,16 +132,17 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
        return local_level;
 }
 
-static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr,
-                                                    struct acpi_pptt_processor *cpu_node,
-                                                    int *starting_level, int level,
-                                                    int type)
+static struct acpi_pptt_cache *
+acpi_find_cache_level(struct acpi_table_header *table_hdr,
+                     struct acpi_pptt_processor *cpu_node,
+                     unsigned int *starting_level, unsigned int level,
+                     int type)
 {
        struct acpi_subtable_header *res;
-       int number_of_levels = *starting_level;
+       unsigned int number_of_levels = *starting_level;
        int resource = 0;
        struct acpi_pptt_cache *ret = NULL;
-       int local_level;
+       unsigned int local_level;
 
        /* walk down from processor node */
        while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) {
@@ -321,12 +322,12 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
                                                    unsigned int level,
                                                    struct acpi_pptt_processor **node)
 {
-       int total_levels = 0;
+       unsigned int total_levels = 0;
        struct acpi_pptt_cache *found = NULL;
        struct acpi_pptt_processor *cpu_node;
        u8 acpi_type = acpi_cache_type(type);
 
-       pr_debug("Looking for CPU %d's level %d cache type %d\n",
+       pr_debug("Looking for CPU %d's level %u cache type %d\n",
                 acpi_cpu_id, level, acpi_type);
 
        cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id);
index 2ae95df2e74f8861c0c8a50eec3332f4b0dc3318..dcc289e30166cf78d2c33932dcbe304ee08bc5ff 100644 (file)
@@ -299,164 +299,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
 
 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 {
-       acpi_status status;
-       u64 count;
-       int current_count;
-       int i, ret = 0;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *cst;
+       int ret;
 
        if (nocst)
                return -ENODEV;
 
-       current_count = 0;
-
-       status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
-       if (ACPI_FAILURE(status)) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
-               return -ENODEV;
-       }
-
-       cst = buffer.pointer;
-
-       /* There must be at least 2 elements */
-       if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
-               pr_err("not enough elements in _CST\n");
-               ret = -EFAULT;
-               goto end;
-       }
-
-       count = cst->package.elements[0].integer.value;
+       ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
+       if (ret)
+               return ret;
 
-       /* Validate number of power states. */
-       if (count < 1 || count != cst->package.count - 1) {
-               pr_err("count given by _CST is not valid\n");
-               ret = -EFAULT;
-               goto end;
-       }
+       /*
+        * It is expected that there will be at least 2 states, C1 and
+        * something else (C2 or C3), so fail if that is not the case.
+        */
+       if (pr->power.count < 2)
+               return -EFAULT;
 
-       /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;
-
-       for (i = 1; i <= count; i++) {
-               union acpi_object *element;
-               union acpi_object *obj;
-               struct acpi_power_register *reg;
-               struct acpi_processor_cx cx;
-
-               memset(&cx, 0, sizeof(cx));
-
-               element = &(cst->package.elements[i]);
-               if (element->type != ACPI_TYPE_PACKAGE)
-                       continue;
-
-               if (element->package.count != 4)
-                       continue;
-
-               obj = &(element->package.elements[0]);
-
-               if (obj->type != ACPI_TYPE_BUFFER)
-                       continue;
-
-               reg = (struct acpi_power_register *)obj->buffer.pointer;
-
-               if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
-                   (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
-                       continue;
-
-               /* There should be an easy way to extract an integer... */
-               obj = &(element->package.elements[1]);
-               if (obj->type != ACPI_TYPE_INTEGER)
-                       continue;
-
-               cx.type = obj->integer.value;
-               /*
-                * Some buggy BIOSes won't list C1 in _CST -
-                * Let acpi_processor_get_power_info_default() handle them later
-                */
-               if (i == 1 && cx.type != ACPI_STATE_C1)
-                       current_count++;
-
-               cx.address = reg->address;
-               cx.index = current_count + 1;
-
-               cx.entry_method = ACPI_CSTATE_SYSTEMIO;
-               if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
-                       if (acpi_processor_ffh_cstate_probe
-                                       (pr->id, &cx, reg) == 0) {
-                               cx.entry_method = ACPI_CSTATE_FFH;
-                       } else if (cx.type == ACPI_STATE_C1) {
-                               /*
-                                * C1 is a special case where FIXED_HARDWARE
-                                * can be handled in non-MWAIT way as well.
-                                * In that case, save this _CST entry info.
-                                * Otherwise, ignore this info and continue.
-                                */
-                               cx.entry_method = ACPI_CSTATE_HALT;
-                               snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
-                       } else {
-                               continue;
-                       }
-                       if (cx.type == ACPI_STATE_C1 &&
-                           (boot_option_idle_override == IDLE_NOMWAIT)) {
-                               /*
-                                * In most cases the C1 space_id obtained from
-                                * _CST object is FIXED_HARDWARE access mode.
-                                * But when the option of idle=halt is added,
-                                * the entry_method type should be changed from
-                                * CSTATE_FFH to CSTATE_HALT.
-                                * When the option of idle=nomwait is added,
-                                * the C1 entry_method type should be
-                                * CSTATE_HALT.
-                                */
-                               cx.entry_method = ACPI_CSTATE_HALT;
-                               snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
-                       }
-               } else {
-                       snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
-                                cx.address);
-               }
-
-               if (cx.type == ACPI_STATE_C1) {
-                       cx.valid = 1;
-               }
-
-               obj = &(element->package.elements[2]);
-               if (obj->type != ACPI_TYPE_INTEGER)
-                       continue;
-
-               cx.latency = obj->integer.value;
-
-               obj = &(element->package.elements[3]);
-               if (obj->type != ACPI_TYPE_INTEGER)
-                       continue;
-
-               current_count++;
-               memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
-
-               /*
-                * We support total ACPI_PROCESSOR_MAX_POWER - 1
-                * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
-                */
-               if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
-                       pr_warn("Limiting number of power states to max (%d)\n",
-                               ACPI_PROCESSOR_MAX_POWER);
-                       pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
-                       break;
-               }
-       }
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
-                         current_count));
-
-       /* Validate number of power states discovered */
-       if (current_count < 2)
-               ret = -EFAULT;
-
-      end:
-       kfree(buffer.pointer);
-
-       return ret;
+       return 0;
 }
 
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
@@ -909,7 +769,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
 
 static inline void acpi_processor_cstate_first_run_checks(void)
 {
-       acpi_status status;
        static int first_run;
 
        if (first_run)
@@ -921,13 +780,10 @@ static inline void acpi_processor_cstate_first_run_checks(void)
                          max_cstate);
        first_run++;
 
-       if (acpi_gbl_FADT.cst_control && !nocst) {
-               status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
-                                           acpi_gbl_FADT.cst_control, 8);
-               if (ACPI_FAILURE(status))
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Notifying BIOS of _CST ability failed"));
-       }
+       if (nocst)
+               return;
+
+       acpi_processor_claim_cst_control();
 }
 #else
 
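For reference, the _CST handshake removed in the hunk above is a single SMI port write. A minimal sketch of what the new acpi_processor_claim_cst_control() helper is assumed to consolidate, using only names visible in the removed lines (the helper itself is defined elsewhere in this series):

void acpi_processor_claim_cst_control(void)
{
        acpi_status status;

        if (!acpi_gbl_FADT.cst_control)
                return;

        /* Tell the BIOS the OS is taking over _CST-based C-states. */
        status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
                                    acpi_gbl_FADT.cst_control, 8);
        if (ACPI_FAILURE(status))
                pr_warn("ACPI: claiming _CST control failed\n");
}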
index 6747a279621bf3eb7b494b18e856fb8185a65965..4398806298398a78a3160ea13d212a2627a3f9c3 100644 (file)
@@ -61,8 +61,11 @@ static struct notifier_block tts_notifier = {
 static int acpi_sleep_prepare(u32 acpi_state)
 {
 #ifdef CONFIG_ACPI_SLEEP
+       unsigned long acpi_wakeup_address;
+
        /* do we have a wakeup address for S2 and S3? */
        if (acpi_state == ACPI_STATE_S3) {
+               acpi_wakeup_address = acpi_get_wakeup_address();
                if (!acpi_wakeup_address)
                        return -EFAULT;
                acpi_set_waking_vector(acpi_wakeup_address);
index 31014c7d3793906be9ee4eeba48c48f199a7f57d..419f814d596ac7f36113d2ee271d2d43b84ff988 100644 (file)
@@ -302,6 +302,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
                },
        },
+       {
+        .callback = video_detect_force_native,
+        .ident = "Lenovo E41-25",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "81FS"),
+               },
+       },
+       {
+        .callback = video_detect_force_native,
+        .ident = "Lenovo E41-45",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
+               },
+       },
        {
         /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
         .callback = video_detect_force_native,
@@ -336,6 +352,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
                },
        },
+
+       /*
+        * Desktops which falsely report a backlight and which our heuristics
+        * for this do not catch.
+        */
        {
         .callback = video_detect_force_none,
         .ident = "Dell OptiPlex 9020M",
@@ -344,6 +365,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
                },
        },
+       {
+        .callback = video_detect_force_none,
+        .ident = "MSI MS-7721",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
+               },
+       },
        { },
 };
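Each DMI quirk added above follows the same shape: a callback selecting the backlight handling plus DMI match strings identifying the machine. A hypothetical further entry (vendor and product strings invented purely for illustration) would read:

        {
         .callback = video_detect_force_native,
         .ident = "Example Vendor Model X",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
                DMI_MATCH(DMI_PRODUCT_NAME, "MODEL-X"),
                },
        },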
 
index e9bc9fcc7ea5cb8213fdf73bc855b86f2c947341..b2dad43dbf82962b76c47c0a87d44bd15541e1de 100644 (file)
@@ -3310,7 +3310,7 @@ static void binder_transaction(struct binder_proc *proc,
                        binder_size_t parent_offset;
                        struct binder_fd_array_object *fda =
                                to_binder_fd_array_object(hdr);
-                       size_t num_valid = (buffer_offset - off_start_offset) *
+                       size_t num_valid = (buffer_offset - off_start_offset) /
                                                sizeof(binder_size_t);
                        struct binder_buffer_object *parent =
                                binder_validate_ptr(target_proc, t->buffer,
@@ -3384,7 +3384,7 @@ static void binder_transaction(struct binder_proc *proc,
                                t->buffer->user_data + sg_buf_offset;
                        sg_buf_offset += ALIGN(bp->length, sizeof(u64));
 
-                       num_valid = (buffer_offset - off_start_offset) *
+                       num_valid = (buffer_offset - off_start_offset) /
                                        sizeof(binder_size_t);
                        ret = binder_fixup_parent(t, thread, bp,
                                                  off_start_offset,
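Both hunks above fix the same arithmetic slip: num_valid counts offset entries, so the byte span between off_start_offset and buffer_offset must be divided by the entry size, not multiplied by it. In isolation:

        /* buffer_offset and off_start_offset are byte offsets into the
         * offsets area; each entry is one binder_size_t wide.
         */
        size_t span = buffer_offset - off_start_offset;     /* bytes   */
        size_t num_valid = span / sizeof(binder_size_t);    /* entries */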
index 46dc54d18f0b7ca13a542cef88fb798c4b1023b1..2a04e8abd3977ddbd677bbcd172e9311a8759dc2 100644 (file)
@@ -218,7 +218,6 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
        void *cmd_tbl;
        u32 opts;
        const u32 cmd_fis_len = 5; /* five dwords */
-       unsigned int n_elem;
 
        /*
         * Fill in command table information.  First, the header,
@@ -232,9 +231,8 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
                memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
        }
 
-       n_elem = 0;
        if (qc->flags & ATA_QCFLAG_DMAMAP)
-               n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+               acard_ahci_fill_sg(qc, cmd_tbl);
 
        /*
         * Fill in command slot information.
index f41744b9b38a6d5b47e11f5b98777a6b195be3e2..6853dbb4131d8765ff8f1b013f0ba29e6e67bc22 100644 (file)
@@ -73,11 +73,11 @@ enum brcm_ahci_version {
        BRCM_SATA_BCM7425 = 1,
        BRCM_SATA_BCM7445,
        BRCM_SATA_NSP,
+       BRCM_SATA_BCM7216,
 };
 
 enum brcm_ahci_quirks {
-       BRCM_AHCI_QUIRK_NO_NCQ          = BIT(0),
-       BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
+       BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
 };
 
 struct brcm_ahci_priv {
@@ -213,19 +213,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
                        brcm_sata_phy_disable(priv, i);
 }
 
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
                                  struct brcm_ahci_priv *priv)
 {
-       void __iomem *ahci;
-       struct resource *res;
        u32 impl;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
-       ahci = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(ahci))
-               return 0;
-
-       impl = readl(ahci + HOST_PORTS_IMPL);
+       impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
 
        if (fls(impl) > SATA_TOP_MAX_PHYS)
                dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
@@ -233,9 +226,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
        else if (!impl)
                dev_info(priv->dev, "no ports found\n");
 
-       devm_iounmap(&pdev->dev, ahci);
-       devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
        return impl;
 }
 
@@ -285,6 +275,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
        /* Perform the SATA PHY reset sequence */
        brcm_sata_phy_disable(priv, ap->port_no);
 
+       /* Reset the SATA clock */
+       ahci_platform_disable_clks(hpriv);
+       msleep(10);
+
+       ahci_platform_enable_clks(hpriv);
+       msleep(10);
+
        /* Bring the PHY back on */
        brcm_sata_phy_enable(priv, ap->port_no);
 
@@ -341,7 +338,6 @@ static const struct ata_port_info ahci_brcm_port_info = {
        .port_ops       = &ahci_brcm_platform_ops,
 };
 
-#ifdef CONFIG_PM_SLEEP
 static int brcm_ahci_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
@@ -349,23 +345,70 @@ static int brcm_ahci_suspend(struct device *dev)
        struct brcm_ahci_priv *priv = hpriv->plat_data;
        int ret;
 
-       ret = ahci_platform_suspend(dev);
        brcm_sata_phys_disable(priv);
+
+       if (IS_ENABLED(CONFIG_PM_SLEEP))
+               ret = ahci_platform_suspend(dev);
+       else
+               ret = 0;
+
+       if (priv->version != BRCM_SATA_BCM7216)
+               reset_control_assert(priv->rcdev);
+
        return ret;
 }
 
-static int brcm_ahci_resume(struct device *dev)
+static int __maybe_unused brcm_ahci_resume(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
        struct ahci_host_priv *hpriv = host->private_data;
        struct brcm_ahci_priv *priv = hpriv->plat_data;
+       int ret = 0;
+
+       if (priv->version == BRCM_SATA_BCM7216)
+               ret = reset_control_reset(priv->rcdev);
+       else
+               ret = reset_control_deassert(priv->rcdev);
+       if (ret)
+               return ret;
+
+       /* Make sure clocks are turned on before re-configuration */
+       ret = ahci_platform_enable_clks(hpriv);
+       if (ret)
+               return ret;
 
        brcm_sata_init(priv);
        brcm_sata_phys_enable(priv);
        brcm_sata_alpm_init(hpriv);
-       return ahci_platform_resume(dev);
+
+       /* Since we had to enable the clocks earlier on, we cannot use
+        * ahci_platform_resume() as-is: a second call to
+        * ahci_platform_enable_resources() would artificially bump the
+        * resource (regulator, clock, PHY) counts, so we open-code the
+        * part that follows ahci_platform_enable_resources().
+        */
+       ret = ahci_platform_enable_phys(hpriv);
+       if (ret)
+               goto out_disable_phys;
+
+       ret = ahci_platform_resume_host(dev);
+       if (ret)
+               goto out_disable_platform_phys;
+
+       /* We resumed so update PM runtime state */
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       return 0;
+
+out_disable_platform_phys:
+       ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+       brcm_sata_phys_disable(priv);
+       ahci_platform_disable_clks(hpriv);
+       return ret;
 }
-#endif
 
 static struct scsi_host_template ahci_platform_sht = {
        AHCI_SHT(DRV_NAME),
@@ -376,6 +419,7 @@ static const struct of_device_id ahci_of_match[] = {
        {.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
        {.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
        {.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
+       {.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216},
        {},
 };
 MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -384,6 +428,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
 {
        const struct of_device_id *of_id;
        struct device *dev = &pdev->dev;
+       const char *reset_name = NULL;
        struct brcm_ahci_priv *priv;
        struct ahci_host_priv *hpriv;
        struct resource *res;
@@ -405,49 +450,86 @@ static int brcm_ahci_probe(struct platform_device *pdev)
        if (IS_ERR(priv->top_ctrl))
                return PTR_ERR(priv->top_ctrl);
 
-       /* Reset is optional depending on platform */
-       priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
-       if (!IS_ERR_OR_NULL(priv->rcdev))
-               reset_control_deassert(priv->rcdev);
+       /* Reset is optional depending on platform and named differently */
+       if (priv->version == BRCM_SATA_BCM7216)
+               reset_name = "rescal";
+       else
+               reset_name = "ahci";
+
+       priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
+       if (IS_ERR(priv->rcdev))
+               return PTR_ERR(priv->rcdev);
+
+       hpriv = ahci_platform_get_resources(pdev, 0);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
 
-       if ((priv->version == BRCM_SATA_BCM7425) ||
-               (priv->version == BRCM_SATA_NSP)) {
-               priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
+       hpriv->plat_data = priv;
+       hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
+
+       switch (priv->version) {
+       case BRCM_SATA_BCM7425:
+               hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
+               /* fall through */
+       case BRCM_SATA_NSP:
+               hpriv->flags |= AHCI_HFLAG_NO_NCQ;
                priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+               break;
+       default:
+               break;
        }
 
+       if (priv->version == BRCM_SATA_BCM7216)
+               ret = reset_control_reset(priv->rcdev);
+       else
+               ret = reset_control_deassert(priv->rcdev);
+       if (ret)
+               return ret;
+
+       ret = ahci_platform_enable_clks(hpriv);
+       if (ret)
+               goto out_reset;
+
+       /* Must come first so that endianness is configured, including
+        * for the standard AHCI register space.
+        */
        brcm_sata_init(priv);
 
-       priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
-       if (!priv->port_mask)
-               return -ENODEV;
+       /* Initializes priv->port_mask which is used below */
+       priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+       if (!priv->port_mask) {
+               ret = -ENODEV;
+               goto out_disable_clks;
+       }
 
+       /* Must be done before ahci_platform_enable_phys() */
        brcm_sata_phys_enable(priv);
 
-       hpriv = ahci_platform_get_resources(pdev, 0);
-       if (IS_ERR(hpriv))
-               return PTR_ERR(hpriv);
-       hpriv->plat_data = priv;
-       hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
-
        brcm_sata_alpm_init(hpriv);
 
-       ret = ahci_platform_enable_resources(hpriv);
+       ret = ahci_platform_enable_phys(hpriv);
        if (ret)
-               return ret;
-
-       if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
-               hpriv->flags |= AHCI_HFLAG_NO_NCQ;
-       hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+               goto out_disable_phys;
 
        ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
                                      &ahci_platform_sht);
        if (ret)
-               return ret;
+               goto out_disable_platform_phys;
 
        dev_info(dev, "Broadcom AHCI SATA3 registered\n");
 
        return 0;
+
+out_disable_platform_phys:
+       ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+       brcm_sata_phys_disable(priv);
+out_disable_clks:
+       ahci_platform_disable_clks(hpriv);
+out_reset:
+       if (priv->version != BRCM_SATA_BCM7216)
+               reset_control_assert(priv->rcdev);
+       return ret;
 }
 
 static int brcm_ahci_remove(struct platform_device *pdev)
@@ -457,20 +539,35 @@ static int brcm_ahci_remove(struct platform_device *pdev)
        struct brcm_ahci_priv *priv = hpriv->plat_data;
        int ret;
 
+       brcm_sata_phys_disable(priv);
+
        ret = ata_platform_remove_one(pdev);
        if (ret)
                return ret;
 
-       brcm_sata_phys_disable(priv);
-
        return 0;
 }
 
+static void brcm_ahci_shutdown(struct platform_device *pdev)
+{
+       int ret;
+
+       /* All resource releasing happens via devres, but unlike in a
+        * proper remove, our device is not disappearing; therefore
+        * brcm_ahci_suspend(), which does explicit power management, is
+        * the appropriate call here.
+        */
+       ret = brcm_ahci_suspend(&pdev->dev);
+       if (ret)
+               dev_err(&pdev->dev, "failed to shut down\n");
+}
+
 static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
 
 static struct platform_driver brcm_ahci_driver = {
        .probe = brcm_ahci_probe,
        .remove = brcm_ahci_remove,
+       .shutdown = brcm_ahci_shutdown,
        .driver = {
                .name = DRV_NAME,
                .of_match_table = ahci_of_match,
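A note on the reset handling introduced above: devm_reset_control_get_optional() returns NULL rather than an error when the device tree describes no reset line, and the reset_control_*() calls are NULL-safe no-ops, which is why probe and resume can invoke them unconditionally. A minimal sketch of the pattern (self_clearing is a stand-in flag for the version check):

        struct reset_control *rc;
        int ret;

        rc = devm_reset_control_get_optional(&pdev->dev, "rescal");
        if (IS_ERR(rc))
                return PTR_ERR(rc);     /* real error, e.g. -EPROBE_DEFER */

        /* With rc == NULL both calls below are no-ops returning 0. */
        if (self_clearing)                      /* e.g. BCM7216 "rescal" */
                ret = reset_control_reset(rc);  /* assert+deassert pulse */
        else
                ret = reset_control_deassert(rc);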
index 8befce036af84810b0a3c815829b87f6fd904d8c..129556fcf6be760ce48834873ea3ea057c4bd8a5 100644 (file)
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
  * RETURNS:
  * 0 on success otherwise a negative error code
  */
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
 {
        int rc, i;
 
@@ -74,6 +74,7 @@ disable_phys:
        }
        return rc;
 }
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
 
 /**
  * ahci_platform_disable_phys - Disable PHYs
@@ -81,7 +82,7 @@ disable_phys:
  *
  * This function disables all PHYs found in hpriv->phys.
  */
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
 {
        int i;
 
@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
                phy_exit(hpriv->phys[i]);
        }
 }
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
 
 /**
  * ahci_platform_enable_clks - Enable platform clocks
index e9017c570bc51667c127847eb13561f09333794d..6f4ab5c5b52dde6f072a9eea90427d7fd8a7a791 100644 (file)
@@ -5328,6 +5328,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
        }
 }
 
+/**
+ *     ata_qc_get_active - get bitmask of active qcs
+ *     @ap: port in question
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ *
+ *     RETURNS:
+ *     Bitmask of active qcs
+ */
+u64 ata_qc_get_active(struct ata_port *ap)
+{
+       u64 qc_active = ap->qc_active;
+
+       /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
+       if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+               qc_active |= (1 << 0);
+               qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
+       }
+
+       return qc_active;
+}
+EXPORT_SYMBOL_GPL(ata_qc_get_active);
+
 /**
  *     ata_qc_complete_multiple - Complete multiple qcs successfully
  *     @ap: port in question
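ata_qc_get_active() lets drivers that complete commands by hardware tag fold the internal command (which the hardware sees as tag 0) into the active mask; the sata_fsl, sata_mv and sata_nv hunks further down switch to it. Typical completion-path usage, mirroring those hunks (done_mask being whatever tags the controller reports finished):

        u64 done_mask;  /* tags the controller has completed */

        ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);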
index 135173c8d13891a952ed12afe985e34e082a0f69..391dff0f25a2ba61943ddd63b26c11b4c27cc9e7 100644 (file)
@@ -824,7 +824,7 @@ static int arasan_cf_probe(struct platform_device *pdev)
                quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
 
        acdev->pbase = res->start;
-       acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
+       acdev->vbase = devm_ioremap(&pdev->dev, res->start,
                        resource_size(res));
        if (!acdev->vbase) {
                dev_warn(&pdev->dev, "ioremap fail\n");
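This hunk and the pata_octeon_cf/pata_rb532_cf ones below are mechanical: by this point in the tree ioremap_nocache() is simply an alias for ioremap(), both mapping with uncached attributes, so the _nocache spelling is being retired. The managed variant keeps the same signature; a minimal usage sketch:

        void __iomem *base;

        base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!base)
                return -ENOMEM;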
index 1bfd0154dad5db964e49e90f4acdff5d7f45ce0f..e47a28271f5bb46334d043e8bb69ab2cf8f8230d 100644 (file)
@@ -979,7 +979,7 @@ static void pata_macio_invariants(struct pata_macio_priv *priv)
        priv->aapl_bus_id =  bidp ? *bidp : 0;
 
        /* Fixup missing Apple bus ID in case of media-bay */
-       if (priv->mediabay && bidp == 0)
+       if (priv->mediabay && !bidp)
                priv->aapl_bus_id = 1;
 }
 
index d3d851b014a3765cd50d0f01533ec6feb4ae3f00..bd87476ab48136f19286a94a21d0735b1a944ff5 100644 (file)
@@ -891,7 +891,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
                                        of_node_put(dma_node);
                                        return -EINVAL;
                                }
-                               cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+                               cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
                                                                         resource_size(res_dma));
                                if (!cf_port->dma_base) {
                                        of_node_put(dma_node);
@@ -909,7 +909,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
                if (!res_cs1)
                        return -EINVAL;
 
-               cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
+               cs1 = devm_ioremap(&pdev->dev, res_cs1->start,
                                           resource_size(res_cs1));
                if (!cs1)
                        return rv;
@@ -925,7 +925,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
        if (!res_cs0)
                return -EINVAL;
 
-       cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+       cs0 = devm_ioremap(&pdev->dev, res_cs0->start,
                                   resource_size(res_cs0));
        if (!cs0)
                return rv;
index deae466395de1a656d5cba27720fb8f345e6c332..479c4b29b856261200740be36c1bd72a57027b81 100644 (file)
@@ -140,7 +140,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
        info->gpio_line = gpiod;
        info->irq = irq;
 
-       info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
+       info->iobase = devm_ioremap(&pdev->dev, res->start,
                                resource_size(res));
        if (!info->iobase)
                return -ENOMEM;
index 9239615d8a0472967fa534f0dcd8523da944ea1a..d55ee244d6931fcae2089dce4e3a07409685276d 100644 (file)
@@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
                                     i, ioread32(hcr_base + CC),
                                     ioread32(hcr_base + CA));
                }
-               ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+               ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
                return;
 
        } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
index 277f11909fc1ab37338f3390781db00814104a6a..d7228f8e9297c001e730c7f62c8901ebad20715f 100644 (file)
@@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
        }
 
        if (work_done) {
-               ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+               ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 
                /* Update the software queue position index in hardware */
                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
index f3e62f5528bdb17735f28d847f8cc485bc2e5164..eb9dc14e5147aaebbc210c1481ce146a6d9c57a4 100644 (file)
@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
                                        check_commands = 0;
                                check_commands &= ~(1 << pos);
                        }
-                       ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+                       ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
                }
        }
 
index b23d1e4bad33b20653b213047d815f388dfd0d47..17d47ad03ab79e33cd8c57a453a84505e4530c82 100644 (file)
 #include "suni.h"
 #include "eni.h"
 
-#if !defined(__i386__) && !defined(__x86_64__)
-#ifndef ioremap_nocache
-#define ioremap_nocache(X,Y) ioremap(X,Y)
-#endif 
-#endif
-
 /*
  * TODO:
  *
@@ -374,7 +368,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
                here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
                dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
                    << MID_DMA_VCI_SHIFT) | MID_DT_JK;
-               j++;
+               dma[j++] = 0;
        }
        here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
        if (!eff) size += skip;
@@ -447,7 +441,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
        if (size != eff) {
                dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
                    (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
-               j++;
+               dma[j++] = 0;
        }
        if (!j || j > 2*RX_DMA_BUF) {
                printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
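The two do_rx_dma() hunks above fix descriptor construction: a JK (skip) descriptor occupies two words in the DMA queue, and the old j++ advanced the index without storing the second word, queueing whatever stale data sat in that slot. The fix stores an explicit zero:

        /* JK descriptor: control word, then a second word that must be
         * written out, not merely skipped over.
         */
        dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
                   (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
        dma[j++] = 0;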
@@ -1725,7 +1719,7 @@ static int eni_do_init(struct atm_dev *dev)
        }
        printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,",
            dev->number,pci_dev->revision,real_base,eni_dev->irq);
-       if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) {
+       if (!(base = ioremap(real_base,MAP_MAX_SIZE))) {
                printk("\n");
                printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page "
                    "mapping\n",dev->number);
index aad00d2b28f51485c0b12eb42ac37a3b5419200b..cc87004d5e2d6235b4f1196c0d47a8ee81bff64c 100644 (file)
@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                        }
                        if (!to) {
                                printk ("No more free channels for FS50..\n");
+                               kfree(vcc);
                                return -EBUSY;
                        }
                        vcc->channo = dev->channo;
@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                        if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
                            ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
                                printk ("Channel is in use for FS155.\n");
+                               kfree(vcc);
                                return -EBUSY;
                        }
                }
@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                            tc, sizeof (struct fs_transmit_config));
                if (!tc) {
                        fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+                       kfree(vcc);
                        return -ENOMEM;
                }
 
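The three fs_open() hunks above plug the same leak: once vcc has been allocated, every error return must free it. An equivalent structure, sketched with a single unwind label and a stand-in condition (no_free_channel is invented for illustration; struct fs_vcc is the driver's per-channel state):

        struct fs_vcc *vcc;
        int ret;

        vcc = kmalloc(sizeof(*vcc), GFP_KERNEL);
        if (!vcc)
                return -ENOMEM;

        if (no_free_channel) {          /* stand-in for the checks above */
                ret = -EBUSY;
                goto err_free_vcc;
        }

        return 0;

err_free_vcc:
        kfree(vcc);
        return ret;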
index 30d0523014e0d49e0ac6e16880a7bb08ecfbef27..6cdbf15312387f93455bafb2b816e691def1cb94 100644 (file)
@@ -359,7 +359,7 @@ static int handle_remove(const char *nodename, struct device *dev)
  * If configured, or requested by the commandline, devtmpfs will be
  * auto-mounted after the kernel mounted the root filesystem.
  */
-int devtmpfs_mount(const char *mntdir)
+int devtmpfs_mount(void)
 {
        int err;
 
@@ -369,7 +369,7 @@ int devtmpfs_mount(const char *mntdir)
        if (!thread)
                return 0;
 
-       err = ksys_mount("devtmpfs", mntdir, "devtmpfs", MS_SILENT, NULL);
+       err = do_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL);
        if (err)
                printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
        else
@@ -394,7 +394,7 @@ static int devtmpfsd(void *p)
        *err = ksys_unshare(CLONE_NEWNS);
        if (*err)
                goto out;
-       *err = ksys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+       *err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
        if (*err)
                goto out;
        ksys_chdir("/.."); /* will traverse into overmounted root */
index 4a66888e7253d34d00674824d7fab6593dd794fd..5fa7ce3745a0d63e7c7736de89386e06ee38fe0c 100644 (file)
@@ -17,7 +17,7 @@ PROGBITS  = $(if $(CONFIG_ARM),%,@)progbits
 filechk_fwbin = \
        echo "/* Generated by $(src)/Makefile */"               ;\
        echo "    .section .rodata"                             ;\
-       echo "    .p2align $(ASM_ALIGN)"                        ;\
+       echo "    .p2align 4"                                   ;\
        echo "_fw_$(FWSTR)_bin:"                                ;\
        echo "    .incbin \"$(fwdir)/$(FWNAME)\""               ;\
        echo "_fw_end:"                                         ;\
index 7c532548b0a62d8d0ac99d8d052de4a8d045ef18..cf6b6b722e5c91612b93a52e59b27e06a4aa8877 100644 (file)
@@ -1325,10 +1325,14 @@ struct device *platform_find_device_by_driver(struct device *start,
 }
 EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
 
+void __weak __init early_platform_cleanup(void) { }
+
 int __init platform_bus_init(void)
 {
        int error;
 
+       early_platform_cleanup();
+
        error = device_register(&platform_bus);
        if (error) {
                put_device(&platform_bus);
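early_platform_cleanup() is declared __weak, so the empty stub added here is only a fallback; an architecture or platform that needs early teardown supplies a strong definition in its own translation unit and the linker picks it over the weak one. Sketch of the mechanism (the override body is hypothetical):

        /* drivers/base/platform.c: weak fallback, does nothing */
        void __weak __init early_platform_cleanup(void) { }

        /* elsewhere, e.g. arch code: strong definition wins at link time */
        void __init early_platform_cleanup(void)
        {
                /* unregister bootstrap platform devices here */
        }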
index 48616f358854a4562222877b5dd779e4d1434460..16134a69bf6f5eac2d20f215a81a037e57ef3e7d 100644 (file)
@@ -1006,8 +1006,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
        int retval;
 
        if (rpmflags & RPM_GET_PUT) {
-               if (!atomic_dec_and_test(&dev->power.usage_count))
+               if (!atomic_dec_and_test(&dev->power.usage_count)) {
+                       trace_rpm_usage_rcuidle(dev, rpmflags);
                        return 0;
+               }
        }
 
        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1038,8 +1040,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
        int retval;
 
        if (rpmflags & RPM_GET_PUT) {
-               if (!atomic_dec_and_test(&dev->power.usage_count))
+               if (!atomic_dec_and_test(&dev->power.usage_count)) {
+                       trace_rpm_usage_rcuidle(dev, rpmflags);
                        return 0;
+               }
        }
 
        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1101,6 +1105,7 @@ int pm_runtime_get_if_in_use(struct device *dev)
        retval = dev->power.disable_depth > 0 ? -EINVAL :
                dev->power.runtime_status == RPM_ACTIVE
                        && atomic_inc_not_zero(&dev->power.usage_count);
+       trace_rpm_usage_rcuidle(dev, 0);
        spin_unlock_irqrestore(&dev->power.lock, flags);
        return retval;
 }
@@ -1434,6 +1439,8 @@ void pm_runtime_allow(struct device *dev)
        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+       else
+               trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
 
  out:
        spin_unlock_irq(&dev->power.lock);
@@ -1501,6 +1508,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
+               } else {
+                       trace_rpm_usage_rcuidle(dev, 0);
                }
        }
 
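The pattern across these hunks is uniform: wherever a usage-count operation does not trigger a runtime-PM state transition, the new trace_rpm_usage_rcuidle tracepoint records it, so usage-count history becomes visible in traces rather than only the suspend/resume events. Condensed from the first hunk above:

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count)) {
                        /* count dropped but stayed non-zero: trace only */
                        trace_rpm_usage_rcuidle(dev, rpmflags);
                        return 0;
                }
        }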
index 70a9edb5f5252fe17e88e9fd7185e0e6ac71eff0..27f3e60608e590d90cbcfaf58f3ca35f0e6dae6e 100644 (file)
@@ -1125,6 +1125,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m,
                break;
        }
 
+       if (!next_ws)
+               print_wakeup_source_stats(m, &deleted_ws);
+
        return next_ws;
 }
 
index ac9b31c57967d53f43d0d016e3c44d888509d7a2..008f8da69d973183635768b52cc7e1b0119363dd 100644 (file)
@@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
        return i2c_smbus_write_byte_data(i2c, reg, val);
 }
 
-static struct regmap_bus regmap_smbus_byte = {
+static const struct regmap_bus regmap_smbus_byte = {
        .reg_write = regmap_smbus_byte_reg_write,
        .reg_read = regmap_smbus_byte_reg_read,
 };
@@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
        return i2c_smbus_write_word_data(i2c, reg, val);
 }
 
-static struct regmap_bus regmap_smbus_word = {
+static const struct regmap_bus regmap_smbus_word = {
        .reg_write = regmap_smbus_word_reg_write,
        .reg_read = regmap_smbus_word_reg_read,
 };
@@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
        return i2c_smbus_write_word_swapped(i2c, reg, val);
 }
 
-static struct regmap_bus regmap_smbus_word_swapped = {
+static const struct regmap_bus regmap_smbus_word_swapped = {
        .reg_write = regmap_smbus_word_write_swapped,
        .reg_read = regmap_smbus_word_read_swapped,
 };
@@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context,
                return -EIO;
 }
 
-static struct regmap_bus regmap_i2c = {
+static const struct regmap_bus regmap_i2c = {
        .write = regmap_i2c_write,
        .gather_write = regmap_i2c_gather_write,
        .read = regmap_i2c_read,
@@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
                return -EIO;
 }
 
-static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
        .write = regmap_i2c_smbus_i2c_write,
        .read = regmap_i2c_smbus_i2c_read,
        .max_raw_read = I2C_SMBUS_BLOCK_MAX,
index 19f57ccfbe1d71478f769cf886b5efecd11b19da..59f911e577192257667ab394038fa2ff56f3167f 100644 (file)
@@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 
        WARN_ON(!map->bus);
 
-       /* Check for unwritable registers before we start */
-       for (i = 0; i < val_len / map->format.val_bytes; i++)
-               if (!regmap_writeable(map,
-                                    reg + regmap_get_offset(map, i)))
-                       return -EINVAL;
+       /* Check for unwritable or noinc registers in range
+        * before we start
+        */
+       if (!regmap_writeable_noinc(map, reg)) {
+               for (i = 0; i < val_len / map->format.val_bytes; i++) {
+                       unsigned int element =
+                               reg + regmap_get_offset(map, i);
+                       if (!regmap_writeable(map, element) ||
+                               regmap_writeable_noinc(map, element))
+                               return -EINVAL;
+               }
+       }
 
        if (!map->cache_bypass && map->format.parse_val) {
                unsigned int ival;
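regmap "noinc" registers address FIFO-style hardware where successive values are written to one register without the address incrementing, so a raw block write that walks across such a register would misprogram it; hence the new range check. Drivers opt registers in through a regmap_config callback; a hedged sketch with invented names (FOO_FIFO_REG, foo_*):

        #define FOO_FIFO_REG    0x10    /* hypothetical FIFO register */

        static bool foo_writeable_noinc_reg(struct device *dev,
                                            unsigned int reg)
        {
                return reg == FOO_FIFO_REG;
        }

        static const struct regmap_config foo_regmap_config = {
                .reg_bits               = 8,
                .val_bits               = 8,
                .writeable_noinc_reg    = foo_writeable_noinc_reg,
        };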
index d8d0dc0ca5acf289c8a712cd2a9d78dc5c6fcf24..0b081dee1e95cce1b71b76131fce56d1d1b6f3b3 100644 (file)
@@ -108,10 +108,7 @@ static const void *property_get_pointer(const struct property_entry *prop)
        if (!prop->length)
                return NULL;
 
-       if (prop->is_array)
-               return prop->pointer;
-
-       return &prop->value;
+       return prop->is_inline ? &prop->value : prop->pointer;
 }
 
 static const void *property_entry_find(const struct property_entry *props,
@@ -201,92 +198,91 @@ static int property_entry_read_string_array(const struct property_entry *props,
 
 static void property_entry_free_data(const struct property_entry *p)
 {
-       const void *pointer = property_get_pointer(p);
        const char * const *src_str;
        size_t i, nval;
 
-       if (p->is_array) {
-               if (p->type == DEV_PROP_STRING && p->pointer) {
-                       src_str = p->pointer;
-                       nval = p->length / sizeof(const char *);
-                       for (i = 0; i < nval; i++)
-                               kfree(src_str[i]);
-               }
-               kfree(pointer);
-       } else if (p->type == DEV_PROP_STRING) {
-               kfree(p->value.str);
+       if (p->type == DEV_PROP_STRING) {
+               src_str = property_get_pointer(p);
+               nval = p->length / sizeof(*src_str);
+               for (i = 0; i < nval; i++)
+                       kfree(src_str[i]);
        }
+
+       if (!p->is_inline)
+               kfree(p->pointer);
+
        kfree(p->name);
 }
 
-static const char * const *
-property_copy_string_array(const struct property_entry *src)
+static bool property_copy_string_array(const char **dst_ptr,
+                                      const char * const *src_ptr,
+                                      size_t nval)
 {
-       const char **d;
-       const char * const *src_str = src->pointer;
-       size_t nval = src->length / sizeof(*d);
        int i;
 
-       d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
-       if (!d)
-               return NULL;
-
        for (i = 0; i < nval; i++) {
-               d[i] = kstrdup(src_str[i], GFP_KERNEL);
-               if (!d[i] && src_str[i]) {
+               dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL);
+               if (!dst_ptr[i] && src_ptr[i]) {
                        while (--i >= 0)
-                               kfree(d[i]);
-                       kfree(d);
-                       return NULL;
+                               kfree(dst_ptr[i]);
+                       return false;
                }
        }
 
-       return d;
+       return true;
 }
 
 static int property_entry_copy_data(struct property_entry *dst,
                                    const struct property_entry *src)
 {
        const void *pointer = property_get_pointer(src);
-       const void *new;
-
-       if (src->is_array) {
-               if (!src->length)
-                       return -ENODATA;
-
-               if (src->type == DEV_PROP_STRING) {
-                       new = property_copy_string_array(src);
-                       if (!new)
-                               return -ENOMEM;
-               } else {
-                       new = kmemdup(pointer, src->length, GFP_KERNEL);
-                       if (!new)
-                               return -ENOMEM;
-               }
+       void *dst_ptr;
+       size_t nval;
+
+       /*
+        * Properties with no data should not be marked as stored
+        * out of line.
+        */
+       if (!src->is_inline && !src->length)
+               return -ENODATA;
+
+       /*
+        * Reference properties are never stored inline as
+        * they are too big.
+        */
+       if (src->type == DEV_PROP_REF && src->is_inline)
+               return -EINVAL;
 
-               dst->is_array = true;
-               dst->pointer = new;
-       } else if (src->type == DEV_PROP_STRING) {
-               new = kstrdup(src->value.str, GFP_KERNEL);
-               if (!new && src->value.str)
+       if (src->length <= sizeof(dst->value)) {
+               dst_ptr = &dst->value;
+               dst->is_inline = true;
+       } else {
+               dst_ptr = kmalloc(src->length, GFP_KERNEL);
+               if (!dst_ptr)
                        return -ENOMEM;
+               dst->pointer = dst_ptr;
+       }
 
-               dst->value.str = new;
+       if (src->type == DEV_PROP_STRING) {
+               nval = src->length / sizeof(const char *);
+               if (!property_copy_string_array(dst_ptr, pointer, nval)) {
+                       if (!dst->is_inline)
+                               kfree(dst->pointer);
+                       return -ENOMEM;
+               }
        } else {
-               dst->value = src->value;
+               memcpy(dst_ptr, pointer, src->length);
        }
 
        dst->length = src->length;
        dst->type = src->type;
        dst->name = kstrdup(src->name, GFP_KERNEL);
-       if (!dst->name)
-               goto out_free_data;
+       if (!dst->name) {
+               property_entry_free_data(dst);
+               return -ENOMEM;
+       }
 
        return 0;
-
-out_free_data:
-       property_entry_free_data(dst);
-       return -ENOMEM;
 }
 
 /**
@@ -483,31 +479,49 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
                                 struct fwnode_reference_args *args)
 {
        struct swnode *swnode = to_swnode(fwnode);
-       const struct software_node_reference *ref;
+       const struct software_node_ref_args *ref_array;
+       const struct software_node_ref_args *ref;
        const struct property_entry *prop;
        struct fwnode_handle *refnode;
+       u32 nargs_prop_val;
+       int error;
        int i;
 
-       if (!swnode || !swnode->node->references)
+       if (!swnode)
                return -ENOENT;
 
-       for (ref = swnode->node->references; ref->name; ref++)
-               if (!strcmp(ref->name, propname))
-                       break;
+       prop = property_entry_get(swnode->node->properties, propname);
+       if (!prop)
+               return -ENOENT;
+
+       if (prop->type != DEV_PROP_REF)
+               return -EINVAL;
 
-       if (!ref->name || index > (ref->nrefs - 1))
+       /*
+        * We expect that references are never stored inline, even
+        * single ones, as they are too big.
+        */
+       if (prop->is_inline)
+               return -EINVAL;
+
+       if (index * sizeof(*ref) >= prop->length)
                return -ENOENT;
 
-       refnode = software_node_fwnode(ref->refs[index].node);
+       ref_array = prop->pointer;
+       ref = &ref_array[index];
+
+       refnode = software_node_fwnode(ref->node);
        if (!refnode)
                return -ENOENT;
 
        if (nargs_prop) {
-               prop = property_entry_get(swnode->node->properties, nargs_prop);
-               if (!prop)
-                       return -EINVAL;
+               error = property_entry_read_int_array(swnode->node->properties,
+                                                     nargs_prop, sizeof(u32),
+                                                     &nargs_prop_val, 1);
+               if (error)
+                       return error;
 
-               nargs = prop->value.u32_data;
+               nargs = nargs_prop_val;
        }
 
        if (nargs > NR_FWNODE_REFERENCE_ARGS)
@@ -517,7 +531,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
        args->nargs = nargs;
 
        for (i = 0; i < nargs; i++)
-               args->args[i] = ref->refs[index].args[i];
+               args->args[i] = ref->args[i];
 
        return 0;
 }
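The rework above replaces the old is_array flag with is_inline: any payload small enough to fit in the value union is stored inline, everything larger lives out of line behind pointer, and property_get_pointer() picks between the two. The copy path condenses to:

        /* Condensed from property_entry_copy_data() above. */
        if (src->length <= sizeof(dst->value)) {
                dst_ptr = &dst->value;          /* payload fits: inline */
                dst->is_inline = true;
        } else {
                dst_ptr = kmalloc(src->length, GFP_KERNEL);
                if (!dst_ptr)
                        return -ENOMEM;
                dst->pointer = dst_ptr;         /* payload out of line */
        }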
index 86e85daa80bf88d636abf45f45f4d2ad1c4674fc..305c7751184a29de43243dd8964277afa807a66c 100644 (file)
@@ -8,3 +8,6 @@ config TEST_ASYNC_DRIVER_PROBE
          The module name will be test_async_driver_probe.ko
 
          If unsure say N.
+config KUNIT_DRIVER_PE_TEST
+       bool "KUnit Tests for property entry API"
+       depends on KUNIT=y
index 0f1f7277a01399d73b97dacc3ce34a49a1a9f512..3ca56367c84b729a7b7f3d0e7df7b61c9b652a46 100644 (file)
@@ -1,2 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)  += test_async_driver_probe.o
+
+obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
new file mode 100644 (file)
index 0000000..abe0331
--- /dev/null
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+// Unit tests for property entries API
+//
+// Copyright 2019 Google LLC.
+
+#include <kunit/test.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+static void pe_test_uints(struct kunit *test)
+{
+       static const struct property_entry entries[] = {
+               PROPERTY_ENTRY_U8("prop-u8", 8),
+               PROPERTY_ENTRY_U16("prop-u16", 16),
+               PROPERTY_ENTRY_U32("prop-u32", 32),
+               PROPERTY_ENTRY_U64("prop-u64", 64),
+               { }
+       };
+
+       struct fwnode_handle *node;
+       u8 val_u8, array_u8[2];
+       u16 val_u16, array_u16[2];
+       u32 val_u32, array_u32[2];
+       u64 val_u64, array_u64[2];
+       int error;
+
+       node = fwnode_create_software_node(entries, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+       error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+       error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+       error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+       error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+       error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+       error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+       error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+       error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+       error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       fwnode_remove_software_node(node);
+}
+
+static void pe_test_uint_arrays(struct kunit *test)
+{
+       static const u8 a_u8[16] = { 8, 9 };
+       static const u16 a_u16[16] = { 16, 17 };
+       static const u32 a_u32[16] = { 32, 33 };
+       static const u64 a_u64[16] = { 64, 65 };
+       static const struct property_entry entries[] = {
+               PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
+               PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
+               PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32),
+               PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64),
+               { }
+       };
+
+       struct fwnode_handle *node;
+       u8 val_u8, array_u8[32];
+       u16 val_u16, array_u16[32];
+       u32 val_u32, array_u32[32];
+       u64 val_u64, array_u64[32];
+       int error;
+
+       node = fwnode_create_software_node(entries, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+       error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+       error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+       error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+       KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9);
+
+       error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+       error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+       error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+       KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17);
+
+       error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+       error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+       error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+       KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33);
+
+       error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+       error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+       error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+       KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65);
+
+       error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       fwnode_remove_software_node(node);
+}
+
+static void pe_test_strings(struct kunit *test)
+{
+       static const char *strings[] = {
+               "string-a",
+               "string-b",
+       };
+
+       static const struct property_entry entries[] = {
+               PROPERTY_ENTRY_STRING("str", "single"),
+               PROPERTY_ENTRY_STRING("empty", ""),
+               PROPERTY_ENTRY_STRING_ARRAY("strs", strings),
+               { }
+       };
+
+       struct fwnode_handle *node;
+       const char *str;
+       const char *strs[10];
+       int error;
+
+       node = fwnode_create_software_node(entries, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+       error = fwnode_property_read_string(node, "str", &str);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_STREQ(test, str, "single");
+
+       error = fwnode_property_read_string_array(node, "str", strs, 1);
+       KUNIT_EXPECT_EQ(test, error, 1);
+       KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+       /* asking for more data returns what we have */
+       error = fwnode_property_read_string_array(node, "str", strs, 2);
+       KUNIT_EXPECT_EQ(test, error, 1);
+       KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+       error = fwnode_property_read_string(node, "no-str", &str);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_read_string_array(node, "no-str", strs, 1);
+       KUNIT_EXPECT_LT(test, error, 0);
+
+       error = fwnode_property_read_string(node, "empty", &str);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_STREQ(test, str, "");
+
+       error = fwnode_property_read_string_array(node, "strs", strs, 3);
+       KUNIT_EXPECT_EQ(test, error, 2);
+       KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+       KUNIT_EXPECT_STREQ(test, strs[1], "string-b");
+
+       error = fwnode_property_read_string_array(node, "strs", strs, 1);
+       KUNIT_EXPECT_EQ(test, error, 1);
+       KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+
+       /* NULL argument -> returns size */
+       error = fwnode_property_read_string_array(node, "strs", NULL, 0);
+       KUNIT_EXPECT_EQ(test, error, 2);
+
+       /* accessing array as single value */
+       error = fwnode_property_read_string(node, "strs", &str);
+       KUNIT_EXPECT_EQ(test, error, 0);
+       KUNIT_EXPECT_STREQ(test, str, "string-a");
+
+       fwnode_remove_software_node(node);
+}
+
+static void pe_test_bool(struct kunit *test)
+{
+       static const struct property_entry entries[] = {
+               PROPERTY_ENTRY_BOOL("prop"),
+               { }
+       };
+
+       struct fwnode_handle *node;
+
+       node = fwnode_create_software_node(entries, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+       KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop"));
+       KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop"));
+
+       fwnode_remove_software_node(node);
+}
+
+/* Verifies that small U8 array is stored inline when property is copied */
+static void pe_test_move_inline_u8(struct kunit *test)
+{
+       static const u8 u8_array_small[8] = { 1, 2, 3, 4 };
+       static const u8 u8_array_big[128] = { 5, 6, 7, 8 };
+       static const struct property_entry entries[] = {
+               PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small),
+               PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big),
+               { }
+       };
+
+       struct property_entry *copy;
+       const u8 *data_ptr;
+
+       copy = property_entries_dup(entries);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+       KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+       data_ptr = (u8 *)&copy[0].value;
+       KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1);
+       KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2);
+
+       KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+       data_ptr = copy[1].pointer;
+       KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5);
+       KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6);
+
+       property_entries_free(copy);
+}
+
+/* Verifies that single string array is stored inline when property is copied */
+static void pe_test_move_inline_str(struct kunit *test)
+{
+       static char *str_array_small[] = { "a" };
+       static char *str_array_big[] = { "b", "c", "d", "e" };
+       static char *str_array_small_empty[] = { "" };
+       static struct property_entry entries[] = {
+               PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small),
+               PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big),
+               PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty),
+               { }
+       };
+
+       struct property_entry *copy;
+       const char * const *data_ptr;
+
+       copy = property_entries_dup(entries);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+       KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+       KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a");
+
+       KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+       data_ptr = copy[1].pointer;
+       KUNIT_EXPECT_STREQ(test, data_ptr[0], "b");
+       KUNIT_EXPECT_STREQ(test, data_ptr[1], "c");
+
+       KUNIT_EXPECT_TRUE(test, copy[2].is_inline);
+       KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], "");
+
+       property_entries_free(copy);
+}
+
+/* Handling of reference properties */
+static void pe_test_reference(struct kunit *test)
+{
+       static const struct software_node nodes[] = {
+               { .name = "1", },
+               { .name = "2", },
+               { }
+       };
+
+       static const struct software_node_ref_args refs[] = {
+               {
+                       .node = &nodes[0],
+                       .nargs = 0,
+               },
+               {
+                       .node = &nodes[1],
+                       .nargs = 2,
+                       .args = { 3, 4 },
+               },
+       };
+
+       const struct property_entry entries[] = {
+               PROPERTY_ENTRY_REF("ref-1", &nodes[0]),
+               PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2),
+               PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
+               { }
+       };
+
+       struct fwnode_handle *node;
+       struct fwnode_reference_args ref;
+       int error;
+
+       error = software_node_register_nodes(nodes);
+       KUNIT_ASSERT_EQ(test, error, 0);
+
+       node = fwnode_create_software_node(entries, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+       error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+                                                  0, 0, &ref);
+       KUNIT_ASSERT_EQ(test, error, 0);
+       KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+       KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+       /* wrong index */
+       error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+                                                  0, 1, &ref);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+                                                  1, 0, &ref);
+       KUNIT_ASSERT_EQ(test, error, 0);
+       KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+       KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
+       KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+
+       /* asking for more args, padded with zero data */
+       error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+                                                  3, 0, &ref);
+       KUNIT_ASSERT_EQ(test, error, 0);
+       KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+       KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
+       KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+       KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
+       KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU);
+
+       /* wrong index */
+       error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+                                                  2, 1, &ref);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       /* array of references */
+       error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+                                                  0, 0, &ref);
+       KUNIT_ASSERT_EQ(test, error, 0);
+       KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+       KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+       /* second reference in the array */
+       error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+                                                  2, 1, &ref);
+       KUNIT_ASSERT_EQ(test, error, 0);
+       KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+       KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
+       KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
+       KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
+
+       /* wrong index */
+       error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+                                                  0, 2, &ref);
+       KUNIT_EXPECT_NE(test, error, 0);
+
+       fwnode_remove_software_node(node);
+       software_node_unregister_nodes(nodes);
+}
+
+static struct kunit_case property_entry_test_cases[] = {
+       KUNIT_CASE(pe_test_uints),
+       KUNIT_CASE(pe_test_uint_arrays),
+       KUNIT_CASE(pe_test_strings),
+       KUNIT_CASE(pe_test_bool),
+       KUNIT_CASE(pe_test_move_inline_u8),
+       KUNIT_CASE(pe_test_move_inline_str),
+       KUNIT_CASE(pe_test_reference),
+       { }
+};
+
+static struct kunit_suite property_entry_test_suite = {
+       .name = "property-entry",
+       .test_cases = property_entry_test_cases,
+};
+
+kunit_test_suite(property_entry_test_suite);
index 57f10b58b47cd3855ad117bb1ce57791e4c97532..c153c96a6145be2433cc42e4659f2fa63be3dbc5 100644 (file)
@@ -48,7 +48,7 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb)
                return 0;
 
        ccb->setup_done = 1;
-       ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE);
+       ccb->mii = ioremap(ccb->core->addr_s[1], BCMA_CORE_SIZE);
        if (!ccb->mii)
                return -ENOMEM;
 
index c42cec7c7ecc0a88f649b315c8c8616a0439c2f8..88a93c266c19930f3cc6b4644c1b8ec8e7cc25d0 100644 (file)
@@ -115,7 +115,7 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
                if (unlikely(!addr))
                        goto out;
                err = -ENOMEM;
-               mmio = ioremap_nocache(addr, sizeof(val));
+               mmio = ioremap(addr, sizeof(val));
                if (!mmio)
                        goto out;
 
@@ -180,7 +180,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
                if (unlikely(!addr))
                        goto out;
                err = -ENOMEM;
-               mmio = ioremap_nocache(addr, sizeof(val));
+               mmio = ioremap(addr, sizeof(val));
                if (!mmio)
                        goto out;
 
@@ -515,7 +515,7 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
        /* Ok, ready to run, register it to the system.
         * The following needs change, if we want to port hostmode
         * to non-MIPS platform. */
-       io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
+       io_map_base = (unsigned long)ioremap(pc_host->mem_resource.start,
                                                     resource_size(&pc_host->mem_resource));
        pc_host->pci_controller.io_map_base = io_map_base;
        set_io_port_base(pc_host->pci_controller.io_map_base);
index c8073b509a2ba541eb6fc515a23a13b805e7c90e..90d5bdc12e0337f28b213b219d72056972683531 100644 (file)
@@ -172,7 +172,7 @@ int __init bcma_host_soc_register(struct bcma_soc *soc)
        /* iomap only first core. We have to read some register on this core
         * to scan the bus.
         */
-       bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
+       bus->mmio = ioremap(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
        if (!bus->mmio)
                return -ENOMEM;
 
index 4a2d1b235fb5af3e51ac8b044e6a18aed5753cec..fd546c51b076ad93c03a0f32ad37ee83b4a7434b 100644 (file)
@@ -425,11 +425,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
                }
        }
        if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
-               core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE);
+               core->io_addr = ioremap(core->addr, BCMA_CORE_SIZE);
                if (!core->io_addr)
                        return -ENOMEM;
                if (core->wrap) {
-                       core->io_wrap = ioremap_nocache(core->wrap,
+                       core->io_wrap = ioremap(core->wrap,
                                                        BCMA_CORE_SIZE);
                        if (!core->io_wrap) {
                                iounmap(core->io_addr);
@@ -472,7 +472,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
 
        erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
        if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
-               eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+               eromptr = ioremap(erombase, BCMA_CORE_SIZE);
                if (!eromptr)
                        return -ENOMEM;
        } else {
index 57532465fb83a7dede4856ddaaf6466f330391d3..b4607dd9618521020bf10ee42c5d3a17b8992d59 100644 (file)
@@ -1296,10 +1296,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
        mutex_unlock(&nbd->config_lock);
        ret = wait_event_interruptible(config->recv_wq,
                                         atomic_read(&config->recv_threads) == 0);
-       if (ret) {
+       if (ret)
                sock_shutdown(nbd);
-               flush_workqueue(nbd->recv_workq);
-       }
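+       /* Always flush so no recv work is still running past this point */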
+       flush_workqueue(nbd->recv_workq);
+
        mutex_lock(&nbd->config_lock);
        nbd_bdev_reset(bdev);
        /* user requested, ignore socket errors */
index d4d88b5818225b6e3fe441b9190c4e7d82a0e04a..ed34785dd64bd7b913d8003993e4b741fd58eb41 100644 (file)
@@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                return BLK_STS_IOERR;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_IMP_OPEN:
+       case BLK_ZONE_COND_EXP_OPEN:
+       case BLK_ZONE_COND_CLOSED:
                /* Writes must be at the write pointer position */
                if (sector != zone->wp)
                        return BLK_STS_IOERR;
 
-               if (zone->cond == BLK_ZONE_COND_EMPTY)
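+               /*
+                * A zone explicitly opened by the host stays EXP_OPEN;
+                * writes leave any other writable zone implicitly open.
+                */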
+               if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
                        zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
                zone->wp += nr_sectors;
@@ -186,7 +188,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                if (zone->cond == BLK_ZONE_COND_FULL)
                        return BLK_STS_IOERR;
 
-               zone->cond = BLK_ZONE_COND_CLOSED;
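+               /* Closing a zone that was never written returns it to EMPTY */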
+               if (zone->wp == zone->start)
+                       zone->cond = BLK_ZONE_COND_EMPTY;
+               else
+                       zone->cond = BLK_ZONE_COND_CLOSED;
                break;
        case REQ_OP_ZONE_FINISH:
                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
index ee67bf929fac8d57e5750c60dacf3ca58a73891d..861fc65a1b751a5a19ae8a5be8c1ede985df26ad 100644 (file)
@@ -2707,7 +2707,7 @@ static const struct block_device_operations pktcdvd_ops = {
        .release =              pkt_close,
        .ioctl =                pkt_ioctl,
 #ifdef CONFIG_COMPAT
-       .ioctl =                pkt_compat_ioctl,
+       .compat_ioctl =         pkt_compat_ioctl,
 #endif
        .check_events =         pkt_check_events,
 };
index 1f3f9e0f02a82673a6280c0dc0edf39a738c0aae..4eaf97d7a1704ff8daa64fb8dc55acc4864ddccc 100644 (file)
@@ -827,7 +827,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                goto failed_req_csr;
        }
 
-       card->csr_remap = ioremap_nocache(csr_base, csr_len);
+       card->csr_remap = ioremap(csr_base, csr_len);
        if (!card->csr_remap) {
                dev_printk(KERN_ERR, &card->dev->dev,
                        "Unable to remap memory region\n");
index e8c5c54e1d2657a1394dde3bdee52b443d32b2e3..4c5d99f8781361867344f3f49ddc9f825bcc285a 100644 (file)
@@ -171,6 +171,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
        blkif->domid = domid;
        atomic_set(&blkif->refcnt, 1);
        init_completion(&blkif->drain_complete);
+
+       /*
+        * Because freeing back to the cache may be deferred, it is not
+        * safe to unload the module (and hence destroy the cache) until
+        * this has completed. To prevent premature unloading, take an
+        * extra module reference here and release only when the object
+        * has been freed back to the cache.
+        */
+       __module_get(THIS_MODULE);
        INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
 
        return blkif;
@@ -181,6 +190,9 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
 {
        int err;
        struct xen_blkif *blkif = ring->blkif;
+       const struct blkif_common_sring *sring_common;
+       RING_IDX rsp_prod, req_prod;
+       unsigned int size;
 
        /* Already connected through? */
        if (ring->irq)
@@ -191,46 +203,62 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
        if (err < 0)
                return err;
 
+       sring_common = (struct blkif_common_sring *)ring->blk_ring;
+       rsp_prod = READ_ONCE(sring_common->rsp_prod);
+       req_prod = READ_ONCE(sring_common->req_prod);
+
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
        {
-               struct blkif_sring *sring;
-               sring = (struct blkif_sring *)ring->blk_ring;
-               BACK_RING_INIT(&ring->blk_rings.native, sring,
-                              XEN_PAGE_SIZE * nr_grefs);
+               struct blkif_sring *sring_native =
+                       (struct blkif_sring *)ring->blk_ring;
+
+               BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
+                                rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+               size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
                break;
        }
        case BLKIF_PROTOCOL_X86_32:
        {
-               struct blkif_x86_32_sring *sring_x86_32;
-               sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
-               BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
-                              XEN_PAGE_SIZE * nr_grefs);
+               struct blkif_x86_32_sring *sring_x86_32 =
+                       (struct blkif_x86_32_sring *)ring->blk_ring;
+
+               BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
+                                rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+               size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
                break;
        }
        case BLKIF_PROTOCOL_X86_64:
        {
-               struct blkif_x86_64_sring *sring_x86_64;
-               sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
-               BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
-                              XEN_PAGE_SIZE * nr_grefs);
+               struct blkif_x86_64_sring *sring_x86_64 =
+                       (struct blkif_x86_64_sring *)ring->blk_ring;
+
+               BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
+                                rsp_prod, XEN_PAGE_SIZE * nr_grefs);
+               size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
                break;
        }
        default:
                BUG();
        }
 
+       err = -EIO;
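+       /*
+        * A buggy or malicious frontend can pass arbitrary ring indices;
+        * reject a ring that claims more outstanding requests than it holds.
+        */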
+       if (req_prod - rsp_prod > size)
+               goto fail;
+
        err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
                                                    xen_blkif_be_int, 0,
                                                    "blkif-backend", ring);
-       if (err < 0) {
-               xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
-               ring->blk_rings.common.sring = NULL;
-               return err;
-       }
+       if (err < 0)
+               goto fail;
        ring->irq = err;
 
        return 0;
+
+fail:
+       xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
+       ring->blk_rings.common.sring = NULL;
+       return err;
 }
 
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
@@ -320,6 +348,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 
        /* Make sure everything is drained before shutting down */
        kmem_cache_free(xen_blkif_cachep, blkif);
+       module_put(THIS_MODULE);
 }
 
 int __init xen_blkif_interface_init(void)
@@ -1121,7 +1150,8 @@ static struct xenbus_driver xen_blkbk_driver = {
        .ids  = xen_blkbk_ids,
        .probe = xen_blkbk_probe,
        .remove = xen_blkbk_remove,
-       .otherend_changed = frontend_changed
+       .otherend_changed = frontend_changed,
+       .allow_rebind = true,
 };
 
 int xen_blkif_xenbus_init(void)
index a74d03913822df88989b9b0198da99dea490eef1..c02be06c529950ee89a08bc985cab6531f432342 100644 (file)
@@ -1113,8 +1113,8 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        if (!VDEV_IS_EXTENDED(info->vdevice)) {
                err = xen_translate_vdev(info->vdevice, &minor, &offset);
                if (err)
-                       return err;             
-               nr_parts = PARTS_PER_DISK;
+                       return err;
+               nr_parts = PARTS_PER_DISK;
        } else {
                minor = BLKIF_MINOR_EXT(info->vdevice);
                nr_parts = PARTS_PER_EXT_DISK;
index d9629fc13a15538115abb0fc46702087c0d401d7..6ae48ad804091f79040dc7ef2034d3a0a8b8d0ea 100644 (file)
@@ -97,12 +97,12 @@ int __must_check fsl_create_mc_io(struct device *dev,
                return -EBUSY;
        }
 
-       mc_portal_virt_addr = devm_ioremap_nocache(dev,
+       mc_portal_virt_addr = devm_ioremap(dev,
                                                   mc_portal_phys_addr,
                                                   mc_portal_size);
        if (!mc_portal_virt_addr) {
                dev_err(dev,
-                       "devm_ioremap_nocache failed for MC portal %pa\n",
+                       "devm_ioremap failed for MC portal %pa\n",
                        &mc_portal_phys_addr);
                return -ENXIO;
        }
index 56887c6877a7b519c2bba19f9c15c273d6035eb2..ccb44fe790a71ebbc5ccfa07216cbffaad29d6bd 100644 (file)
@@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata)
                return -EINVAL;
        }
 
+       /* Always add a slot for main clocks fck and ick even if unused */
+       if (!nr_fck)
+               ddata->nr_clocks++;
+       if (!nr_ick)
+               ddata->nr_clocks++;
+
        ddata->clocks = devm_kcalloc(ddata->dev,
                                     ddata->nr_clocks, sizeof(*ddata->clocks),
                                     GFP_KERNEL);
@@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata)
        struct clk *clock;
        int i, error;
 
-       if (!ddata->clocks)
+       if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
                return 0;
 
        for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
@@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
        struct clk *clock;
        int i;
 
-       if (!ddata->clocks)
+       if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
                return;
 
        for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
@@ -981,7 +987,8 @@ static int sysc_disable_module(struct device *dev)
                return ret;
        }
 
-       if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
+       if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
+           ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
                best_mode = SYSC_IDLE_FORCE;
 
        reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
@@ -1583,6 +1590,10 @@ static int sysc_reset(struct sysc *ddata)
        sysc_val |= sysc_mask;
        sysc_write(ddata, sysc_offset, sysc_val);
 
+       if (ddata->cfg.srst_udelay)
+               usleep_range(ddata->cfg.srst_udelay,
+                            ddata->cfg.srst_udelay * 2);
+
        if (ddata->clk_enable_quirk)
                ddata->clk_enable_quirk(ddata);
 
index ab154a75acf0edd6d68a297df3bf9b7c0f25834b..9e84239f88d4c8478be133c0cf2f1d7977f9d92f 100644 (file)
@@ -941,7 +941,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 
        bridge->gatt_table = (u32 __iomem *)table;
 #else
-       bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+       bridge->gatt_table = ioremap(virt_to_phys(table),
                                        (PAGE_SIZE * (1 << page_order)));
        bridge->driver->cache_flush();
 #endif
index c6271ce250b32150ffa6516526d79f80516398dc..66a62d17a3f51ce80bd7220d2110b1225e341a94 100644 (file)
@@ -1087,7 +1087,7 @@ static void intel_i9xx_setup_flush(void)
        }
 
        if (intel_private.ifp_resource.start)
-               intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+               intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
        if (!intel_private.i9xx_flush_page)
                dev_err(&intel_private.pcidev->dev,
                        "can't ioremap flush page - no chipset flushing\n");
index 31c374b1b91b038827254824b48bb91b8b577d68..7ecf20a6d19cb62564f9b17878717483c009755a 100644 (file)
@@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
        unsigned int cdev = 0;
        u32 mnistat, tnistat, tstatus, mcmd;
        u16 tnicmd, mnicmd;
-       u8 mcapndx;
        u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
        u32 step, rem, rem_isoch, rem_async;
        int ret = 0;
@@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
                cur = list_entry(pos, struct agp_3_5_dev, list);
                dev = cur->dev;
 
-               mcapndx = cur->capndx;
-
                pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
 
                master[cdev].maxbw = (mnistat >> 16) & 0xff;
@@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
                cur = master[cdev].dev;
                dev = cur->dev;
 
-               mcapndx = cur->capndx;
-
                master[cdev].rq += (cdev == ndevs - 1)
                              ? (rem_async + rem_isoch) : step;
 
@@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
 {
        struct pci_dev *td = bridge->dev, *dev = NULL;
        u8 mcapndx;
-       u32 isoch, arqsz;
+       u32 isoch;
        u32 tstatus, mstatus, ncapid;
        u32 mmajor;
        u16 mpstat;
@@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
        if (isoch == 0) /* isoch xfers not available, bail out. */
                return -ENODEV;
 
-       arqsz     = (tstatus >> 13) & 0x7;
-
        /*
         * Allocate a head for our AGP 3.5 device list
         * (multiple AGP v3 devices are allowed behind a single bridge).
index eb108b3c619a3f5b3a71291cb1f58dca977a9fab..51121a4b82c77105984216c96d949da6cd42a4f2 100644 (file)
@@ -204,7 +204,7 @@ static int __init applicom_init(void)
                if (pci_enable_device(dev))
                        return -EIO;
 
-               RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
+               RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
 
                if (!RamIO) {
                        printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
@@ -259,7 +259,7 @@ static int __init applicom_init(void)
        /* Now try the specified ISA cards */
 
        for (i = 0; i < MAX_ISA_BOARD; i++) {
-               RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+               RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
 
                if (!RamIO) {
                        printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
index 290c880266bfed3780bba77e674634e91c83d2b2..9f205bd1acc0124ae5546aab56ea397b228a05e7 100644 (file)
@@ -317,7 +317,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
                return -EBUSY;
        }
 
-       intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
+       intel_rng_hw->mem = ioremap(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
        if (intel_rng_hw->mem == NULL)
                return -EBUSY;
 
index 8c78aa0904925728404b49aedda73a212330cb3a..7be8067ac4e87cfe9e5009811f5e91b02058e57e 100644 (file)
@@ -81,13 +81,13 @@ static int octeon_rng_probe(struct platform_device *pdev)
                return -ENOENT;
 
 
-       rng->control_status = devm_ioremap_nocache(&pdev->dev,
+       rng->control_status = devm_ioremap(&pdev->dev,
                                                   res_ports->start,
                                                   sizeof(u64));
        if (!rng->control_status)
                return -ENOENT;
 
-       rng->result = devm_ioremap_nocache(&pdev->dev,
+       rng->result = devm_ioremap(&pdev->dev,
                                           res_result->start,
                                           sizeof(u64));
        if (!rng->result)
index 909e0c3d82ea443bc5a34c3e085fa7835f2df1b5..cda12933a17dadd1f25c676e4414750175ad4ce8 100644 (file)
@@ -2175,6 +2175,7 @@ const struct file_operations urandom_fops = {
        .read  = urandom_read,
        .write = random_write,
        .unlocked_ioctl = random_ioctl,
+       .compat_ioctl = compat_ptr_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
 };
index 2ec47a69a2a6c1d061d1f94dca7d7f7be4421d57..87f4493402021b6af277881cbdb928cddbdf10db 100644 (file)
@@ -61,6 +61,12 @@ static void tpm_dev_async_work(struct work_struct *work)
 
        mutex_lock(&priv->buffer_mutex);
        priv->command_enqueued = false;
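+       /* tpm_try_get_ops() fails if the chip is going away; report it */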
+       ret = tpm_try_get_ops(priv->chip);
+       if (ret) {
+               priv->response_length = ret;
+               goto out;
+       }
+
        ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                               sizeof(priv->data_buffer));
        tpm_put_ops(priv->chip);
@@ -68,6 +74,7 @@ static void tpm_dev_async_work(struct work_struct *work)
                priv->response_length = ret;
                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
        }
+out:
        mutex_unlock(&priv->buffer_mutex);
        wake_up_interruptible(&priv->async_wait);
 }
@@ -123,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
                priv->response_read = true;
 
                ret_size = min_t(ssize_t, size, priv->response_length);
-               if (!ret_size) {
+               if (ret_size <= 0) {
                        priv->response_length = 0;
                        goto out;
                }
@@ -204,6 +211,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
        if (file->f_flags & O_NONBLOCK) {
                priv->command_enqueued = true;
                queue_work(tpm_dev_wq, &priv->async_work);
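+               /* The async worker takes its own ops reference, so drop ours */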
+               tpm_put_ops(priv->chip);
                mutex_unlock(&priv->buffer_mutex);
                return size;
        }
index 1089fc0bb290cb59098321fbf6605872167f7a15..f3742bcc73e377ce8b1bbc35fdbe1c4a2978d7af 100644 (file)
@@ -14,7 +14,7 @@ struct file_priv {
        struct work_struct timeout_work;
        struct work_struct async_work;
        wait_queue_head_t async_wait;
-       size_t response_length;
+       ssize_t response_length;
        bool response_read;
        bool command_enqueued;
 
index 3b53b3e5ec3e7caad780fad060ca99a8e6333c8f..d52bf4df0bca96d83a1233f4b3f870c3365805a2 100644 (file)
@@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(timeouts);
 
-static struct attribute *tpm_dev_attrs[] = {
+static ssize_t tpm_version_major_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct tpm_chip *chip = to_tpm_chip(dev);
+
+       return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2
+                      ? "2" : "1");
+}
+static DEVICE_ATTR_RO(tpm_version_major);
+
+static struct attribute *tpm1_dev_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_enabled.attr,
@@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = {
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
+       &dev_attr_tpm_version_major.attr,
        NULL,
 };
 
-static const struct attribute_group tpm_dev_group = {
-       .attrs = tpm_dev_attrs,
+static struct attribute *tpm2_dev_attrs[] = {
+       &dev_attr_tpm_version_major.attr,
+       NULL
+};
+
+static const struct attribute_group tpm1_dev_group = {
+       .attrs = tpm1_dev_attrs,
+};
+
+static const struct attribute_group tpm2_dev_group = {
+       .attrs = tpm2_dev_attrs,
 };
 
 void tpm_sysfs_add_device(struct tpm_chip *chip)
 {
-       if (chip->flags & TPM_CHIP_FLAG_TPM2)
-               return;
-
        WARN_ON(chip->groups_cnt != 0);
-       chip->groups[chip->groups_cnt++] = &tpm_dev_group;
+       if (chip->flags & TPM_CHIP_FLAG_TPM2)
+               chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
+       else
+               chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
 }
index b9e1547be6b51e1ea60491cfbdb9705465ec6626..5620747da0cfd7e75d0af923fe749212cf14938b 100644 (file)
@@ -218,7 +218,6 @@ int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
 int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
                    struct tpm_digest *digests);
 int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
-void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
 ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
                        u32 *value, const char *desc);
 
index fdb457704aa798437c1d56ad7c1a3de9a06c36ba..13696deceae8e7fb73862ea99d731a58fe647f67 100644 (file)
@@ -362,6 +362,7 @@ void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
        tpm_transmit_cmd(chip, &buf, 0, "flushing context");
        tpm_buf_destroy(&buf);
 }
+EXPORT_SYMBOL_GPL(tpm2_flush_context);
 
 struct tpm2_get_cap_out {
        u8 more_data;
index 6640a14dbe48cf137f2ed5212e699888730cfebd..22bf553ccf9df35e61c411707a78373f4ea8aa38 100644 (file)
@@ -32,7 +32,7 @@ static const uuid_t ftpm_ta_uuid =
                  0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
 
 /**
- * ftpm_tee_tpm_op_recv - retrieve fTPM response.
+ * ftpm_tee_tpm_op_recv() - retrieve fTPM response.
  * @chip:      the tpm_chip description as specified in driver/char/tpm/tpm.h.
  * @buf:       the buffer to store data.
  * @count:     the number of bytes to read.
@@ -61,7 +61,7 @@ static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 }
 
 /**
- * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory.
+ * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
  * @chip:      the tpm_chip description as specified in driver/char/tpm/tpm.h
  * @buf:       the buffer to send.
  * @len:       the number of bytes to send.
@@ -208,7 +208,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
 }
 
 /**
- * ftpm_tee_probe - initialize the fTPM
+ * ftpm_tee_probe() - initialize the fTPM
  * @pdev: the platform_device description.
  *
  * Return:
@@ -298,7 +298,7 @@ out_tee_session:
 }
 
 /**
- * ftpm_tee_remove - remove the TPM device
+ * ftpm_tee_remove() - remove the TPM device
  * @pdev: the platform_device description.
  *
  * Return:
@@ -328,6 +328,19 @@ static int ftpm_tee_remove(struct platform_device *pdev)
        return 0;
 }
 
+/**
+ * ftpm_tee_shutdown() - shutdown the TPM device
+ * @pdev: the platform_device description.
+ */
+static void ftpm_tee_shutdown(struct platform_device *pdev)
+{
+       struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+
+       tee_shm_free(pvt_data->shm);
+       tee_client_close_session(pvt_data->ctx, pvt_data->session);
+       tee_client_close_context(pvt_data->ctx);
+}
+
 static const struct of_device_id of_ftpm_tee_ids[] = {
        { .compatible = "microsoft,ftpm" },
        { }
@@ -341,6 +354,7 @@ static struct platform_driver ftpm_tee_driver = {
        },
        .probe = ftpm_tee_probe,
        .remove = ftpm_tee_remove,
+       .shutdown = ftpm_tee_shutdown,
 };
 
 module_platform_driver(ftpm_tee_driver);
index 8af2cee1a762cd1ad1092cabee4f0b757f2f4ff9..27c6ca031e23ec8a26401dc281271404e8e3a008 100644 (file)
@@ -1059,8 +1059,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
                        goto out_err;
                }
 
-               tpm_chip_start(chip);
-               chip->flags |= TPM_CHIP_FLAG_IRQ;
                if (irq) {
                        tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
                                                 irq);
@@ -1070,7 +1068,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
                } else {
                        tpm_tis_probe_irq(chip, intmask);
                }
-               tpm_chip_stop(chip);
        }
 
        rc = tpm_chip_register(chip);
index 0aabe49aed09449b153de865adc739dc8e22eacf..a9d4234758d7d42258a6686866bb20ba7aa0f847 100644 (file)
@@ -348,7 +348,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
index 0ac34cdaa106932fb797c534dae04273d26b8aad..77fe83a73bf480815e3aca23678231fee960235d 100644 (file)
@@ -83,7 +83,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
index 0855f3a80cc79ea8ea6933192964e02c0a2ef079..086cf0b4955c2ec5591e740785abe658452853fb 100644 (file)
@@ -146,7 +146,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
index 0b03cfae3a9ddcae4c7906edc77c82ac40d30c6c..b71515acdec1f0eaebdf865fb184cdbed40d27b8 100644 (file)
@@ -275,7 +275,7 @@ static int __init pmc_register_ops(void)
 
        np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
 
-       pmcreg = syscon_node_to_regmap(np);
+       pmcreg = device_node_to_regmap(np);
        if (IS_ERR(pmcreg))
                return PTR_ERR(pmcreg);
 
index 0de1108737db9326de1507dc8bb7b6e48b5df1c9..ff7e3f727082e261a5990d332b16bcec6750ee3d 100644 (file)
@@ -162,7 +162,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
index 25b156d4e645f6daf4edea37f16cede5ac3ce4b9..a6dee4a3b6e48eade40a187be4b195cf0c7b17fe 100644 (file)
@@ -136,7 +136,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
index b68e200829f202d78f49af69cd20c291f0d92022..772258de2d1f3c270e0c5747fb848be5e7c506e4 100644 (file)
@@ -3249,6 +3249,34 @@ static inline void clk_debug_unregister(struct clk_core *core)
 }
 #endif
 
+static void clk_core_reparent_orphans_nolock(void)
+{
+       struct clk_core *orphan;
+       struct hlist_node *tmp2;
+
+       /*
+        * Walk the list of orphan clocks and reparent any that newly find a
+        * parent.
+        */
+       hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
+               struct clk_core *parent = __clk_init_parent(orphan);
+
+               /*
+                * We need to use __clk_set_parent_before() and _after() to
+                * to properly migrate any prepare/enable count of the orphan
+                * clock. This is important for CLK_IS_CRITICAL clocks, which
+                * are enabled during init but might not have a parent yet.
+                */
+               if (parent) {
+                       /* update the clk tree topology */
+                       __clk_set_parent_before(orphan, parent);
+                       __clk_set_parent_after(orphan, parent, NULL);
+                       __clk_recalc_accuracies(orphan);
+                       __clk_recalc_rates(orphan, 0);
+               }
+       }
+}
+
 /**
  * __clk_core_init - initialize the data structures in a struct clk_core
  * @core:      clk_core being initialized
@@ -3259,8 +3287,6 @@ static inline void clk_debug_unregister(struct clk_core *core)
 static int __clk_core_init(struct clk_core *core)
 {
        int ret;
-       struct clk_core *orphan;
-       struct hlist_node *tmp2;
        unsigned long rate;
 
        if (!core)
@@ -3400,34 +3426,21 @@ static int __clk_core_init(struct clk_core *core)
        if (core->flags & CLK_IS_CRITICAL) {
                unsigned long flags;
 
-               clk_core_prepare(core);
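+               /*
+                * Registration of a critical clock must fail if it cannot
+                * be prepared and enabled.
+                */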
+               ret = clk_core_prepare(core);
+               if (ret)
+                       goto out;
 
                flags = clk_enable_lock();
-               clk_core_enable(core);
+               ret = clk_core_enable(core);
                clk_enable_unlock(flags);
+               if (ret) {
+                       clk_core_unprepare(core);
+                       goto out;
+               }
        }
 
-       /*
-        * walk the list of orphan clocks and reparent any that newly finds a
-        * parent.
-        */
-       hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
-               struct clk_core *parent = __clk_init_parent(orphan);
+       clk_core_reparent_orphans_nolock();
 
-               /*
-                * We need to use __clk_set_parent_before() and _after() to
-                * to properly migrate any prepare/enable count of the orphan
-                * clock. This is important for CLK_IS_CRITICAL clocks, which
-                * are enabled during init but might not have a parent yet.
-                */
-               if (parent) {
-                       /* update the clk tree topology */
-                       __clk_set_parent_before(orphan, parent);
-                       __clk_set_parent_after(orphan, parent, NULL);
-                       __clk_recalc_accuracies(orphan);
-                       __clk_recalc_rates(orphan, 0);
-               }
-       }
 
        kref_init(&core->ref);
 out:
@@ -4179,6 +4192,13 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
 
 #ifdef CONFIG_OF
+static void clk_core_reparent_orphans(void)
+{
+       clk_prepare_lock();
+       clk_core_reparent_orphans_nolock();
+       clk_prepare_unlock();
+}
+
 /**
  * struct of_clk_provider - Clock provider registration structure
  * @link: Entry in global list of clock providers
@@ -4274,6 +4294,8 @@ int of_clk_add_provider(struct device_node *np,
        mutex_unlock(&of_clk_mutex);
        pr_debug("Added clock from %pOF\n", np);
 
+       clk_core_reparent_orphans();
+
        ret = of_clk_set_defaults(np, true);
        if (ret < 0)
                of_clk_del_provider(np);
@@ -4309,6 +4331,8 @@ int of_clk_add_hw_provider(struct device_node *np,
        mutex_unlock(&of_clk_mutex);
        pr_debug("Added clk_hw provider from %pOF\n", np);
 
+       clk_core_reparent_orphans();
+
        ret = of_clk_set_defaults(np, true);
        if (ret < 0)
                of_clk_del_provider(np);
index 388bdb94f841da0a153cb024346832d282e853b8..d3486ee79ab54ebf186e94deb7b37d759a553f76 100644 (file)
@@ -142,6 +142,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
        mux->reg = reg;
        mux->shift = PCG_PCS_SHIFT;
        mux->mask = PCG_PCS_MASK;
+       mux->lock = &imx_ccm_lock;
 
        div = kzalloc(sizeof(*div), GFP_KERNEL);
        if (!div)
@@ -161,6 +162,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
        gate_hw = &gate->hw;
        gate->reg = reg;
        gate->bit_idx = PCG_CGC_SHIFT;
+       gate->lock = &imx_ccm_lock;
 
        hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
                        mux_hw, &clk_mux_ops, div_hw,
index 3fdf3d494f0afc305c0212fb57914f1abbbf498c..281191b55b3af5b59a43384bb2d1949d7393e98d 100644 (file)
@@ -40,6 +40,7 @@ static const struct clk_div_table ulp_div_table[] = {
        { .val = 5, .div = 16, },
        { .val = 6, .div = 32, },
        { .val = 7, .div = 64, },
+       { /* sentinel */ },
 };
 
 static const int pcc2_uart_clk_ids[] __initconst = {
index 5c458199060a6e9557bb36b02fb488bb0b76a1bc..3636c8035c7d95e12012a2e9ff0bb40dc659c50b 100644 (file)
@@ -159,7 +159,7 @@ static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
 {
        u32 val;
 
-       return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0,
+       return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
                        LOCK_TIMEOUT_US);
 }
 
index a60a1be937ad6566a50270fef46b458ba8edf954..b4a95cbbda989fae6dbc2aca8bd90f4ea945ee77 100644 (file)
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock);
 static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
 
 static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
+static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
 
 static DEFINE_SPINLOCK(reset_lock);
 
index 38424e63bcae2e5f03494e58ba0d24f4d17b8765..7f59fb8da0337c64bb04cabd00c2de8efe2c05b1 100644 (file)
@@ -2186,7 +2186,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
        .pd = {
                .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
        },
-       .pwrsts = PWRSTS_OFF_ON | VOTABLE,
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
@@ -2194,7 +2195,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
        .pd = {
                .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
        },
-       .pwrsts = PWRSTS_OFF_ON | VOTABLE,
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc *gcc_sc7180_gdscs[] = {
index f7b370f3acef6f15de4887e95aa22f448e88d863..f6ce888098be9b3fbeab9852e84359614ac45d54 100644 (file)
@@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
                .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
@@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
                .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
@@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
                .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
@@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
                .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
@@ -3287,6 +3291,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
                .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
@@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
                .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
@@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
                .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct clk_regmap *gcc_sdm845_clocks[] = {
index e5e2492b20c5e840cb999142c70bb4360edc2035..9b3923af02a142dcf7c821114adee5522a3c542f 100644 (file)
@@ -242,10 +242,12 @@ static struct clk_branch gfx3d_isense_clk = {
 
 static struct gdsc gpu_cx_gdsc = {
        .gdscr = 0x1004,
+       .gds_hw_ctrl = 0x1008,
        .pd = {
                .name = "gpu_cx",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
 };
 
 static struct gdsc gpu_gx_gdsc = {
index fbc34beafc7820eaf704d8ce8ddce045f84ab369..7b703f14e20bb7f1ad45c594c7abab67dc13cce8 100644 (file)
@@ -37,8 +37,8 @@ static u16 __init rz_cpg_read_mode_pins(void)
        void __iomem *ppr0, *pibc0;
        u16 modes;
 
-       ppr0 = ioremap_nocache(PPR0, 2);
-       pibc0 = ioremap_nocache(PIBC0, 2);
+       ppr0 = ioremap(PPR0, 2);
+       pibc0 = ioremap(PIBC0, 2);
        BUG_ON(!ppr0 || !pibc0);
        iowrite16(4, pibc0);    /* enable input buffer */
        modes = ioread16(ppr0);
index 3a991ca1ee3605af87831a603914db9ee3fe5deb..c9e5a1fb66539eed0f078d01849aa947752c2d5b 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/clk.h>
 
 #include "clk.h"
 #include "clk-cpu.h"
@@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np,
                                     exynos5x_subcmus);
        }
 
+       /*
+        * Keep top part of G3D clock path enabled permanently to ensure
+        * that the internal busses get their clock regardless of the
+        * main G3D clock enablement status.
+        */
+       clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d"));
+
        samsung_clk_of_add_provider(np, ctx);
 }
 
index 45a1ed3fe6742fd7ed9edc8f797a0f80e63a220b..50f8d1bc7046552804a1bad0576be558e9b89cb6 100644 (file)
@@ -23,9 +23,9 @@
  */
 
 static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k",
-                                            "pll-periph0", "iosc" };
+                                                    "iosc", "pll-periph0" };
 static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = {
-       { .index = 2, .shift = 0, .width = 5 },
+       { .index = 3, .shift = 0, .width = 5 },
 };
 
 static struct ccu_div ar100_clk = {
@@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = {
 
 static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0);
 
-static struct ccu_div r_apb1_clk = {
-       .div            = _SUNXI_CCU_DIV(0, 2),
-
-       .common         = {
-               .reg            = 0x00c,
-               .hw.init        = CLK_HW_INIT("r-apb1",
-                                             "r-ahb",
-                                             &ccu_div_ops,
-                                             0),
-       },
-};
+static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0);
 
 static struct ccu_div r_apb2_clk = {
        .div            = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
index 4646fdc61053b0c65d857a8424c71e5e517a969d..4c8c491b87c2777d636cdd15b603e0c45c72cc71 100644 (file)
@@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = {
 
 static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0);
 
-static struct ccu_div apb0_clk = {
-       .div            = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO),
-
-       .common         = {
-               .reg            = 0x0c,
-               .hw.init        = CLK_HW_INIT_HW("apb0",
-                                                &ahb0_clk.hw,
-                                                &ccu_div_ops,
-                                                0),
-       },
-};
-
-static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
+static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
 
 /*
  * Define the parent as an array that can be reused to save space
@@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = {
 
 static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
        &ar100_clk.common,
-       &a83t_apb0_clk.common,
+       &apb0_clk.common,
        &apb0_pio_clk.common,
        &apb0_ir_clk.common,
        &apb0_timer_clk.common,
@@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
        .hws    = {
                [CLK_AR100]             = &ar100_clk.common.hw,
                [CLK_AHB0]              = &ahb0_clk.hw,
-               [CLK_APB0]              = &a83t_apb0_clk.common.hw,
+               [CLK_APB0]              = &apb0_clk.common.hw,
                [CLK_APB0_PIO]          = &apb0_pio_clk.common.hw,
                [CLK_APB0_IR]           = &apb0_ir_clk.common.hw,
                [CLK_APB0_TIMER]        = &apb0_timer_clk.common.hw,
@@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
 
 static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
 {
-       /* Fix apb0 bus gate parents here */
-       apb0_gate_parent[0] = &a83t_apb0_clk.common.hw;
-
        sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
 }
 CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
index 897490800102f3612b79d9c6e37efed0e74ef08e..23bfe1d12f217efdde824e3288d0727d29064e73 100644 (file)
@@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = {
                .reg            = 0x1f0,
                .features       = CCU_FEATURE_FIXED_PREDIV,
                .hw.init        = CLK_HW_INIT_PARENTS("outa", out_parents,
-                                                     &ccu_mp_ops, 0),
+                                                     &ccu_mp_ops,
+                                                     CLK_SET_RATE_PARENT),
        }
 };
 
@@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = {
                .reg            = 0x1f4,
                .features       = CCU_FEATURE_FIXED_PREDIV,
                .hw.init        = CLK_HW_INIT_PARENTS("outb", out_parents,
-                                                     &ccu_mp_ops, 0),
+                                                     &ccu_mp_ops,
+                                                     CLK_SET_RATE_PARENT),
        }
 };
 
index 5c779eec454b6edc734ced3a9fd7d5b1a020f4de..0e36ca3bf3d528d1788c195f84bdeea59ef5897b 100644 (file)
@@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
                [CLK_MBUS]              = &mbus_clk.common.hw,
                [CLK_MIPI_CSI]          = &mipi_csi_clk.common.hw,
        },
-       .num    = CLK_NUMBER,
+       .num    = CLK_PLL_DDR1 + 1,
 };
 
 static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
@@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
                [CLK_MBUS]              = &mbus_clk.common.hw,
                [CLK_MIPI_CSI]          = &mipi_csi_clk.common.hw,
        },
-       .num    = CLK_NUMBER,
+       .num    = CLK_I2S0 + 1,
 };
 
 static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
index b0160d305a6775a3721a5438b4223e8a77251367..108eeeedcbf7602dfbe55b40624ccae9786b27d0 100644 (file)
@@ -51,6 +51,4 @@
 
 #define CLK_PLL_DDR1           74
 
-#define CLK_NUMBER             (CLK_I2S0 + 1)
-
 #endif /* _CCU_SUN8I_H3_H_ */
index e6bd6d1ea0128525f76fb4b07f12e7a31054f133..f6cdce441cf7ac4e45eb9bb26576187c47d546e1 100644 (file)
@@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
        periph_banks = banks;
 
        clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
-       if (!clks)
+       if (!clks) {
                kfree(periph_clk_enb_refcnt);
+               return NULL;
+       }
 
        clk_num = num;
 
index f65e16c4f3c4b8585ea56cc97633730a6882a01f..8d4c08b034bdde50e77b60bd15cacba7860698b2 100644 (file)
@@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
        cinfo->iobase = of_iomap(node, 0);
        cinfo->dev = &pdev->dev;
        pm_runtime_enable(cinfo->dev);
-       pm_runtime_irq_safe(cinfo->dev);
 
        pm_runtime_get_sync(cinfo->dev);
        atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
index 5fdd76cb1768117e069b7aba83bc294d60c838a3..cc909e465823ddeab37903f321fdca4298305fb9 100644 (file)
@@ -88,7 +88,7 @@ config ROCKCHIP_TIMER
        select TIMER_OF
        select CLKSRC_MMIO
        help
-         Enables the support for the rockchip timer driver.
+         Enables support for the Rockchip timer driver.
 
 config ARMADA_370_XP_TIMER
        bool "Armada 370 and XP timer driver" if COMPILE_TEST
@@ -162,13 +162,13 @@ config NPCM7XX_TIMER
        select CLKSRC_MMIO
        help
          Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
-         While TIMER0 serves as clockevent and TIMER1 serves as clocksource.
+         where TIMER0 serves as clockevent and TIMER1 serves as clocksource.
 
 config CADENCE_TTC_TIMER
        bool "Cadence TTC timer driver" if COMPILE_TEST
        depends on COMMON_CLK
        help
-         Enables support for the cadence ttc driver.
+         Enables support for the Cadence TTC driver.
 
 config ASM9260_TIMER
        bool "ASM9260 timer driver" if COMPILE_TEST
@@ -190,10 +190,10 @@ config CLKSRC_DBX500_PRCMU
        bool "Clocksource PRCMU Timer" if COMPILE_TEST
        depends on HAS_IOMEM
        help
-         Use the always on PRCMU Timer as clocksource
+         Use the always-on PRCMU Timer as clocksource.
 
 config CLPS711X_TIMER
-       bool "Cirrus logic timer driver" if COMPILE_TEST
+       bool "Cirrus Logic timer driver" if COMPILE_TEST
        select CLKSRC_MMIO
        help
          Enables support for the Cirrus Logic PS711 timer.
@@ -205,11 +205,11 @@ config ATLAS7_TIMER
          Enables support for the Atlas7 timer.
 
 config MXS_TIMER
-       bool "Mxs timer driver" if COMPILE_TEST
+       bool "MXS timer driver" if COMPILE_TEST
        select CLKSRC_MMIO
        select STMP_DEVICE
        help
-         Enables support for the Mxs timer.
+         Enables support for the MXS timer.
 
 config PRIMA2_TIMER
        bool "Prima2 timer driver" if COMPILE_TEST
@@ -238,10 +238,10 @@ config KEYSTONE_TIMER
          Enables support for the Keystone timer.
 
 config INTEGRATOR_AP_TIMER
-       bool "Integrator-ap timer driver" if COMPILE_TEST
+       bool "Integrator-AP timer driver" if COMPILE_TEST
        select CLKSRC_MMIO
        help
-         Enables support for the Integrator-ap timer.
+         Enables support for the Integrator-AP timer.
 
 config CLKSRC_EFM32
        bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
@@ -283,8 +283,8 @@ config CLKSRC_NPS
        select TIMER_OF if OF
        help
          NPS400 clocksource support.
-         Got 64 bit counter with update rate up to 1000MHz.
-         This counter is accessed via couple of 32 bit memory mapped registers.
+         It has a 64-bit counter with an update rate of up to 1000MHz.
+         This counter is accessed via a pair of 32-bit memory-mapped registers.
 
 config CLKSRC_STM32
        bool "Clocksource for STM32 SoCs" if !ARCH_STM32
@@ -305,14 +305,14 @@ config ARC_TIMERS
        help
          These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
          (ARC700 as well as ARC HS38).
-         TIMER0 serves as clockevent while TIMER1 provides clocksource
+         TIMER0 serves as clockevent while TIMER1 provides clocksource.
 
 config ARC_TIMERS_64BIT
        bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
        depends on ARC_TIMERS
        select TIMER_OF
        help
-         This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP)
+         This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP).
          RTC is implemented inside the core, while GFRC sits outside the core in
          ARConnect IP block. Driver automatically picks one of them for clocksource
          as appropriate.
@@ -390,7 +390,7 @@ config ARM_GLOBAL_TIMER
        select TIMER_OF if OF
        depends on ARM
        help
-         This options enables support for the ARM global timer unit
+         This option enables support for the ARM global timer unit.
 
 config ARM_TIMER_SP804
        bool "Support for Dual Timer SP804 module" if COMPILE_TEST
@@ -403,14 +403,14 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
        depends on ARM_GLOBAL_TIMER
        default y
        help
-        Use ARM global timer clock source as sched_clock
+         Use ARM global timer clock source as sched_clock.
 
 config ARMV7M_SYSTICK
        bool "Support for the ARMv7M system time" if COMPILE_TEST
        select TIMER_OF if OF
        select CLKSRC_MMIO
        help
-         This options enables support for the ARMv7M system timer unit
+         This option enables support for the ARMv7M system timer unit.
 
 config ATMEL_PIT
        bool "Atmel PIT support" if COMPILE_TEST
@@ -460,7 +460,7 @@ config VF_PIT_TIMER
        bool
        select CLKSRC_MMIO
        help
-         Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+         Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
 
 config OXNAS_RPS_TIMER
        bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
@@ -470,7 +470,7 @@ config OXNAS_RPS_TIMER
          This enables support for the Oxford Semiconductor OXNAS RPS timers.
 
 config SYS_SUPPORTS_SH_CMT
-        bool
+       bool
 
 config MTK_TIMER
        bool "Mediatek timer driver" if COMPILE_TEST
@@ -490,13 +490,13 @@ config SPRD_TIMER
          Enables support for the Spreadtrum timer driver.
 
 config SYS_SUPPORTS_SH_MTU2
-        bool
+       bool
 
 config SYS_SUPPORTS_SH_TMU
-        bool
+       bool
 
 config SYS_SUPPORTS_EM_STI
-        bool
+       bool
 
 config CLKSRC_JCORE_PIT
        bool "J-Core PIT timer driver" if COMPILE_TEST
@@ -523,7 +523,7 @@ config SH_TIMER_MTU2
        help
          This enables build of a clockevent driver for the Multi-Function
          Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
-         This hardware comes with 16 bit-timer registers.
+         This hardware comes with 16-bit timer registers.
 
 config RENESAS_OSTM
        bool "Renesas OSTM timer driver" if COMPILE_TEST
@@ -580,7 +580,7 @@ config CLKSRC_TANGO_XTAL
        select TIMER_OF
        select CLKSRC_MMIO
        help
-         This enables the clocksource for Tango SoC
+         This enables the clocksource for Tango SoC.
 
 config CLKSRC_PXA
        bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
@@ -591,24 +591,24 @@ config CLKSRC_PXA
          platforms.
 
 config H8300_TMR8
-        bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
-        depends on HAS_IOMEM
+       bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
+       depends on HAS_IOMEM
        help
          This enables the 8-bit timer for the H8300 platform.
 
 config H8300_TMR16
-        bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
-        depends on HAS_IOMEM
+       bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
+       depends on HAS_IOMEM
        help
          This enables the 16-bit timer for the H8300 platform with the
-         H83069 cpu.
+         H83069 CPU.
 
 config H8300_TPU
-        bool "Clocksource for the H8300 platform" if COMPILE_TEST
-        depends on HAS_IOMEM
+       bool "Clocksource for the H8300 platform" if COMPILE_TEST
+       depends on HAS_IOMEM
        help
          This enables the clocksource for the H8300 platform with the
-         H8S2678 cpu.
+         H8S2678 CPU.
 
 config CLKSRC_IMX_GPT
        bool "Clocksource using i.MX GPT" if COMPILE_TEST
@@ -666,8 +666,8 @@ config CSKY_MP_TIMER
        help
          Say yes here to enable C-SKY SMP timer driver used for C-SKY SMP
          system.
-         csky,mptimer is not only used in SMP system, it also could be used
-         single core system. It's not a mmio reg and it use mtcr/mfcr instruction.
+         csky,mptimer is not only used in SMP systems, it can also be used
+         in single-core systems. It's not an mmio reg and it uses the
+         mtcr/mfcr instructions.
 
 config GX6605S_TIMER
        bool "Gx6605s SOC system timer driver" if COMPILE_TEST
@@ -697,4 +697,14 @@ config INGENIC_TIMER
        help
          Support for the timer/counter unit of the Ingenic JZ SoCs.
 
+config MICROCHIP_PIT64B
+       bool "Microchip PIT64B support"
+       depends on OF || COMPILE_TEST
+       select CLKSRC_MMIO
+       help
+         This option enables the Microchip PIT64B timer for Atmel-based
+         systems. It supports oneshot and periodic modes as well as high
+         resolution. It is used as a clocksource and a clockevent.
+
 endmenu
index 4dfe4225ece788a1d7ca13c748e008bfe1511a38..713686faa549a1925132e4dbf8e9e9d80c057b41 100644 (file)
@@ -88,3 +88,4 @@ obj-$(CONFIG_RISCV_TIMER)             += timer-riscv.o
 obj-$(CONFIG_CSKY_MP_TIMER)            += timer-mp-csky.o
 obj-$(CONFIG_GX6605S_TIMER)            += timer-gx6605s.o
 obj-$(CONFIG_HYPERV_TIMER)             += hyperv_timer.o
+obj-$(CONFIG_MICROCHIP_PIT64B)         += timer-microchip-pit64b.o
index 2b196cbfadb625796078a0973e8c53dd39956d4d..b235f446ee50f950988e9dc63b090b7dfdbd2372 100644 (file)
@@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
        ret = setup_irq(irq, &timer->act);
        if (ret) {
                pr_err("Can't set up timer IRQ\n");
-               goto err_iounmap;
+               goto err_timer_free;
        }
 
        clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
@@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node)
 
        return 0;
 
+err_timer_free:
+       kfree(timer);
+
 err_iounmap:
        iounmap(base);
        return ret;
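
The bcm2835 hunk above plugs a leak on the IRQ-setup failure path: once the timer structure has been allocated, the error path must free it before unmapping the registers. A minimal sketch of the restored unwind idiom, with hypothetical names rather than the driver's own code — each label releases exactly what was acquired after the previous one, in reverse order:

static int __init example_timer_init(void)
{
	void __iomem *base;
	struct example_timer *t;
	int ret;

	base = ioremap(EXAMPLE_PHYS, SZ_4K);	/* assumed address */
	if (!base)
		return -ENXIO;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	ret = example_setup_irq(t);		/* hypothetical */
	if (ret)
		goto err_timer_free;		/* free first... */

	return 0;

err_timer_free:
	kfree(t);
err_iounmap:
	iounmap(base);				/* ...then unmap */
	return ret;
}
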
index 9039df4f90e2bd6caa58ee6848c8a48ebec7d365..ab190dffb1edcf10a180eacfb129db69ff1fd28d 100644 (file)
@@ -279,9 +279,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
 static int em_sti_probe(struct platform_device *pdev)
 {
        struct em_sti_priv *p;
-       struct resource *res;
-       int irq;
-       int ret;
+       int irq, ret;
 
        p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
        if (p == NULL)
@@ -295,8 +293,7 @@ static int em_sti_probe(struct platform_device *pdev)
                return irq;
 
        /* map memory, let base point to the STI instance */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       p->base = devm_ioremap_resource(&pdev->dev, res);
+       p->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(p->base))
                return PTR_ERR(p->base);
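
The em_sti change above is one instance of a tree-wide conversion: a platform_get_resource() plus devm_ioremap_resource() pair collapses into a single devm_platform_ioremap_resource() call (the kirkwood, tegra186, clps711x and kirkwood-cpuidle hunks below apply the same transform). A sketch of the equivalence in a hypothetical probe():

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* old two-step form:
	 *   res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
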
 
index 74cb299f5089e7fa00ff50ce4546c0512891f08c..a267fe31ef1330376b2bd03162374bf1dcf04b9b 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * EXYNOS4 MCT(Multi-Core Timer) support
+ * Exynos4 MCT(Multi-Core Timer) support
 */
 
 #include <linux/interrupt.h>
index 287d8d58c21ac15a4a72860ff8a433d3ce8caa34..9d808d595ca85e22ce8b0f9471c53e91a6a99446 100644 (file)
@@ -66,7 +66,7 @@ static int hv_ce_set_next_event(unsigned long delta,
 {
        u64 current_tick;
 
-       current_tick = hyperv_cs->read(NULL);
+       current_tick = hv_read_reference_counter();
        current_tick += delta;
        hv_init_timer(0, current_tick);
        return 0;
@@ -302,22 +302,33 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
  * the other that uses the TSC reference page feature as defined in the
  * TLFS.  The MSR version is for compatibility with old versions of
  * Hyper-V and 32-bit x86.  The TSC reference page version is preferred.
+ *
+ * The Hyper-V clocksource ratings of 250 are chosen to be below the
+ * TSC clocksource rating of 300.  In configurations where Hyper-V offers
+ * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
+ * is available and preferred.  With the higher rating, it will be the
+ * default.  On older hardware and Hyper-V versions, the TSC is marked
+ * "unstable", so no TSC clocksource is created and the selected Hyper-V
+ * clocksource will be the default.
  */
 
-struct clocksource *hyperv_cs;
-EXPORT_SYMBOL_GPL(hyperv_cs);
+u64 (*hv_read_reference_counter)(void);
+EXPORT_SYMBOL_GPL(hv_read_reference_counter);
 
-static struct ms_hyperv_tsc_page tsc_pg __aligned(PAGE_SIZE);
+static union {
+       struct ms_hyperv_tsc_page page;
+       u8 reserved[PAGE_SIZE];
+} tsc_pg __aligned(PAGE_SIZE);
 
 struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 {
-       return &tsc_pg;
+       return &tsc_pg.page;
 }
 EXPORT_SYMBOL_GPL(hv_get_tsc_page);
 
-static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
+static u64 notrace read_hv_clock_tsc(void)
 {
-       u64 current_tick = hv_read_tsc_page(&tsc_pg);
+       u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
 
        if (current_tick == U64_MAX)
                hv_get_time_ref_count(current_tick);
@@ -325,20 +336,50 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
        return current_tick;
 }
 
+static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
+{
+       return read_hv_clock_tsc();
+}
+
 static u64 read_hv_sched_clock_tsc(void)
 {
-       return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
+       return read_hv_clock_tsc() - hv_sched_clock_offset;
+}
+
+static void suspend_hv_clock_tsc(struct clocksource *arg)
+{
+       u64 tsc_msr;
+
+       /* Disable the TSC page */
+       hv_get_reference_tsc(tsc_msr);
+       tsc_msr &= ~BIT_ULL(0);
+       hv_set_reference_tsc(tsc_msr);
+}
+
+
+static void resume_hv_clock_tsc(struct clocksource *arg)
+{
+       phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
+       u64 tsc_msr;
+
+       /* Re-enable the TSC page */
+       hv_get_reference_tsc(tsc_msr);
+       tsc_msr &= GENMASK_ULL(11, 0);
+       tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
+       hv_set_reference_tsc(tsc_msr);
 }
 
 static struct clocksource hyperv_cs_tsc = {
        .name   = "hyperv_clocksource_tsc_page",
-       .rating = 400,
-       .read   = read_hv_clock_tsc,
+       .rating = 250,
+       .read   = read_hv_clock_tsc_cs,
        .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+       .suspend= suspend_hv_clock_tsc,
+       .resume = resume_hv_clock_tsc,
 };
 
-static u64 notrace read_hv_clock_msr(struct clocksource *arg)
+static u64 notrace read_hv_clock_msr(void)
 {
        u64 current_tick;
        /*
@@ -350,15 +391,20 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg)
        return current_tick;
 }
 
+static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
+{
+       return read_hv_clock_msr();
+}
+
 static u64 read_hv_sched_clock_msr(void)
 {
-       return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
+       return read_hv_clock_msr() - hv_sched_clock_offset;
 }
 
 static struct clocksource hyperv_cs_msr = {
        .name   = "hyperv_clocksource_msr",
-       .rating = 400,
-       .read   = read_hv_clock_msr,
+       .rating = 250,
+       .read   = read_hv_clock_msr_cs,
        .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -371,8 +417,8 @@ static bool __init hv_init_tsc_clocksource(void)
        if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
                return false;
 
-       hyperv_cs = &hyperv_cs_tsc;
-       phys_addr = virt_to_phys(&tsc_pg);
+       hv_read_reference_counter = read_hv_clock_tsc;
+       phys_addr = virt_to_phys(hv_get_tsc_page());
 
        /*
         * The Hyper-V TLFS specifies to preserve the value of reserved
@@ -389,7 +435,7 @@ static bool __init hv_init_tsc_clocksource(void)
        hv_set_clocksource_vdso(hyperv_cs_tsc);
        clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
 
-       hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+       hv_sched_clock_offset = hv_read_reference_counter();
        hv_setup_sched_clock(read_hv_sched_clock_tsc);
 
        return true;
@@ -411,10 +457,10 @@ void __init hv_init_clocksource(void)
        if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
                return;
 
-       hyperv_cs = &hyperv_cs_msr;
+       hv_read_reference_counter = read_hv_clock_msr;
        clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
 
-       hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+       hv_sched_clock_offset = hv_read_reference_counter();
        hv_setup_sched_clock(read_hv_sched_clock_msr);
 }
 EXPORT_SYMBOL_GPL(hv_init_clocksource);
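
The rework above replaces the exported hyperv_cs clocksource pointer with a hv_read_reference_counter function pointer, so callers no longer need a struct clocksource argument just to read time. A condensed sketch of the indirection — the stub bodies and setup logic here are hypothetical, only the names come from the hunk:

static u64 notrace read_hv_clock_tsc(void)
{
	return 0;	/* real version reads the TSC reference page */
}

static u64 notrace read_hv_clock_msr(void)
{
	return 0;	/* real version reads the reference-count MSR */
}

u64 (*hv_read_reference_counter)(void);

static void __init example_select_backend(bool tsc_page_available)
{
	hv_read_reference_counter = tsc_page_available ?
				    read_hv_clock_tsc : read_hv_clock_msr;
}

/* Callers then read time directly, e.g. when arming a timer:
 *   current_tick = hv_read_reference_counter() + delta;
 */
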
index 9cde50cb322008a02a1d38c56f68a9a07f923fac..12ac75f7571f279e9effc339e53162cccfad77b0 100644 (file)
@@ -905,7 +905,7 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
                return -ENXIO;
        }
 
-       cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
+       cmt->mapbase = ioremap(mem->start, resource_size(mem));
        if (cmt->mapbase == NULL) {
                dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
                return -ENXIO;
index 64526e50d471c1f096b372a69fbaee1d2948da4b..bfccb31e94ad9914314e258cd89ce573d78fabfb 100644 (file)
@@ -377,7 +377,7 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
                return -ENXIO;
        }
 
-       mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
+       mtu->mapbase = ioremap(res->start, resource_size(res));
        if (mtu->mapbase == NULL)
                return -ENXIO;
 
index d49690d1553670e65bede80e78e2e07e648b5ecd..d41df9ba3725d0d2462a3db9a363e0501f4753a4 100644 (file)
@@ -486,7 +486,7 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
                return -ENXIO;
        }
 
-       tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
+       tmu->mapbase = ioremap(res->start, resource_size(res));
        if (tmu->mapbase == NULL)
                return -ENXIO;
 
index 88fe2e9ba9a356df7e0c964e651ba016240b841d..38858e141731ef855047977c3da6b2a69241a2d0 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/of_irq.h>
 #include <linux/slab.h>
 #include <linux/sched_clock.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
 
 /*
  * This driver configures the 2 16/32-bit count-up timers as follows:
@@ -464,13 +466,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
        return 0;
 }
 
-/**
- * ttc_timer_init - Initialize the timer
- *
- * Initializes the timer hardware and register the clock source and clock event
- * timers with Linux kernal timer framework
- */
-static int __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_probe(struct platform_device *pdev)
 {
        unsigned int irq;
        void __iomem *timer_baseaddr;
@@ -478,6 +474,7 @@ static int __init ttc_timer_init(struct device_node *timer)
        static int initialized;
        int clksel, ret;
        u32 timer_width = 16;
+       struct device_node *timer = pdev->dev.of_node;
 
        if (initialized)
                return 0;
@@ -532,4 +529,17 @@ static int __init ttc_timer_init(struct device_node *timer)
        return 0;
 }
 
-TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
+static const struct of_device_id ttc_timer_of_match[] = {
+       {.compatible = "cdns,ttc"},
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, ttc_timer_of_match);
+
+static struct platform_driver ttc_timer_driver = {
+       .driver = {
+               .name   = "cdns_ttc_timer",
+               .of_match_table = ttc_timer_of_match,
+       },
+};
+builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe);
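
The cadence_ttc change converts an early TIMER_OF_DECLARE() hook into a platform driver; builtin_platform_driver_probe() registers a driver whose __init probe runs once during boot. Minimal shape of the pattern for a hypothetical "foo" timer:

static int __init foo_timer_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	/* former timer-init body goes here, reading np as before */
	return 0;
}

static const struct of_device_id foo_timer_of_match[] = {
	{ .compatible = "vendor,foo-timer" },
	{ /* sentinel */ },
};

static struct platform_driver foo_timer_driver = {
	.driver = {
		.name = "foo_timer",
		.of_match_table = foo_timer_of_match,
	},
};
builtin_platform_driver_probe(foo_timer_driver, foo_timer_probe);
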
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
new file mode 100644 (file)
index 0000000..bd63d34
--- /dev/null
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 64-bit Periodic Interval Timer driver
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define MCHP_PIT64B_CR                 0x00    /* Control Register */
+#define MCHP_PIT64B_CR_START           BIT(0)
+#define MCHP_PIT64B_CR_SWRST           BIT(8)
+
+#define MCHP_PIT64B_MR                 0x04    /* Mode Register */
+#define MCHP_PIT64B_MR_CONT            BIT(0)
+#define MCHP_PIT64B_MR_ONE_SHOT                (0)
+#define MCHP_PIT64B_MR_SGCLK           BIT(3)
+#define MCHP_PIT64B_MR_PRES            GENMASK(11, 8)
+
+#define MCHP_PIT64B_LSB_PR             0x08    /* LSB Period Register */
+
+#define MCHP_PIT64B_MSB_PR             0x0C    /* MSB Period Register */
+
+#define MCHP_PIT64B_IER                        0x10    /* Interrupt Enable Register */
+#define MCHP_PIT64B_IER_PERIOD         BIT(0)
+
+#define MCHP_PIT64B_ISR                        0x1C    /* Interrupt Status Register */
+
+#define MCHP_PIT64B_TLSBR              0x20    /* Timer LSB Register */
+
+#define MCHP_PIT64B_TMSBR              0x24    /* Timer MSB Register */
+
+#define MCHP_PIT64B_PRES_MAX           0x10
+#define MCHP_PIT64B_LSBMASK            GENMASK_ULL(31, 0)
+#define MCHP_PIT64B_PRES_TO_MODE(p)    (MCHP_PIT64B_MR_PRES & ((p) << 8))
+#define MCHP_PIT64B_MODE_TO_PRES(m)    ((MCHP_PIT64B_MR_PRES & (m)) >> 8)
+#define MCHP_PIT64B_DEF_CS_FREQ                5000000UL       /* 5 MHz */
+#define MCHP_PIT64B_DEF_CE_FREQ                32768           /* 32.768 kHz */
+
+#define MCHP_PIT64B_NAME               "pit64b"
+
+/**
+ * struct mchp_pit64b_timer - PIT64B timer data structure
+ * @base: base address of PIT64B hardware block
+ * @pclk: PIT64B's peripheral clock
+ * @gclk: PIT64B's generic clock
+ * @mode: precomputed value for mode register
+ */
+struct mchp_pit64b_timer {
+       void __iomem    *base;
+       struct clk      *pclk;
+       struct clk      *gclk;
+       u32             mode;
+};
+
+/**
+ * struct mchp_pit64b_clkevt - PIT64B clockevent data structure
+ * @timer: PIT64B timer
+ * @clkevt: clockevent
+ */
+struct mchp_pit64b_clkevt {
+       struct mchp_pit64b_timer        timer;
+       struct clock_event_device       clkevt;
+};
+
+#define to_mchp_pit64b_timer(x) \
+       ((struct mchp_pit64b_timer *)container_of(x,\
+               struct mchp_pit64b_clkevt, clkevt))
+
+/* Base address for clocksource timer. */
+static void __iomem *mchp_pit64b_cs_base;
+/* Default cycles for clockevent timer. */
+static u64 mchp_pit64b_ce_cycles;
+
+static inline u64 mchp_pit64b_cnt_read(void __iomem *base)
+{
+       unsigned long   flags;
+       u32             low, high;
+
+       raw_local_irq_save(flags);
+
+       /*
+        * When using a 64-bit period, TLSB must be read first, followed by
+        * TMSB. This sequence generates an atomic read of the 64-bit timer
+        * value regardless of the time elapsed between the two accesses.
+        */
+       low = readl_relaxed(base + MCHP_PIT64B_TLSBR);
+       high = readl_relaxed(base + MCHP_PIT64B_TMSBR);
+
+       raw_local_irq_restore(flags);
+
+       return (((u64)high << 32) | low);
+}
+
+static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer,
+                                    u64 cycles, u32 mode, u32 irqs)
+{
+       u32 low, high;
+
+       low = cycles & MCHP_PIT64B_LSBMASK;
+       high = cycles >> 32;
+
+       writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+       writel_relaxed(mode | timer->mode, timer->base + MCHP_PIT64B_MR);
+       writel_relaxed(high, timer->base + MCHP_PIT64B_MSB_PR);
+       writel_relaxed(low, timer->base + MCHP_PIT64B_LSB_PR);
+       writel_relaxed(irqs, timer->base + MCHP_PIT64B_IER);
+       writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR);
+}
+
+static u64 mchp_pit64b_clksrc_read(struct clocksource *cs)
+{
+       return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
+static u64 mchp_pit64b_sched_read_clk(void)
+{
+       return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
+static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
+{
+       struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+       writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+
+       return 0;
+}
+
+static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev)
+{
+       struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+       mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT,
+                         MCHP_PIT64B_IER_PERIOD);
+
+       return 0;
+}
+
+static int mchp_pit64b_clkevt_set_next_event(unsigned long evt,
+                                            struct clock_event_device *cedev)
+{
+       struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+       mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT,
+                         MCHP_PIT64B_IER_PERIOD);
+
+       return 0;
+}
+
+static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev)
+{
+       struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+       writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR);
+       if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+               clk_disable_unprepare(timer->gclk);
+       clk_disable_unprepare(timer->pclk);
+}
+
+static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev)
+{
+       struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev);
+
+       clk_prepare_enable(timer->pclk);
+       if (timer->mode & MCHP_PIT64B_MR_SGCLK)
+               clk_prepare_enable(timer->gclk);
+}
+
+static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id)
+{
+       struct mchp_pit64b_clkevt *irq_data = dev_id;
+
+       /* Need to clear the interrupt. */
+       readl_relaxed(irq_data->timer.base + MCHP_PIT64B_ISR);
+
+       irq_data->clkevt.event_handler(&irq_data->clkevt);
+
+       return IRQ_HANDLED;
+}
+
+static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
+                                           u32 max_rate)
+{
+       u32 tmp;
+
+       for (*pres = 0; *pres < MCHP_PIT64B_PRES_MAX; (*pres)++) {
+               tmp = clk_rate / (*pres + 1);
+               if (tmp <= max_rate)
+                       break;
+       }
+
+       /* Use the biggest prescaler if we didn't match one. */
+       if (*pres == MCHP_PIT64B_PRES_MAX)
+               *pres = MCHP_PIT64B_PRES_MAX - 1;
+}
+
+/**
+ * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at
+ *                        runtime; this includes prescaler and SGCLK bit
+ *
+ * The PIT64B timer may be fed by gclk or pclk. When gclk is used, its rate
+ * has to be at least 3 times lower than pclk's rate. The pclk rate is
+ * fixed; the gclk rate can be changed via the clock APIs. The chosen clock
+ * (pclk or gclk) can be divided by the PIT64B's internal divider.
+ *
+ * This function first tries to use GCLK by requesting the desired rate
+ * from the PMC and then using the internal PIT64B prescaler, if any, to
+ * reach the requested rate. If PCLK/GCLK < 3 (a condition required by the
+ * PIT64B hardware), the function falls back to using PCLK as the clock
+ * source, choosing the highest prescaler if it cannot find one matching
+ * the requested frequency.
+ *
+ * The PIT64B block in relation to the PMC is shown below:
+ *
+ *                                PIT64B
+ *  PMC             +------------------------------------+
+ * +----+           |   +-----+                          |
+ * |    |-->gclk -->|-->|     |    +---------+  +-----+  |
+ * |    |           |   | MUX |--->| Divider |->|timer|  |
+ * |    |-->pclk -->|-->|     |    +---------+  +-----+  |
+ * +----+           |   +-----+                          |
+ *                  |      ^                             |
+ *                  |     sel                            |
+ *                  +------------------------------------+
+ *
+ * Where:
+ *     - gclk rate <= pclk rate/3
+ *     - gclk rate could be requested from PMC
+ *     - pclk rate is fixed (cannot be requested from PMC)
+ */
+static int __init mchp_pit64b_init_mode(struct mchp_pit64b_timer *timer,
+                                       unsigned long max_rate)
+{
+       unsigned long pclk_rate, diff = 0, best_diff = ULONG_MAX;
+       long gclk_round = 0;
+       u32 pres, best_pres = 0;
+
+       pclk_rate = clk_get_rate(timer->pclk);
+       if (!pclk_rate)
+               return -EINVAL;
+
+       timer->mode = 0;
+
+       /* Try using GCLK. */
+       gclk_round = clk_round_rate(timer->gclk, max_rate);
+       if (gclk_round < 0)
+               goto pclk;
+
+       if (pclk_rate / gclk_round < 3)
+               goto pclk;
+
+       mchp_pit64b_pres_compute(&pres, gclk_round, max_rate);
+       best_diff = abs(gclk_round / (pres + 1) - max_rate);
+       best_pres = pres;
+
+       if (!best_diff) {
+               timer->mode |= MCHP_PIT64B_MR_SGCLK;
+               goto done;
+       }
+
+pclk:
+       /* Check if requested rate could be obtained using PCLK. */
+       mchp_pit64b_pres_compute(&pres, pclk_rate, max_rate);
+       diff = abs(pclk_rate / (pres + 1) - max_rate);
+
+       if (best_diff > diff) {
+               /* Use PCLK. */
+               best_pres = pres;
+       } else {
+               /* Use GCLK. */
+               timer->mode |= MCHP_PIT64B_MR_SGCLK;
+               clk_set_rate(timer->gclk, gclk_round);
+       }
+
+done:
+       timer->mode |= MCHP_PIT64B_PRES_TO_MODE(best_pres);
+
+       pr_info("PIT64B: using clk=%s with prescaler %u, freq=%lu [Hz]\n",
+               timer->mode & MCHP_PIT64B_MR_SGCLK ? "gclk" : "pclk", best_pres,
+               timer->mode & MCHP_PIT64B_MR_SGCLK ?
+               gclk_round / (best_pres + 1) : pclk_rate / (best_pres + 1));
+
+       return 0;
+}
+
+static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
+                                         u32 clk_rate)
+{
+       int ret;
+
+       mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0);
+
+       mchp_pit64b_cs_base = timer->base;
+
+       ret = clocksource_mmio_init(timer->base, MCHP_PIT64B_NAME, clk_rate,
+                                   210, 64, mchp_pit64b_clksrc_read);
+       if (ret) {
+               pr_debug("clksrc: Failed to register PIT64B clocksource!\n");
+
+               /* Stop timer. */
+               writel_relaxed(MCHP_PIT64B_CR_SWRST,
+                              timer->base + MCHP_PIT64B_CR);
+
+               return ret;
+       }
+
+       sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate);
+
+       return 0;
+}
+
+static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer,
+                                         u32 clk_rate, u32 irq)
+{
+       struct mchp_pit64b_clkevt *ce;
+       int ret;
+
+       ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+       if (!ce)
+               return -ENOMEM;
+
+       mchp_pit64b_ce_cycles = DIV_ROUND_CLOSEST(clk_rate, HZ);
+
+       ce->timer.base = timer->base;
+       ce->timer.pclk = timer->pclk;
+       ce->timer.gclk = timer->gclk;
+       ce->timer.mode = timer->mode;
+       ce->clkevt.name = MCHP_PIT64B_NAME;
+       ce->clkevt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
+       ce->clkevt.rating = 150;
+       ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown;
+       ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic;
+       ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event;
+       ce->clkevt.suspend = mchp_pit64b_clkevt_suspend;
+       ce->clkevt.resume = mchp_pit64b_clkevt_resume;
+       ce->clkevt.cpumask = cpumask_of(0);
+       ce->clkevt.irq = irq;
+
+       ret = request_irq(irq, mchp_pit64b_interrupt, IRQF_TIMER,
+                         "pit64b_tick", ce);
+       if (ret) {
+               pr_debug("clkevt: Failed to setup PIT64B IRQ\n");
+               kfree(ce);
+               return ret;
+       }
+
+       clockevents_config_and_register(&ce->clkevt, clk_rate, 1, ULONG_MAX);
+
+       return 0;
+}
+
+static int __init mchp_pit64b_dt_init_timer(struct device_node *node,
+                                           bool clkevt)
+{
+       u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ;
+       struct mchp_pit64b_timer timer;
+       unsigned long clk_rate;
+       u32 irq = 0;
+       int ret;
+
+       /* Parse DT node. */
+       timer.pclk = of_clk_get_by_name(node, "pclk");
+       if (IS_ERR(timer.pclk))
+               return PTR_ERR(timer.pclk);
+
+       timer.gclk = of_clk_get_by_name(node, "gclk");
+       if (IS_ERR(timer.gclk))
+               return PTR_ERR(timer.gclk);
+
+       timer.base = of_iomap(node, 0);
+       if (!timer.base)
+               return -ENXIO;
+
+       if (clkevt) {
+               irq = irq_of_parse_and_map(node, 0);
+               if (!irq) {
+                       ret = -ENODEV;
+                       goto io_unmap;
+               }
+       }
+
+       /* Initialize mode (prescaler + SGCLK bit). To be used at runtime. */
+       ret = mchp_pit64b_init_mode(&timer, freq);
+       if (ret)
+               goto irq_unmap;
+
+       ret = clk_prepare_enable(timer.pclk);
+       if (ret)
+               goto irq_unmap;
+
+       if (timer.mode & MCHP_PIT64B_MR_SGCLK) {
+               ret = clk_prepare_enable(timer.gclk);
+               if (ret)
+                       goto pclk_unprepare;
+
+               clk_rate = clk_get_rate(timer.gclk);
+       } else {
+               clk_rate = clk_get_rate(timer.pclk);
+       }
+       clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1);
+
+       if (clkevt)
+               ret = mchp_pit64b_init_clkevt(&timer, clk_rate, irq);
+       else
+               ret = mchp_pit64b_init_clksrc(&timer, clk_rate);
+
+       if (ret)
+               goto gclk_unprepare;
+
+       return 0;
+
+gclk_unprepare:
+       if (timer.mode & MCHP_PIT64B_MR_SGCLK)
+               clk_disable_unprepare(timer.gclk);
+pclk_unprepare:
+       clk_disable_unprepare(timer.pclk);
+irq_unmap:
+       irq_dispose_mapping(irq);
+io_unmap:
+       iounmap(timer.base);
+
+       return ret;
+}
+
+static int __init mchp_pit64b_dt_init(struct device_node *node)
+{
+       static int inits;
+
+       switch (inits++) {
+       case 0:
+               /* 1st request, register clockevent. */
+               return mchp_pit64b_dt_init_timer(node, true);
+       case 1:
+               /* 2nd request, register clocksource. */
+               return mchp_pit64b_dt_init_timer(node, false);
+       }
+
+       /* Ignore any further instances. */
+       return -EINVAL;
+}
+
+TIMER_OF_DECLARE(mchp_pit64b, "microchip,sam9x60-pit64b", mchp_pit64b_dt_init);
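
mchp_pit64b_cnt_read() above depends on hardware latching: the TLSBR read snapshots the whole 64-bit count, so the following TMSBR read is guaranteed consistent. The same pattern, reduced to a generic sketch with hypothetical register offsets:

static u64 latched_read64(void __iomem *base)
{
	u32 lo, hi;

	lo = readl_relaxed(base + 0x20);	/* latches the counter */
	hi = readl_relaxed(base + 0x24);	/* reads the latched high half */

	return ((u64)hi << 32) | lo;
}

The driver additionally masks local interrupts around the pair so that a preempting reader on the same CPU cannot re-latch the counter between the two accesses.
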
index 4e54856ce2a5f3c79d36d7d650b5c538128f66d8..c4f15c4068c02b0cc37831543a33a2c87fe9aa51 100644 (file)
@@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
        return get_cycles64();
 }
 
-static u64 riscv_sched_clock(void)
+static u64 notrace riscv_sched_clock(void)
 {
        return get_cycles64();
 }
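
The notrace annotation above matters because sched_clock() is what ftrace uses to timestamp events: if the backing read function were itself traced, every trace entry would recurse into the tracer. A sketch of the rule for a hypothetical backend:

static u64 notrace my_sched_clock(void)
{
	return 0;	/* hypothetical: read a free-running counter */
}

static void __init my_time_init(void)
{
	/* any function handed to sched_clock_register() must be notrace */
	sched_clock_register(my_sched_clock, 64, 1000000);
}
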
index 5394d9dbdfbc633b4a9662dc1fbc09a2bfd40aed..269a994d6a99fddaf22a359d74a4f2f174d55681 100644 (file)
@@ -780,7 +780,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 {
        unsigned long flags;
        struct omap_dm_timer *timer;
-       struct resource *mem, *irq;
        struct device *dev = &pdev->dev;
        const struct dmtimer_platform_data *pdata;
        int ret;
@@ -796,24 +795,16 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (unlikely(!irq)) {
-               dev_err(dev, "%s: no IRQ resource.\n", __func__);
-               return -ENODEV;
-       }
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (unlikely(!mem)) {
-               dev_err(dev, "%s: no memory resource.\n", __func__);
-               return -ENODEV;
-       }
-
        timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
        if (!timer)
                return  -ENOMEM;
 
+       timer->irq = platform_get_irq(pdev, 0);
+       if (timer->irq < 0)
+               return timer->irq;
+
        timer->fclk = ERR_PTR(-ENODEV);
-       timer->io_base = devm_ioremap_resource(dev, mem);
+       timer->io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(timer->io_base))
                return PTR_ERR(timer->io_base);
 
@@ -836,7 +827,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
        if (pdata)
                timer->errata = pdata->timer_errata;
 
-       timer->irq = irq->start;
        timer->pdev = pdev;
 
        pm_runtime_enable(dev);
index 77b0e5d0fb1344b29c452cb96be05fcefec0d516..4f86ce2db34fbd791bd41dd99f6644ed9ea4a121 100644 (file)
@@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct private_data *priv = policy->driver_data;
 
+       cpufreq_cpu_put(policy);
+
        return brcm_avs_get_frequency(priv->base);
 }
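
cpufreq_cpu_get() takes a reference on the policy; the fix above adds the cpufreq_cpu_put() that balances it, and the s3c2416/s5pv210 reboot-notifier hunks further below add the same pairing plus a NULL check. The balanced pattern, sketched with a hypothetical hardware read:

static unsigned int example_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq;

	if (!policy)
		return 0;

	freq = example_read_hw_freq(policy);	/* hypothetical */
	cpufreq_cpu_put(policy);		/* drop the reference */

	return freq;
}
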
 
index 8d8da763adc57c1f31cec0f388fd6e466b008f8f..a06777c35fc051df2f5a3bbe41345a50be1aa5f4 100644 (file)
@@ -39,7 +39,7 @@
 static struct cppc_cpudata **all_cpu_data;
 
 struct cppc_workaround_oem_info {
-       char oem_id[ACPI_OEM_ID_SIZE +1];
+       char oem_id[ACPI_OEM_ID_SIZE + 1];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
        u32 oem_revision;
 };
@@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void)
        for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
                if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
                    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
-                   wa_info[i].oem_revision == tbl->oem_revision)
+                   wa_info[i].oem_revision == tbl->oem_revision) {
                        apply_hisi_workaround = true;
+                       break;
+               }
        }
+
+       acpi_put_table(tbl);
 }
 
 /* Callback function used to retrieve the max frequency from DMI */
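
The acpi_put_table() added above balances the acpi_get_table() earlier in cppc_check_hisi_workaround(); like the cpufreq policy reference, the table mapping is counted and must be released once the caller is done with it. A sketch of the balanced use (ACPI_SIG_PCCT as in the driver, inspection body elided):

static void example_check_oem(void)
{
	struct acpi_table_header *tbl = NULL;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	/* ... compare tbl->oem_id / oem_table_id / oem_revision ... */

	acpi_put_table(tbl);	/* release the table mapping */
}
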
index f1d170dcf4d310cfa7c936ab1638b93b67f109ca..f2ae9cd455c17bb14121f20c838ab8a8e204dd53 100644 (file)
@@ -109,6 +109,7 @@ static const struct of_device_id blacklist[] __initconst = {
        { .compatible = "fsl,imx8mq", },
        { .compatible = "fsl,imx8mm", },
        { .compatible = "fsl,imx8mn", },
+       { .compatible = "fsl,imx8mp", },
 
        { .compatible = "marvell,armadaxp", },
 
@@ -121,6 +122,8 @@ static const struct of_device_id blacklist[] __initconst = {
        { .compatible = "mediatek,mt8176", },
        { .compatible = "mediatek,mt8183", },
 
+       { .compatible = "nvidia,tegra20", },
+       { .compatible = "nvidia,tegra30", },
        { .compatible = "nvidia,tegra124", },
        { .compatible = "nvidia,tegra210", },
 
index 85a6efd6b68f9a5ab02a11cae4ba0f7c374c44d9..6cb8193421ea1522aec53a95947685e4c69a4430 100644 (file)
@@ -35,7 +35,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       if (of_machine_is_compatible("fsl,imx8mn"))
+       if (of_machine_is_compatible("fsl,imx8mn") ||
+           of_machine_is_compatible("fsl,imx8mp"))
                speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
                              >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
        else
@@ -54,7 +55,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
                if (of_machine_is_compatible("fsl,imx8mm") ||
                    of_machine_is_compatible("fsl,imx8mq"))
                        speed_grade = 1;
-               if (of_machine_is_compatible("fsl,imx8mn"))
+               if (of_machine_is_compatible("fsl,imx8mn") ||
+                   of_machine_is_compatible("fsl,imx8mp"))
                        speed_grade = 0xb;
        }
 
index d2fa3e9ccd97c4f5256bca74a879666e1fc43210..ad6a17cf0011abb96dbb7754c7dd84d16d68ebe0 100644 (file)
@@ -172,7 +172,7 @@ struct vid_data {
 /**
  * struct global_params - Global parameters, mostly tunable via sysfs.
  * @no_turbo:          Whether or not to use turbo P-states.
- * @turbo_disabled:    Whethet or not turbo P-states are available at all,
+ * @turbo_disabled:    Whether or not turbo P-states are available at all,
  *                     based on the MSR_IA32_MISC_ENABLE value and whether or
  *                     not the maximum reported turbo P-state is different from
  *                     the maximum reported non-turbo one.
index cb74bdc5baaa40a2ecff1ce0fe11d528a8585f32..70ad8fe1d78b9462aba6d522b0e313bf8a406da2 100644 (file)
@@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
 static int kirkwood_cpufreq_probe(struct platform_device *pdev)
 {
        struct device_node *np;
-       struct resource *res;
        int err;
 
        priv.dev = &pdev->dev;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv.base = devm_ioremap_resource(&pdev->dev, res);
+       priv.base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv.base))
                return PTR_ERR(priv.base);
 
index e9caa9586982acfd137aa02b083d4b5ff08607f3..909f40fbcde2c0459764eb5ea35e2246785b7655 100644 (file)
@@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void)
        u32 cpu_freq;
 
        spin_lock_irqsave(&loongson2_wait_lock, flags);
-       cpu_freq = LOONGSON_CHIPCFG(0);
-       LOONGSON_CHIPCFG(0) &= ~0x7;    /* Put CPU into wait mode */
-       LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */
+       cpu_freq = readl(LOONGSON_CHIPCFG);
+       /* Put CPU into wait mode */
+       writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
+       /* Restore CPU state */
+       writel(cpu_freq, LOONGSON_CHIPCFG);
        spin_unlock_irqrestore(&loongson2_wait_lock, flags);
        local_irq_enable();
 }
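
The loongson2 hunk stops dereferencing the LOONGSON_CHIPCFG register directly and goes through readl()/writel(), which respect __iomem annotations and provide the required access ordering. The conversion, reduced to a sketch:

static void example_enter_wait(void __iomem *reg)
{
	u32 saved = readl(reg);

	writel(saved & ~0x7, reg);	/* put CPU into wait mode */
	writel(saved, reg);		/* restore the previous state */
}
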
index fdc767fdbe6a8bdc14dfef0b90201ce4b25e1b7a..89d4fa8b65e983a6793fc7f44344d21e2a863f75 100644 (file)
@@ -445,7 +445,7 @@ static int __init pcc_cpufreq_probe(void)
                goto out_free;
        }
 
-       pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
+       pcch_virt_addr = ioremap(mem_resource->minimum,
                                        mem_resource->address_length);
        if (pcch_virt_addr == NULL) {
                pr_debug("probe: could not map shared mem region\n");
index 106910351c41683a23752ccde7b3da2069005ed3..5c221bc90210e8b1b702d37239a70748d88ddf21 100644 (file)
@@ -304,6 +304,7 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
 {
        struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
        int ret;
+       struct cpufreq_policy *policy;
 
        mutex_lock(&cpufreq_lock);
 
@@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
         */
        if (s3c_freq->is_dvs) {
                pr_debug("cpufreq: leave dvs on reboot\n");
-               ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0);
+
+               policy = cpufreq_cpu_get(0);
+               if (!policy) {
+                       pr_debug("cpufreq: get no policy for cpu0\n");
+                       return NOTIFY_BAD;
+               }
+
+               ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
+               cpufreq_cpu_put(policy);
+
                if (ret < 0)
                        return NOTIFY_BAD;
        }
index 5d10030f2560218e0b1d381fca19817459165b2d..e84281e2561d22a5ff930cb8aca78bf805bef5c9 100644 (file)
@@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
                                                 unsigned long event, void *ptr)
 {
        int ret;
+       struct cpufreq_policy *policy;
+
+       policy = cpufreq_cpu_get(0);
+       if (!policy) {
+               pr_debug("cpufreq: get no policy for cpu0\n");
+               return NOTIFY_BAD;
+       }
+
+       ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
+       cpufreq_cpu_put(policy);
 
-       ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
        if (ret < 0)
                return NOTIFY_BAD;
 
index bcecb068b51b7e7a973c50012ffe9343bc81174a..2e233ad72758958f13bcba65117971e429027738 100644 (file)
@@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
 {
        struct tegra186_cpufreq_data *data;
        struct tegra_bpmp *bpmp;
-       struct resource *res;
        unsigned int i = 0, err;
 
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
@@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
        if (IS_ERR(bpmp))
                return PTR_ERR(bpmp);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       data->regs = devm_ioremap_resource(&pdev->dev, res);
+       data->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(data->regs)) {
                err = PTR_ERR(data->regs);
                goto put_bpmp;
index 506e3f2bf53aca6c62242c0f38ab3ebe3350ddf8..83c85d3d67e355495695b278b37586869e466f0c 100644 (file)
@@ -434,7 +434,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
        if (cur_cluster < MAX_CLUSTERS) {
                int cpu;
 
-               cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+               dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
 
                for_each_cpu(cpu, policy->cpus)
                        per_cpu(physical_cluster, cpu) = cur_cluster;
index a224d33dda7f27f0fa248c295cb7e11b1f6fbd74..62272ecfa771fee1a5a12d428dc9e99c0a84fd84 100644 (file)
@@ -25,7 +25,7 @@ config ARM_PSCI_CPUIDLE
 
 config ARM_BIG_LITTLE_CPUIDLE
        bool "Support for ARM big.LITTLE processors"
-       depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
+       depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST
        depends on MCPM && !ARM64
        select ARM_CPU_SUSPEND
        select CPU_IDLE_MULTIPLE_DRIVERS
@@ -51,13 +51,13 @@ config ARM_HIGHBANK_CPUIDLE
 
 config ARM_KIRKWOOD_CPUIDLE
        bool "CPU Idle Driver for Marvell Kirkwood SoCs"
-       depends on MACH_KIRKWOOD && !ARM64
+       depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64
        help
          This adds the CPU Idle driver for Marvell Kirkwood SoCs.
 
 config ARM_ZYNQ_CPUIDLE
        bool "CPU Idle Driver for Xilinx Zynq processors"
-       depends on ARCH_ZYNQ && !ARM64
+       depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64
        help
          Select this to enable cpuidle on Xilinx Zynq processors.
 
@@ -70,19 +70,19 @@ config ARM_U8500_CPUIDLE
 config ARM_AT91_CPUIDLE
        bool "Cpu Idle Driver for the AT91 processors"
        default y
-       depends on ARCH_AT91 && !ARM64
+       depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64
        help
          Select this to enable cpuidle for AT91 processors.
 
 config ARM_EXYNOS_CPUIDLE
        bool "Cpu Idle Driver for the Exynos processors"
-       depends on ARCH_EXYNOS && !ARM64
+       depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64
        select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
        help
          Select this to enable cpuidle for Exynos processors.
 
 config ARM_MVEBU_V7_CPUIDLE
        bool "CPU Idle Driver for mvebu v7 family processors"
-       depends on ARCH_MVEBU && !ARM64
+       depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64
        help
          Select this to enable cpuidle on Armada 370, 38x and XP processors.
index b607278df25b4a09e639922b57a0b642e6366a46..04003b90dc4974ee210e5d4ae4570d295541fe8a 100644 (file)
@@ -89,6 +89,7 @@
  * @coupled_cpus: mask of cpus that are part of the coupled set
  * @requested_state: array of requested states for cpus in the coupled set
  * @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @abort_barrier: synchronisation point for abort cases
  * @online_count: count of cpus that are online
  * @refcnt: reference count of cpuidle devices that are using this struct
  * @prevent: flag to prevent coupled idle while a cpu is hotplugging
@@ -338,7 +339,7 @@ static void cpuidle_coupled_poke(int cpu)
 
 /**
  * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
- * @dev: struct cpuidle_device for this cpu
+ * @this_cpu: target cpu
  * @coupled: the struct coupled that contains the current cpu
  *
  * Calls cpuidle_coupled_poke on all other online cpus.
@@ -355,7 +356,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
 
 /**
  * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
  * @coupled: the struct coupled that contains the current cpu
  * @next_state: the index in drv->states of the requested state for this cpu
  *
@@ -376,7 +377,7 @@ static int cpuidle_coupled_set_waiting(int cpu,
 
 /**
  * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
  * @coupled: the struct coupled that contains the current cpu
  *
  * Removes the requested idle state for the specified cpuidle device.
@@ -412,7 +413,7 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
 
 /**
  * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
- * @cpu - this cpu
+ * @cpu: this cpu
  *
  * Turns on interrupts and spins until any outstanding poke interrupts have
  * been processed and the poke bit has been cleared.
index 6e36740f5719ba0fff74aa713ab7728d9362bc8c..fc22c59b6c73b5016a9490786d427e4aea60cd96 100644 (file)
@@ -37,10 +37,7 @@ static struct cpuidle_driver clps711x_idle_driver = {
 
 static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
 {
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       clps711x_halt = devm_ioremap_resource(&pdev->dev, res);
+       clps711x_halt = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(clps711x_halt))
                return PTR_ERR(clps711x_halt);
 
index d23d8f468c12212136861461cf5ca764c93f5668..511c4f46027a941d920cf10fecd4cea9d6dad5c0 100644 (file)
@@ -55,10 +55,7 @@ static struct cpuidle_driver kirkwood_idle_driver = {
 /* Initialize CPU idle by registering the idle states */
 static int kirkwood_cpuidle_probe(struct platform_device *pdev)
 {
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
+       ddr_operation_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ddr_operation_base))
                return PTR_ERR(ddr_operation_base);
 
index 0005be5ea2b45f560a2a378a3c66b89354b3fb42..de81298051b32a41635d4d31d2c1132df00d3ba2 100644 (file)
@@ -121,6 +121,9 @@ void cpuidle_use_deepest_state(u64 latency_limit_ns)
  * cpuidle_find_deepest_state - Find the deepest available idle state.
  * @drv: cpuidle driver for the given CPU.
  * @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
+ *
+ * Return: the index of the deepest available idle state.
  */
 int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                               struct cpuidle_device *dev,
@@ -381,7 +384,8 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv,
                if (dev->states_usage[i].disable)
                        continue;
 
-               limit_ns = (u64)drv->states[i].target_residency_ns;
+               limit_ns = drv->states[i].target_residency_ns;
+               break;
        }
 
        dev->poll_limit_ns = limit_ns;
@@ -571,10 +575,14 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
        if (!try_module_get(drv->owner))
                return -EINVAL;
 
-       for (i = 0; i < drv->state_count; i++)
+       for (i = 0; i < drv->state_count; i++) {
                if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
                        dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
 
+               if (drv->states[i].flags & CPUIDLE_FLAG_OFF)
+                       dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+       }
+
        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
 
index c76423aaef4d7ebe1098ec63507752ecf0eb194a..4070e573bf43a49ee638085448d46e705894e74d 100644 (file)
@@ -155,8 +155,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
 {
        int i;
 
-       drv->refcnt = 0;
-
        /*
         * Use all possible CPUs as the default, because if the kernel boots
         * with some CPUs offline and then we online one of them, the CPU
@@ -240,9 +238,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
  */
 static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
 {
-       if (WARN_ON(drv->refcnt > 0))
-               return;
-
        if (drv->bctimer) {
                drv->bctimer = 0;
                on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
@@ -349,47 +344,6 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
 }
 EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
 
-/**
- * cpuidle_driver_ref - get a reference to the driver.
- *
- * Increment the reference counter of the cpuidle driver associated with
- * the current CPU.
- *
- * Returns a pointer to the driver, or NULL if the current CPU has no driver.
- */
-struct cpuidle_driver *cpuidle_driver_ref(void)
-{
-       struct cpuidle_driver *drv;
-
-       spin_lock(&cpuidle_driver_lock);
-
-       drv = cpuidle_get_driver();
-       if (drv)
-               drv->refcnt++;
-
-       spin_unlock(&cpuidle_driver_lock);
-       return drv;
-}
-
-/**
- * cpuidle_driver_unref - puts down the refcount for the driver
- *
- * Decrement the reference counter of the cpuidle driver associated with
- * the current CPU.
- */
-void cpuidle_driver_unref(void)
-{
-       struct cpuidle_driver *drv;
-
-       spin_lock(&cpuidle_driver_lock);
-
-       drv = cpuidle_get_driver();
-       if (drv && !WARN_ON(drv->refcnt <= 0))
-               drv->refcnt--;
-
-       spin_unlock(&cpuidle_driver_lock);
-}
-
 /**
  * cpuidle_driver_state_disabled - Disable or enable an idle state
  * @drv: cpuidle driver owning the state
@@ -403,6 +357,13 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
 
        mutex_lock(&cpuidle_lock);
 
+       spin_lock(&cpuidle_driver_lock);
+
+       if (!drv->cpumask) {
+               drv->states[idx].flags |= CPUIDLE_FLAG_UNUSABLE;
+               goto unlock;
+       }
+
        for_each_cpu(cpu, drv->cpumask) {
                struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
 
@@ -415,5 +376,8 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
                        dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER;
        }
 
+unlock:
+       spin_unlock(&cpuidle_driver_lock);
+
        mutex_unlock(&cpuidle_lock);
 }
index de7e706efd460e86dea44c7b21fc5d078d6bf8f4..6deaaf5f05b5765598115132492edc4f654df9da 100644 (file)
@@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * pattern detection.
         */
        cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
-       if (cpu_data->interval_idx > INTERVALS)
+       if (cpu_data->interval_idx >= INTERVALS)
                cpu_data->interval_idx = 0;
 }
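
The one-character teo fix above is an off-by-one: with a post-increment index, the wrap test must fire at >= INTERVALS, otherwise one store lands in intervals[INTERVALS], one slot past the end of the array. The generic ring-buffer shape:

#define RING_SIZE 8

static u64 ring[RING_SIZE];
static unsigned int ring_idx;

static void ring_push(u64 val)
{
	ring[ring_idx++] = val;
	if (ring_idx >= RING_SIZE)	/* '>' would let ring_idx reach RING_SIZE */
		ring_idx = 0;
}
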
 
index 38ef770be90db82f1574fb6f85b2e951748413ae..cdeedbf02646da3b060e271b83d1ff7d57e5a6ed 100644 (file)
@@ -142,6 +142,7 @@ static struct attribute_group cpuidle_attr_group = {
 
 /**
  * cpuidle_add_interface - add CPU global sysfs attributes
+ * @dev: the target device
  */
 int cpuidle_add_interface(struct device *dev)
 {
@@ -153,6 +154,7 @@ int cpuidle_add_interface(struct device *dev)
 
 /**
  * cpuidle_remove_interface - remove CPU global sysfs attributes
+ * @dev: the target device
  */
 void cpuidle_remove_interface(struct device *dev)
 {
@@ -327,6 +329,14 @@ static ssize_t store_state_disable(struct cpuidle_state *state,
        return size;
 }
 
+static ssize_t show_state_default_status(struct cpuidle_state *state,
+                                         struct cpuidle_state_usage *state_usage,
+                                         char *buf)
+{
+       return sprintf(buf, "%s\n",
+                      state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
+}
+
 define_one_state_ro(name, show_state_name);
 define_one_state_ro(desc, show_state_desc);
 define_one_state_ro(latency, show_state_exit_latency);
@@ -337,6 +347,7 @@ define_one_state_ro(time, show_state_time);
 define_one_state_rw(disable, show_state_disable, store_state_disable);
 define_one_state_ro(above, show_state_above);
 define_one_state_ro(below, show_state_below);
+define_one_state_ro(default_status, show_state_default_status);
 
 static struct attribute *cpuidle_state_default_attrs[] = {
        &attr_name.attr,
@@ -349,6 +360,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
        &attr_disable.attr,
        &attr_above.attr,
        &attr_below.attr,
+       &attr_default_status.attr,
        NULL
 };
 
@@ -615,7 +627,7 @@ static struct kobj_type ktype_driver_cpuidle = {
 
 /**
  * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
  */
 static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
 {
@@ -646,7 +658,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
 
 /**
  * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
  */
 static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
 {
index 4e7323884ae30ea9b5bc83ab77707bce48859f43..354836468c5d9854e12a1777b76b55227766ab53 100644 (file)
@@ -2507,7 +2507,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                addr = pci_resource_start(pdev, i);
                size = pci_resource_len(pdev, i);
 
-               dev->bar[i] = ioremap_nocache(addr, size);
+               dev->bar[i] = ioremap(addr, size);
                if (!dev->bar[i]) {
                        err = -ENOMEM;
                        goto err_out_unmap_bars;
index 26754d0570ba9cffcf829b873ec2ecab69c2abe6..b846d73d9a855ceafb74129377c581460760821c 100644 (file)
@@ -40,7 +40,7 @@ struct sec_req {
        int req_id;
 
        /* Status of the SEC request */
-       int fake_busy;
+       atomic_t fake_busy;
 };
 
 /**
@@ -132,8 +132,8 @@ struct sec_debug_file {
 };
 
 struct sec_dfx {
-       u64 send_cnt;
-       u64 recv_cnt;
+       atomic64_t send_cnt;
+       atomic64_t recv_cnt;
 };
 
 struct sec_debug {
index 62b04e19067c3cfb5fb1856ab15885f9e7197237..0a5391fff485c43447498f837fb05f4f6803f4a9 100644 (file)
@@ -120,7 +120,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
                return;
        }
 
-       __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+       atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);
 
        req->ctx->req_op->buf_unmap(req->ctx, req);
 
@@ -135,13 +135,13 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
        mutex_lock(&qp_ctx->req_lock);
        ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
        mutex_unlock(&qp_ctx->req_lock);
-       __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+       atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 
        if (ret == -EBUSY)
                return -ENOBUFS;
 
        if (!ret) {
-               if (req->fake_busy)
+               if (atomic_read(&req->fake_busy))
                        ret = -EBUSY;
                else
                        ret = -EINPROGRESS;
@@ -641,7 +641,7 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
        if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
                sec_update_iv(req);
 
-       if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+       if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1)
                sk_req->base.complete(&sk_req->base, -EINPROGRESS);
 
        sk_req->base.complete(&sk_req->base, req->err_type);
@@ -672,9 +672,9 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
        }
 
        if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
-               req->fake_busy = 1;
+               atomic_set(&req->fake_busy, 1);
        else
-               req->fake_busy = 0;
+               atomic_set(&req->fake_busy, 0);
 
        ret = ctx->req_op->get_res(ctx, req);
        if (ret) {
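
The hisilicon/sec2 hunks replace GCC __sync builtins on plain integers with the kernel's atomic types. A sketch of the mapping with hypothetical counters; note that atomic_cmpxchg() returns the old value, so a successful 1 -> 0 swap is the case where it returns 1:

static atomic64_t example_cnt = ATOMIC64_INIT(0);
static atomic_t example_busy = ATOMIC_INIT(0);

static void example_update(void)
{
	/* was: __sync_add_and_fetch(&cnt, 1); */
	atomic64_inc(&example_cnt);

	/* was: if (__sync_bool_compare_and_swap(&busy, 1, 0)) ... */
	if (atomic_cmpxchg(&example_busy, 1, 0) == 1) {
		/* this CPU cleared the flag */
	}
}
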
index 74f0654028c968c7582ddb133ea30a74e13b2b74..ab742dfbab997562b9a6a9b55a2b1fe700797258 100644 (file)
@@ -608,6 +608,14 @@ static const struct file_operations sec_dbg_fops = {
        .write = sec_debug_write,
 };
 
+static int debugfs_atomic64_t_get(void *data, u64 *val)
+{
+       *val = atomic64_read((atomic64_t *)data);
+       return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL,
+                        "%lld\n");
+
 static int sec_core_debug_init(struct sec_dev *sec)
 {
        struct hisi_qm *qm = &sec->qm;
@@ -628,9 +636,11 @@ static int sec_core_debug_init(struct sec_dev *sec)
 
        debugfs_create_regset32("regs", 0444, tmp_d, regset);
 
-       debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+       debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt,
+                           &fops_atomic64_t_ro);
 
-       debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+       debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt,
+                           &fops_atomic64_t_ro);
 
        return 0;
 }
index defe1d4387105e4a4ff368b57d8a0fbcadb9fd61..0b1df12e0f21ce57ec5c3108095b3e60d5178b6e 100644 (file)
@@ -77,13 +77,12 @@ config DEVFREQ_GOV_PASSIVE
 comment "DEVFREQ Drivers"
 
 config ARM_EXYNOS_BUS_DEVFREQ
-       tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+       tristate "ARM Exynos Generic Memory Bus DEVFREQ Driver"
        depends on ARCH_EXYNOS || COMPILE_TEST
        select DEVFREQ_GOV_SIMPLE_ONDEMAND
        select DEVFREQ_GOV_PASSIVE
        select DEVFREQ_EVENT_EXYNOS_PPMU
        select PM_DEVFREQ_EVENT
-       select PM_OPP
        help
          This adds the common DEVFREQ driver for Exynos Memory bus. Exynos
          Memory bus has one more group of memory bus (e.g., MIF and INT block).
@@ -92,13 +91,23 @@ config ARM_EXYNOS_BUS_DEVFREQ
          and adjusts the operating frequencies and voltages with OPP support.
          This does not yet operate with optimal voltages.
 
+config ARM_IMX8M_DDRC_DEVFREQ
+       tristate "i.MX8M DDRC DEVFREQ Driver"
+       depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \
+               (COMPILE_TEST && HAVE_ARM_SMCCC)
+       select DEVFREQ_GOV_SIMPLE_ONDEMAND
+       select DEVFREQ_GOV_USERSPACE
+       help
+         This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows
+         adjusting DRAM frequency.
+
 config ARM_TEGRA_DEVFREQ
        tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver"
        depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \
                ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \
                ARCH_TEGRA_210_SOC || \
                COMPILE_TEST
-       select PM_OPP
+       depends on COMMON_CLK
        help
          This adds the DEVFREQ driver for the Tegra family of SoCs.
          It reads ACTMON counters of memory controllers and adjusts the
@@ -109,7 +118,6 @@ config ARM_TEGRA20_DEVFREQ
        depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST
        depends on COMMON_CLK
        select DEVFREQ_GOV_SIMPLE_ONDEMAND
-       select PM_OPP
        help
          This adds the DEVFREQ driver for the Tegra20 family of SoCs.
          It reads Memory Controller counters and adjusts the operating
@@ -117,15 +125,15 @@ config ARM_TEGRA20_DEVFREQ
 
 config ARM_RK3399_DMC_DEVFREQ
        tristate "ARM RK3399 DMC DEVFREQ Driver"
-       depends on ARCH_ROCKCHIP
+       depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
+               (COMPILE_TEST && HAVE_ARM_SMCCC)
        select DEVFREQ_EVENT_ROCKCHIP_DFI
        select DEVFREQ_GOV_SIMPLE_ONDEMAND
        select PM_DEVFREQ_EVENT
-       select PM_OPP
        help
-          This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
-          It sets the frequency for the memory controller and reads the usage counts
-          from hardware.
+         This adds the DEVFREQ driver for the RK3399 DMC (Dynamic Memory
+         Controller). It sets the frequency for the memory controller and
+         reads the usage counts from hardware.
 
 source "drivers/devfreq/event/Kconfig"
 
index 338ae8440db63a82baeb9050d96f74599b9f2bd2..3eb4d5e6635c602a857287c1b5c7614439203347 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE)       += governor_passive.o
 
 # DEVFREQ Drivers
 obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ)   += exynos-bus.o
+obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ)   += imx8m-ddrc.o
 obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ)   += rk3399_dmc.o
 obj-$(CONFIG_ARM_TEGRA_DEVFREQ)                += tegra30-devfreq.o
 obj-$(CONFIG_ARM_TEGRA20_DEVFREQ)      += tegra20-devfreq.o
index 3dc5fd6065a306f211852caa5098bac01f2eec3f..8c31b0f2e28f7fdf1faa59a95d048d4567a4e8d6 100644 (file)
@@ -346,9 +346,9 @@ EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
 
 /**
  * devfreq_event_remove_edev() - Remove the devfreq-event device registered.
- * @dev                : the devfreq-event device
+ * @edev       : the devfreq-event device
  *
- * Note that this function remove the registered devfreq-event device.
+ * Note that this function removes the registered devfreq-event device.
  */
 int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
 {
index 425149e8bab0799753cc1db9a82104fb7bb6600d..cceee8bc3c2f745a02ab7b648d29458cbeaf2283 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/kmod.h>
 #include <linux/sched.h>
+#include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/printk.h>
 #include <linux/hrtimer.h>
 #include <linux/of.h>
+#include <linux/pm_qos.h>
 #include "governor.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/devfreq.h>
 
+#define HZ_PER_KHZ     1000
+
 static struct class *devfreq_class;
+static struct dentry *devfreq_debugfs;
 
 /*
  * devfreq core provides delayed work based load monitoring helper
@@ -98,6 +103,54 @@ static unsigned long find_available_max_freq(struct devfreq *devfreq)
        return max_freq;
 }
 
+/**
+ * get_freq_range() - Get the current freq range
+ * @devfreq:   the devfreq instance
+ * @min_freq:  the min frequency
+ * @max_freq:  the max frequency
+ *
+ * This takes into consideration all constraints.
+ */
+static void get_freq_range(struct devfreq *devfreq,
+                          unsigned long *min_freq,
+                          unsigned long *max_freq)
+{
+       unsigned long *freq_table = devfreq->profile->freq_table;
+       s32 qos_min_freq, qos_max_freq;
+
+       lockdep_assert_held(&devfreq->lock);
+
+       /*
+        * Initialize minimum/maximum frequency from freq table.
+        * The devfreq drivers can initialize this in either ascending or
+        * descending order and devfreq core supports both.
+        */
+       if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
+               *min_freq = freq_table[0];
+               *max_freq = freq_table[devfreq->profile->max_state - 1];
+       } else {
+               *min_freq = freq_table[devfreq->profile->max_state - 1];
+               *max_freq = freq_table[0];
+       }
+
+       /* Apply constraints from PM QoS */
+       qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
+                                            DEV_PM_QOS_MIN_FREQUENCY);
+       qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
+                                            DEV_PM_QOS_MAX_FREQUENCY);
+       *min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
+       if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
+               *max_freq = min(*max_freq,
+                               (unsigned long)HZ_PER_KHZ * qos_max_freq);
+
+       /* Apply constraints from OPP interface */
+       *min_freq = max(*min_freq, devfreq->scaling_min_freq);
+       *max_freq = min(*max_freq, devfreq->scaling_max_freq);
+
+       if (*min_freq > *max_freq)
+               *min_freq = *max_freq;
+}
+
 /**
  * devfreq_get_freq_level() - Lookup freq_table for the frequency
  * @devfreq:   the devfreq instance
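
To make the clamping order concrete, here is a userspace rendering of get_freq_range() with invented numbers: the freq-table bounds are narrowed first by the PM QoS limits (expressed in kHz) and then by the OPP-derived scaling bounds:

    #include <stdio.h>

    #define HZ_PER_KHZ 1000UL

    int main(void)
    {
            unsigned long min_freq = 100000000, max_freq = 800000000; /* table */
            long qos_min_khz = 200000, qos_max_khz = 600000;          /* PM QoS */
            unsigned long scaling_min = 100000000;                    /* OPP */
            unsigned long scaling_max = 400000000;

            if (min_freq < HZ_PER_KHZ * qos_min_khz)
                    min_freq = HZ_PER_KHZ * qos_min_khz;
            if (max_freq > HZ_PER_KHZ * qos_max_khz)
                    max_freq = HZ_PER_KHZ * qos_max_khz;

            if (min_freq < scaling_min)
                    min_freq = scaling_min;
            if (max_freq > scaling_max)
                    max_freq = scaling_max;

            if (min_freq > max_freq)
                    min_freq = max_freq;

            printf("range: %lu..%lu Hz\n", min_freq, max_freq); /* 200M..400M */
            return 0;
    }
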
@@ -158,10 +211,10 @@ static int set_freq_table(struct devfreq *devfreq)
 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
 {
        int lev, prev_lev, ret = 0;
-       unsigned long cur_time;
+       u64 cur_time;
 
        lockdep_assert_held(&devfreq->lock);
-       cur_time = jiffies;
+       cur_time = get_jiffies_64();
 
        /* Immediately exit if previous_freq is not initialized yet. */
        if (!devfreq->previous_freq)
@@ -173,8 +226,8 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
                goto out;
        }
 
-       devfreq->time_in_state[prev_lev] +=
-                        cur_time - devfreq->last_stat_updated;
+       devfreq->stats.time_in_state[prev_lev] +=
+                       cur_time - devfreq->stats.last_update;
 
        lev = devfreq_get_freq_level(devfreq, freq);
        if (lev < 0) {
@@ -183,13 +236,13 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
        }
 
        if (lev != prev_lev) {
-               devfreq->trans_table[(prev_lev *
-                               devfreq->profile->max_state) + lev]++;
-               devfreq->total_trans++;
+               devfreq->stats.trans_table[
+                       (prev_lev * devfreq->profile->max_state) + lev]++;
+               devfreq->stats.total_trans++;
        }
 
 out:
-       devfreq->last_stat_updated = cur_time;
+       devfreq->stats.last_update = cur_time;
        return ret;
 }
 EXPORT_SYMBOL(devfreq_update_status);
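
As a side note on the accounting above, a small userspace sketch (values invented) of the bookkeeping rule: the time elapsed since the last update is credited to the level being left, then the timestamp advances:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t time_in_state[2] = { 0, 0 };
            uint64_t last_update = 1000;    /* jiffies64-style stamp, made up */
            int prev_lev = 0;
            uint64_t now = 1250;
            int new_lev = 1;

            time_in_state[prev_lev] += now - last_update;
            last_update = now;
            prev_lev = new_lev;

            assert(time_in_state[0] == 250 && time_in_state[1] == 0);
            return 0;
    }
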
@@ -351,16 +404,7 @@ int update_devfreq(struct devfreq *devfreq)
        err = devfreq->governor->get_target_freq(devfreq, &freq);
        if (err)
                return err;
-
-       /*
-        * Adjust the frequency with user freq, QoS and available freq.
-        *
-        * List from the highest priority
-        * max_freq
-        * min_freq
-        */
-       max_freq = min(devfreq->scaling_max_freq, devfreq->max_freq);
-       min_freq = max(devfreq->scaling_min_freq, devfreq->min_freq);
+       get_freq_range(devfreq, &min_freq, &max_freq);
 
        if (freq < min_freq) {
                freq = min_freq;
@@ -493,7 +537,7 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
                        msecs_to_jiffies(devfreq->profile->polling_ms));
 
 out_update:
-       devfreq->last_stat_updated = jiffies;
+       devfreq->stats.last_update = get_jiffies_64();
        devfreq->stop_polling = false;
 
        if (devfreq->profile->get_cur_freq &&
@@ -568,26 +612,69 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
                                 void *devp)
 {
        struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
-       int ret;
+       int err = -EINVAL;
 
        mutex_lock(&devfreq->lock);
 
        devfreq->scaling_min_freq = find_available_min_freq(devfreq);
-       if (!devfreq->scaling_min_freq) {
-               mutex_unlock(&devfreq->lock);
-               return -EINVAL;
-       }
+       if (!devfreq->scaling_min_freq)
+               goto out;
 
        devfreq->scaling_max_freq = find_available_max_freq(devfreq);
        if (!devfreq->scaling_max_freq) {
-               mutex_unlock(&devfreq->lock);
-               return -EINVAL;
+               devfreq->scaling_max_freq = ULONG_MAX;
+               goto out;
        }
 
-       ret = update_devfreq(devfreq);
+       err = update_devfreq(devfreq);
+
+out:
        mutex_unlock(&devfreq->lock);
+       if (err)
+               dev_err(devfreq->dev.parent,
+                       "failed to update frequency from OPP notifier (%d)\n",
+                       err);
 
-       return ret;
+       return NOTIFY_OK;
+}
+
+/**
+ * qos_notifier_call() - Common handler for QoS constraints.
+ * @devfreq:    the devfreq instance.
+ */
+static int qos_notifier_call(struct devfreq *devfreq)
+{
+       int err;
+
+       mutex_lock(&devfreq->lock);
+       err = update_devfreq(devfreq);
+       mutex_unlock(&devfreq->lock);
+       if (err)
+               dev_err(devfreq->dev.parent,
+                       "failed to update frequency from PM QoS (%d)\n",
+                       err);
+
+       return NOTIFY_OK;
+}
+
+/**
+ * qos_min_notifier_call() - Callback for QoS min_freq changes.
+ * @nb:                Should be devfreq->nb_min
+ */
+static int qos_min_notifier_call(struct notifier_block *nb,
+                                        unsigned long val, void *ptr)
+{
+       return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
+}
+
+/**
+ * qos_max_notifier_call() - Callback for QoS max_freq changes.
+ * @nb:                Should be devfreq->nb_max
+ */
+static int qos_max_notifier_call(struct notifier_block *nb,
+                                        unsigned long val, void *ptr)
+{
+       return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
 }
 
 /**
@@ -599,16 +686,36 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
 static void devfreq_dev_release(struct device *dev)
 {
        struct devfreq *devfreq = to_devfreq(dev);
+       int err;
 
        mutex_lock(&devfreq_list_lock);
-       if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
-               mutex_unlock(&devfreq_list_lock);
-               dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
-               return;
-       }
        list_del(&devfreq->node);
        mutex_unlock(&devfreq_list_lock);
 
+       err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
+                                        DEV_PM_QOS_MAX_FREQUENCY);
+       if (err && err != -ENOENT)
+               dev_warn(dev->parent,
+                       "Failed to remove max_freq notifier: %d\n", err);
+       err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
+                                        DEV_PM_QOS_MIN_FREQUENCY);
+       if (err && err != -ENOENT)
+               dev_warn(dev->parent,
+                       "Failed to remove min_freq notifier: %d\n", err);
+
+       if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
+               err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
+               if (err)
+                       dev_warn(dev->parent,
+                               "Failed to remove max_freq request: %d\n", err);
+       }
+       if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
+               err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
+               if (err)
+                       dev_warn(dev->parent,
+                               "Failed to remove min_freq request: %d\n", err);
+       }
+
        if (devfreq->profile->exit)
                devfreq->profile->exit(devfreq->dev.parent);
 
@@ -660,6 +767,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
        devfreq->dev.parent = dev;
        devfreq->dev.class = devfreq_class;
        devfreq->dev.release = devfreq_dev_release;
+       INIT_LIST_HEAD(&devfreq->node);
        devfreq->profile = profile;
        strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
        devfreq->previous_freq = profile->initial_freq;
@@ -681,7 +789,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
                err = -EINVAL;
                goto err_dev;
        }
-       devfreq->min_freq = devfreq->scaling_min_freq;
 
        devfreq->scaling_max_freq = find_available_max_freq(devfreq);
        if (!devfreq->scaling_max_freq) {
@@ -689,7 +796,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
                err = -EINVAL;
                goto err_dev;
        }
-       devfreq->max_freq = devfreq->scaling_max_freq;
 
        devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
        atomic_set(&devfreq->suspend_count, 0);
@@ -703,33 +809,56 @@ struct devfreq *devfreq_add_device(struct device *dev,
                goto err_out;
        }
 
-       devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+       devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
                        array3_size(sizeof(unsigned int),
                                    devfreq->profile->max_state,
                                    devfreq->profile->max_state),
                        GFP_KERNEL);
-       if (!devfreq->trans_table) {
+       if (!devfreq->stats.trans_table) {
                mutex_unlock(&devfreq->lock);
                err = -ENOMEM;
                goto err_devfreq;
        }
 
-       devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
+       devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
                        devfreq->profile->max_state,
-                       sizeof(unsigned long),
+                       sizeof(*devfreq->stats.time_in_state),
                        GFP_KERNEL);
-       if (!devfreq->time_in_state) {
+       if (!devfreq->stats.time_in_state) {
                mutex_unlock(&devfreq->lock);
                err = -ENOMEM;
                goto err_devfreq;
        }
 
-       devfreq->last_stat_updated = jiffies;
+       devfreq->stats.total_trans = 0;
+       devfreq->stats.last_update = get_jiffies_64();
 
        srcu_init_notifier_head(&devfreq->transition_notifier_list);
 
        mutex_unlock(&devfreq->lock);
 
+       err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
+                                    DEV_PM_QOS_MIN_FREQUENCY, 0);
+       if (err < 0)
+               goto err_devfreq;
+       err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
+                                    DEV_PM_QOS_MAX_FREQUENCY,
+                                    PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+       if (err < 0)
+               goto err_devfreq;
+
+       devfreq->nb_min.notifier_call = qos_min_notifier_call;
+       err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
+                                     DEV_PM_QOS_MIN_FREQUENCY);
+       if (err)
+               goto err_devfreq;
+
+       devfreq->nb_max.notifier_call = qos_max_notifier_call;
+       err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
+                                     DEV_PM_QOS_MAX_FREQUENCY);
+       if (err)
+               goto err_devfreq;
+
        mutex_lock(&devfreq_list_lock);
 
        governor = try_then_request_governor(devfreq->governor_name);
@@ -1133,6 +1262,14 @@ err_out:
 }
 EXPORT_SYMBOL(devfreq_remove_governor);
 
+static ssize_t name_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct devfreq *devfreq = to_devfreq(dev);
+
+       return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
+}
+static DEVICE_ATTR_RO(name);
+
 static ssize_t governor_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
 {
@@ -1303,42 +1440,39 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
        unsigned long value;
        int ret;
 
+       /*
+        * Protect against theoretical sysfs writes between
+        * device_add and dev_pm_qos_add_request
+        */
+       if (!dev_pm_qos_request_active(&df->user_min_freq_req))
+               return -EAGAIN;
+
        ret = sscanf(buf, "%lu", &value);
        if (ret != 1)
                return -EINVAL;
 
-       mutex_lock(&df->lock);
-
-       if (value) {
-               if (value > df->max_freq) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
-       } else {
-               unsigned long *freq_table = df->profile->freq_table;
-
-               /* Get minimum frequency according to sorting order */
-               if (freq_table[0] < freq_table[df->profile->max_state - 1])
-                       value = freq_table[0];
-               else
-                       value = freq_table[df->profile->max_state - 1];
-       }
+       /* Round down to kHz for PM QoS */
+       ret = dev_pm_qos_update_request(&df->user_min_freq_req,
+                                       value / HZ_PER_KHZ);
+       if (ret < 0)
+               return ret;
 
-       df->min_freq = value;
-       update_devfreq(df);
-       ret = count;
-unlock:
-       mutex_unlock(&df->lock);
-       return ret;
+       return count;
 }
 
 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
        struct devfreq *df = to_devfreq(dev);
+       unsigned long min_freq, max_freq;
+
+       mutex_lock(&df->lock);
+       get_freq_range(df, &min_freq, &max_freq);
+       mutex_unlock(&df->lock);
 
-       return sprintf(buf, "%lu\n", max(df->scaling_min_freq, df->min_freq));
+       return sprintf(buf, "%lu\n", min_freq);
 }
+static DEVICE_ATTR_RW(min_freq);
 
 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
@@ -1347,42 +1481,50 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
        unsigned long value;
        int ret;
 
+       /*
+        * Protect against theoretical sysfs writes between
+        * device_add and dev_pm_qos_add_request
+        */
+       if (!dev_pm_qos_request_active(&df->user_max_freq_req))
+               return -EAGAIN;
+
        ret = sscanf(buf, "%lu", &value);
        if (ret != 1)
                return -EINVAL;
 
-       mutex_lock(&df->lock);
-
-       if (value) {
-               if (value < df->min_freq) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
-       } else {
-               unsigned long *freq_table = df->profile->freq_table;
+       /*
+        * PM QoS frequencies are in kHz so we need to convert. Convert by
+        * rounding upwards so that the acceptable interval never shrinks.
+        *
+        * For example if the user writes "666666666" to sysfs this value will
+        * be converted to 666667 kHz and back to 666667000 Hz before an OPP
+        * lookup, this ensures that an OPP of 666666666Hz is still accepted.
+        *
+        * A value of zero means "no limit".
+        */
+       if (value)
+               value = DIV_ROUND_UP(value, HZ_PER_KHZ);
+       else
+               value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
 
-               /* Get maximum frequency according to sorting order */
-               if (freq_table[0] < freq_table[df->profile->max_state - 1])
-                       value = freq_table[df->profile->max_state - 1];
-               else
-                       value = freq_table[0];
-       }
+       ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
+       if (ret < 0)
+               return ret;
 
-       df->max_freq = value;
-       update_devfreq(df);
-       ret = count;
-unlock:
-       mutex_unlock(&df->lock);
-       return ret;
+       return count;
 }
-static DEVICE_ATTR_RW(min_freq);
 
 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
        struct devfreq *df = to_devfreq(dev);
+       unsigned long min_freq, max_freq;
+
+       mutex_lock(&df->lock);
+       get_freq_range(df, &min_freq, &max_freq);
+       mutex_unlock(&df->lock);
 
-       return sprintf(buf, "%lu\n", min(df->scaling_max_freq, df->max_freq));
+       return sprintf(buf, "%lu\n", max_freq);
 }
 static DEVICE_ATTR_RW(max_freq);
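
The comment in max_freq_store() spells out the rounding contract; a compact check of both conversions, assuming only the HZ_PER_KHZ constant and a DIV_ROUND_UP equivalent as used in this file:

    #include <assert.h>

    #define HZ_PER_KHZ 1000UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long opp = 666666666; /* Hz, the example from the comment */

            unsigned long max_khz = DIV_ROUND_UP(opp, HZ_PER_KHZ); /* 666667 */
            unsigned long min_khz = opp / HZ_PER_KHZ;              /* 666666 */

            /* the OPP still lies inside [min, max] after the round-trip */
            assert(min_khz * HZ_PER_KHZ <= opp);
            assert(max_khz * HZ_PER_KHZ >= opp);
            return 0;
    }
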
 
@@ -1449,18 +1591,47 @@ static ssize_t trans_stat_show(struct device *dev,
                                devfreq->profile->freq_table[i]);
                for (j = 0; j < max_state; j++)
                        len += sprintf(buf + len, "%10u",
-                               devfreq->trans_table[(i * max_state) + j]);
-               len += sprintf(buf + len, "%10u\n",
-                       jiffies_to_msecs(devfreq->time_in_state[i]));
+                               devfreq->stats.trans_table[(i * max_state) + j]);
+
+               len += sprintf(buf + len, "%10llu\n", (u64)
+                       jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
        }
 
        len += sprintf(buf + len, "Total transition : %u\n",
-                                       devfreq->total_trans);
+                                       devfreq->stats.total_trans);
        return len;
 }
-static DEVICE_ATTR_RO(trans_stat);
+
+static ssize_t trans_stat_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       struct devfreq *df = to_devfreq(dev);
+       int err, value;
+
+       if (df->profile->max_state == 0)
+               return count;
+
+       err = kstrtoint(buf, 10, &value);
+       if (err || value != 0)
+               return -EINVAL;
+
+       mutex_lock(&df->lock);
+       memset(df->stats.time_in_state, 0, (df->profile->max_state *
+                                       sizeof(*df->stats.time_in_state)));
+       memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
+                                       df->profile->max_state,
+                                       df->profile->max_state));
+       df->stats.total_trans = 0;
+       df->stats.last_update = get_jiffies_64();
+       mutex_unlock(&df->lock);
+
+       return count;
+}
+static DEVICE_ATTR_RW(trans_stat);
 
 static struct attribute *devfreq_attrs[] = {
+       &dev_attr_name.attr,
        &dev_attr_governor.attr,
        &dev_attr_available_governors.attr,
        &dev_attr_cur_freq.attr,
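
With trans_stat now writable, the accounting can be reset from userspace: writing the single value 0 (for example, echo 0 > /sys/class/devfreq/<device>/trans_stat) clears time_in_state, the transition table and total_trans, and restarts the clock from the current jiffies64 value; any other input is rejected with -EINVAL.
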
@@ -1474,6 +1645,81 @@ static struct attribute *devfreq_attrs[] = {
 };
 ATTRIBUTE_GROUPS(devfreq);
 
+/**
+ * devfreq_summary_show() - Show the summary of the devfreq devices
+ * @s:         seq_file instance to show the summary of devfreq devices
+ * @data:      not used
+ *
+ * Show the summary of the devfreq devices via the 'devfreq_summary' debugfs
+ * file. It helps users inspect the detailed state of every devfreq device.
+ *
+ * Always returns 0 because it only reports information and changes no data.
+ */
+static int devfreq_summary_show(struct seq_file *s, void *data)
+{
+       struct devfreq *devfreq;
+       struct devfreq *p_devfreq = NULL;
+       unsigned long cur_freq, min_freq, max_freq;
+       unsigned int polling_ms;
+
+       seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
+                       "dev_name",
+                       "dev",
+                       "parent_dev",
+                       "governor",
+                       "polling_ms",
+                       "cur_freq_Hz",
+                       "min_freq_Hz",
+                       "max_freq_Hz");
+       seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
+                       "------------------------------",
+                       "----------",
+                       "----------",
+                       "---------------",
+                       "----------",
+                       "------------",
+                       "------------",
+                       "------------");
+
+       mutex_lock(&devfreq_list_lock);
+
+       list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+               if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
+                                                       DEVFREQ_NAME_LEN)) {
+                       struct devfreq_passive_data *data = devfreq->data;
+
+                       if (data)
+                               p_devfreq = data->parent;
+               } else {
+                       p_devfreq = NULL;
+               }
+#endif
+
+               mutex_lock(&devfreq->lock);
+               cur_freq = devfreq->previous_freq;
+               get_freq_range(devfreq, &min_freq, &max_freq);
+               polling_ms = devfreq->profile->polling_ms;
+               mutex_unlock(&devfreq->lock);
+
+               seq_printf(s,
+                       "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
+                       dev_name(devfreq->dev.parent),
+                       dev_name(&devfreq->dev),
+                       p_devfreq ? dev_name(&p_devfreq->dev) : "null",
+                       devfreq->governor_name,
+                       polling_ms,
+                       cur_freq,
+                       min_freq,
+                       max_freq);
+       }
+
+       mutex_unlock(&devfreq_list_lock);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
+
 static int __init devfreq_init(void)
 {
        devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -1490,6 +1736,11 @@ static int __init devfreq_init(void)
        }
        devfreq_class->dev_groups = devfreq_groups;
 
+       devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
+       debugfs_create_file("devfreq_summary", 0444,
+                               devfreq_debugfs, NULL,
+                               &devfreq_summary_fops);
+
        return 0;
 }
 subsys_initcall(devfreq_init);
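
Assuming debugfs is mounted in the usual place, the new summary is read with a plain "cat /sys/kernel/debug/devfreq/devfreq_summary"; it prints one row per registered device (parent device name, devfreq device, passive parent if any, governor, polling interval, and the current/min/max frequencies in Hz), and the 0444 mode keeps the file read-only.
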
@@ -1683,7 +1934,7 @@ static void devm_devfreq_notifier_release(struct device *dev, void *res)
 
 /**
  * devm_devfreq_register_notifier()
-      - Resource-managed devfreq_register_notifier()
+ *     - Resource-managed devfreq_register_notifier()
  * @dev:       The devfreq user device. (parent of devfreq)
  * @devfreq:   The devfreq object.
 * @nb:                The notifier block to be registered.
@@ -1719,7 +1970,7 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
 
 /**
  * devm_devfreq_unregister_notifier()
-      - Resource-managed devfreq_unregister_notifier()
+ *     - Resource-managed devfreq_unregister_notifier()
  * @dev:       The devfreq user device. (parent of devfreq)
  * @devfreq:   The devfreq object.
  * @nb:                The notifier block to be unregistered.
index cef2cf5347ca76507c1afa01f49a2174fba15ee0..878825372f6f7d7560e443fd794293450ea4ff3d 100644 (file)
@@ -15,7 +15,7 @@ menuconfig PM_DEVFREQ_EVENT
 if PM_DEVFREQ_EVENT
 
 config DEVFREQ_EVENT_EXYNOS_NOCP
-       tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+       tristate "Exynos NoC (Network On Chip) Probe DEVFREQ event Driver"
        depends on ARCH_EXYNOS || COMPILE_TEST
        select PM_OPP
        select REGMAP_MMIO
@@ -24,7 +24,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
          (Network on Chip) Probe counters to measure the bandwidth of AXI bus.
 
 config DEVFREQ_EVENT_EXYNOS_PPMU
-       tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+       tristate "Exynos PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
        depends on ARCH_EXYNOS || COMPILE_TEST
        select PM_OPP
        help
@@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU
 
 config DEVFREQ_EVENT_ROCKCHIP_DFI
        tristate "ROCKCHIP DFI DEVFREQ event Driver"
-       depends on ARCH_ROCKCHIP
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
        help
          This add the devfreq-event driver for Rockchip SoC. It provides DFI
          (DDR Monitor Module) driver to count ddr load.
index 1c565926db9f588186e2cfcf73199d43bcf87420..ccc531ee6938fb8e9ba8824e709a3a960d2251f7 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support
  *
  * Copyright (c) 2016 Samsung Electronics Co., Ltd.
  * Author : Chanwoo Choi <cw00.choi@samsung.com>
index 55cc96284a367766638a034f6d34182d053a6a9e..2d6f08cfd0c5831a59d3eb0103aece64445f8dbb 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ * exynos-nocp.h - Exynos NoC (Network on Chip) Probe header file
  *
  * Copyright (c) 2016 Samsung Electronics Co., Ltd.
  * Author : Chanwoo Choi <cw00.choi@samsung.com>
index 85c7a77bf3f0dd01a1e489718f5dca704325952c..17ed980d9099861229ec780c9a7ea68567d0e54d 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support
  *
  * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
  * Author : Chanwoo Choi <cw00.choi@samsung.com>
@@ -101,17 +101,22 @@ static struct __exynos_ppmu_events {
        PPMU_EVENT(dmc1_1),
 };
 
-static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
 {
        int i;
 
        for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
-               if (!strcmp(edev->desc->name, ppmu_events[i].name))
+               if (!strcmp(edev_name, ppmu_events[i].name))
                        return ppmu_events[i].id;
 
        return -EINVAL;
 }
 
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+       return __exynos_ppmu_find_ppmu_id(edev->desc->name);
+}
+
 /*
  * The devfreq-event ops structure for PPMU v1.1
  */
@@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np,
                         * use default if not.
                         */
                        if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
-                               struct devfreq_event_dev edev;
                                int id;
                                /* Not all registers take the same value for
                                 * read+write data count.
                                 */
-                               edev.desc = &desc[j];
-                               id = exynos_ppmu_find_ppmu_id(&edev);
+                               id = __exynos_ppmu_find_ppmu_id(desc[j].name);
 
                                switch (id) {
                                case PPMU_PMNCNT0:
index 2844200474550209bcf6c28ebee3ecdd6dc5226f..97f667d0cbddfd6d8fc74039da51f18376a52684 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * exynos_ppmu.h - EXYNOS PPMU header file
+ * exynos_ppmu.h - Exynos PPMU header file
  *
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  * Author : Chanwoo Choi <cw00.choi@samsung.com>
index 5d1042188727ce7450d6eb7f524a9f28dd07ccfb..9a88faaf8b27ffbd44fa2c55d8b603aa21e34317 100644 (file)
@@ -177,7 +177,6 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct rockchip_dfi *data;
-       struct resource *res;
        struct devfreq_event_desc *desc;
        struct device_node *np = pdev->dev.of_node, *node;
 
@@ -185,8 +184,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
        if (!data)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       data->regs = devm_ioremap_resource(&pdev->dev, res);
+       data->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(data->regs))
                return PTR_ERR(data->regs);
 
@@ -200,6 +198,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
        node = of_parse_phandle(np, "rockchip,pmu", 0);
        if (node) {
                data->regmap_pmu = syscon_node_to_regmap(node);
+               of_node_put(node);
                if (IS_ERR(data->regmap_pmu))
                        return PTR_ERR(data->regmap_pmu);
        }
index c832673273a28daa7413fac8bb770eb8af0c4ca1..8fa8eb5413732fa7bab5df8e344b4a76d8597d3d 100644 (file)
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
-#include <linux/slab.h>
 
 #define DEFAULT_SATURATION_RATIO       40
 
@@ -127,6 +126,7 @@ static int exynos_bus_get_dev_status(struct device *dev,
 
        ret = exynos_bus_get_event(bus, &edata);
        if (ret < 0) {
+               dev_err(dev, "failed to get event from devfreq-event devices\n");
                stat->total_time = stat->busy_time = 0;
                goto err;
        }
@@ -287,52 +287,12 @@ err_clk:
        return ret;
 }
 
-static int exynos_bus_probe(struct platform_device *pdev)
+static int exynos_bus_profile_init(struct exynos_bus *bus,
+                                  struct devfreq_dev_profile *profile)
 {
-       struct device *dev = &pdev->dev;
-       struct device_node *np = dev->of_node, *node;
-       struct devfreq_dev_profile *profile;
+       struct device *dev = bus->dev;
        struct devfreq_simple_ondemand_data *ondemand_data;
-       struct devfreq_passive_data *passive_data;
-       struct devfreq *parent_devfreq;
-       struct exynos_bus *bus;
-       int ret, max_state;
-       unsigned long min_freq, max_freq;
-       bool passive = false;
-
-       if (!np) {
-               dev_err(dev, "failed to find devicetree node\n");
-               return -EINVAL;
-       }
-
-       bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
-       if (!bus)
-               return -ENOMEM;
-       mutex_init(&bus->lock);
-       bus->dev = &pdev->dev;
-       platform_set_drvdata(pdev, bus);
-
-       profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
-       if (!profile)
-               return -ENOMEM;
-
-       node = of_parse_phandle(dev->of_node, "devfreq", 0);
-       if (node) {
-               of_node_put(node);
-               passive = true;
-       } else {
-               ret = exynos_bus_parent_parse_of(np, bus);
-               if (ret < 0)
-                       return ret;
-       }
-
-       /* Parse the device-tree to get the resource information */
-       ret = exynos_bus_parse_of(np, bus);
-       if (ret < 0)
-               goto err_reg;
-
-       if (passive)
-               goto passive;
+       int ret;
 
        /* Initialize the struct profile and governor data for parent device */
        profile->polling_ms = 50;
@@ -341,10 +301,9 @@ static int exynos_bus_probe(struct platform_device *pdev)
        profile->exit = exynos_bus_exit;
 
        ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
-       if (!ondemand_data) {
-               ret = -ENOMEM;
-               goto err;
-       }
+       if (!ondemand_data)
+               return -ENOMEM;
+
        ondemand_data->upthreshold = 40;
        ondemand_data->downdifferential = 5;
 
@@ -354,15 +313,14 @@ static int exynos_bus_probe(struct platform_device *pdev)
                                                ondemand_data);
        if (IS_ERR(bus->devfreq)) {
                dev_err(dev, "failed to add devfreq device\n");
-               ret = PTR_ERR(bus->devfreq);
-               goto err;
+               return PTR_ERR(bus->devfreq);
        }
 
        /* Register opp_notifier to catch the change of OPP  */
        ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
        if (ret < 0) {
                dev_err(dev, "failed to register opp notifier\n");
-               goto err;
+               return ret;
        }
 
        /*
@@ -372,33 +330,44 @@ static int exynos_bus_probe(struct platform_device *pdev)
        ret = exynos_bus_enable_edev(bus);
        if (ret < 0) {
                dev_err(dev, "failed to enable devfreq-event devices\n");
-               goto err;
+               return ret;
        }
 
        ret = exynos_bus_set_event(bus);
        if (ret < 0) {
                dev_err(dev, "failed to set event to devfreq-event devices\n");
-               goto err;
+               goto err_edev;
        }
 
-       goto out;
-passive:
+       return 0;
+
+err_edev:
+       if (exynos_bus_disable_edev(bus))
+               dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+       return ret;
+}
+
+static int exynos_bus_profile_init_passive(struct exynos_bus *bus,
+                                          struct devfreq_dev_profile *profile)
+{
+       struct device *dev = bus->dev;
+       struct devfreq_passive_data *passive_data;
+       struct devfreq *parent_devfreq;
+
        /* Initialize the struct profile and governor data for passive device */
        profile->target = exynos_bus_target;
        profile->exit = exynos_bus_passive_exit;
 
        /* Get the instance of parent devfreq device */
        parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
-       if (IS_ERR(parent_devfreq)) {
-               ret = -EPROBE_DEFER;
-               goto err;
-       }
+       if (IS_ERR(parent_devfreq))
+               return -EPROBE_DEFER;
 
        passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
-       if (!passive_data) {
-               ret = -ENOMEM;
-               goto err;
-       }
+       if (!passive_data)
+               return -ENOMEM;
+
        passive_data->parent = parent_devfreq;
 
        /* Add devfreq device for exynos bus with passive governor */
@@ -407,11 +376,61 @@ passive:
        if (IS_ERR(bus->devfreq)) {
                dev_err(dev,
                        "failed to add devfreq dev with passive governor\n");
-               ret = PTR_ERR(bus->devfreq);
-               goto err;
+               return PTR_ERR(bus->devfreq);
+       }
+
+       return 0;
+}
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node, *node;
+       struct devfreq_dev_profile *profile;
+       struct exynos_bus *bus;
+       int ret, max_state;
+       unsigned long min_freq, max_freq;
+       bool passive = false;
+
+       if (!np) {
+               dev_err(dev, "failed to find devicetree node\n");
+               return -EINVAL;
        }
 
-out:
+       bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+       if (!bus)
+               return -ENOMEM;
+       mutex_init(&bus->lock);
+       bus->dev = &pdev->dev;
+       platform_set_drvdata(pdev, bus);
+
+       profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+       if (!profile)
+               return -ENOMEM;
+
+       node = of_parse_phandle(dev->of_node, "devfreq", 0);
+       if (node) {
+               of_node_put(node);
+               passive = true;
+       } else {
+               ret = exynos_bus_parent_parse_of(np, bus);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* Parse the device-tree to get the resource information */
+       ret = exynos_bus_parse_of(np, bus);
+       if (ret < 0)
+               goto err_reg;
+
+       if (passive)
+               ret = exynos_bus_profile_init_passive(bus, profile);
+       else
+               ret = exynos_bus_profile_init(bus, profile);
+
+       if (ret < 0)
+               goto err;
+
        max_state = bus->devfreq->profile->max_state;
        min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
        max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
diff --git a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c
new file mode 100644 (file)
index 0000000..bc82d36
--- /dev/null
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/devfreq.h>
+#include <linux/pm_opp.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/arm-smccc.h>
+
+#define IMX_SIP_DDR_DVFS                       0xc2000004
+
+/* Query available frequencies. */
+#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT                0x10
+#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO         0x11
+
+/*
+ * This should be in a 1:1 mapping with devicetree OPPs but
+ * firmware provides additional info.
+ */
+struct imx8m_ddrc_freq {
+       unsigned long rate;
+       unsigned long smcarg;
+       int dram_core_parent_index;
+       int dram_alt_parent_index;
+       int dram_apb_parent_index;
+};
+
+/* Hardware limitation */
+#define IMX8M_DDRC_MAX_FREQ_COUNT 4
+
+/*
+ * i.MX8M DRAM Controller clocks have the following structure (abridged):
+ *
+ * +----------+       |\            +------+
+ * | dram_pll |-------|M| dram_core |      |
+ * +----------+       |U|---------->| D    |
+ *                 /--|X|           |  D   |
+ *   dram_alt_root |  |/            |   R  |
+ *                 |                |    C |
+ *            +---------+           |      |
+ *            |FIX DIV/4|           |      |
+ *            +---------+           |      |
+ *  composite:     |                |      |
+ * +----------+    |                |      |
+ * | dram_alt |----/                |      |
+ * +----------+                     |      |
+ * | dram_apb |-------------------->|      |
+ * +----------+                     +------+
+ *
+ * The dram_pll is used for higher rates and dram_alt is used for lower rates.
+ *
+ * Frequency switching is implemented in TF-A (via SMC call) and can change the
+ * configuration of the clocks, including mux parents. The dram_alt and
+ * dram_apb clocks are "imx composite" and their parent can change too.
+ *
+ * We need to prepare/enable the new mux parents ahead of switching and
+ * update their information afterwards.
+ */
+struct imx8m_ddrc {
+       struct devfreq_dev_profile profile;
+       struct devfreq *devfreq;
+
+       /* For frequency switching: */
+       struct clk *dram_core;
+       struct clk *dram_pll;
+       struct clk *dram_alt;
+       struct clk *dram_apb;
+
+       int freq_count;
+       struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT];
+};
+
+static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv,
+                                                   unsigned long rate)
+{
+       struct imx8m_ddrc_freq *freq;
+       int i;
+
+       /*
+        * Firmware reports values in MT/s, so we round down from Hz.
+        * Rounding is extra generous to ensure a match.
+        */
+       rate = DIV_ROUND_CLOSEST(rate, 250000);
+       for (i = 0; i < priv->freq_count; ++i) {
+               freq = &priv->freq_table[i];
+               if (freq->rate == rate ||
+                               freq->rate + 1 == rate ||
+                               freq->rate - 1 == rate)
+                       return freq;
+       }
+
+       return NULL;
+}
+
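
A quick check of the unit conversion used by imx8m_ddrc_find_freq() above, with an invented dram_core rate; the 250000 divisor and the plus-or-minus-one tolerance come straight from the function:

    #include <assert.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            unsigned long rate_hz = 800000000;      /* made-up dram_core rate */
            unsigned long fw_units = DIV_ROUND_CLOSEST(rate_hz, 250000);

            /* the firmware table is then searched with a +/-1 window */
            assert(fw_units == 3200);
            return 0;
    }
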
+static void imx8m_ddrc_smc_set_freq(int target_freq)
+{
+       struct arm_smccc_res res;
+       u32 online_cpus = 0;
+       int cpu;
+
+       local_irq_disable();
+
+       for_each_online_cpu(cpu)
+               online_cpus |= (1 << (cpu * 8));
+
+       /* change the DDR frequency */
+       arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus,
+                       0, 0, 0, 0, 0, &res);
+
+       local_irq_enable();
+}
+
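
The online_cpus argument packs one byte per CPU, with bit 0 of each byte set when that CPU is online; a sketch with four CPUs assumed online:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t online_cpus = 0;
            int cpu;

            for (cpu = 0; cpu < 4; cpu++)   /* pretend CPUs 0..3 are online */
                    online_cpus |= UINT32_C(1) << (cpu * 8);

            assert(online_cpus == 0x01010101);
            return 0;
    }
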
+static struct clk *clk_get_parent_by_index(struct clk *clk, int index)
+{
+       struct clk_hw *hw;
+
+       hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index);
+
+       return hw ? hw->clk : NULL;
+}
+
+static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq)
+{
+       struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+       struct clk *new_dram_core_parent;
+       struct clk *new_dram_alt_parent;
+       struct clk *new_dram_apb_parent;
+       int ret;
+
+       /*
+        * Fetch new parents
+        *
+        * new_dram_alt_parent and new_dram_apb_parent are optional but
+        * new_dram_core_parent is not.
+        */
+       new_dram_core_parent = clk_get_parent_by_index(
+                       priv->dram_core, freq->dram_core_parent_index - 1);
+       if (!new_dram_core_parent) {
+               dev_err(dev, "failed to fetch new dram_core parent\n");
+               return -EINVAL;
+       }
+       if (freq->dram_alt_parent_index) {
+               new_dram_alt_parent = clk_get_parent_by_index(
+                               priv->dram_alt,
+                               freq->dram_alt_parent_index - 1);
+               if (!new_dram_alt_parent) {
+                       dev_err(dev, "failed to fetch new dram_alt parent\n");
+                       return -EINVAL;
+               }
+       } else
+               new_dram_alt_parent = NULL;
+
+       if (freq->dram_apb_parent_index) {
+               new_dram_apb_parent = clk_get_parent_by_index(
+                               priv->dram_apb,
+                               freq->dram_apb_parent_index - 1);
+               if (!new_dram_apb_parent) {
+                       dev_err(dev, "failed to fetch new dram_apb parent\n");
+                       return -EINVAL;
+               }
+       } else
+               new_dram_apb_parent = NULL;
+
+       /* increase reference counts and ensure clks are ON before switch */
+       ret = clk_prepare_enable(new_dram_core_parent);
+       if (ret) {
+               dev_err(dev, "failed to enable new dram_core parent: %d\n",
+                       ret);
+               goto out;
+       }
+       ret = clk_prepare_enable(new_dram_alt_parent);
+       if (ret) {
+               dev_err(dev, "failed to enable new dram_alt parent: %d\n",
+                       ret);
+               goto out_disable_core_parent;
+       }
+       ret = clk_prepare_enable(new_dram_apb_parent);
+       if (ret) {
+               dev_err(dev, "failed to enable new dram_apb parent: %d\n",
+                       ret);
+               goto out_disable_alt_parent;
+       }
+
+       imx8m_ddrc_smc_set_freq(freq->smcarg);
+
+       /* update parents in clk tree after switch. */
+       ret = clk_set_parent(priv->dram_core, new_dram_core_parent);
+       if (ret)
+               dev_warn(dev, "failed to set dram_core parent: %d\n", ret);
+       if (new_dram_alt_parent) {
+               ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent);
+               if (ret)
+                       dev_warn(dev, "failed to set dram_alt parent: %d\n",
+                                ret);
+       }
+       if (new_dram_apb_parent) {
+               ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent);
+               if (ret)
+                       dev_warn(dev, "failed to set dram_apb parent: %d\n",
+                                ret);
+       }
+
+       /*
+        * Explicitly refresh dram PLL rate.
+        *
+        * Even if it's marked with CLK_GET_RATE_NOCACHE the rate will not be
+        * automatically refreshed when clk_get_rate is called on children.
+        */
+       clk_get_rate(priv->dram_pll);
+
+       /*
+        * clk_set_parent() transfers the reference count from the old
+        * parent; now we drop the extra reference counts used during
+        * the switch.
+        */
+       clk_disable_unprepare(new_dram_apb_parent);
+out_disable_alt_parent:
+       clk_disable_unprepare(new_dram_alt_parent);
+out_disable_core_parent:
+       clk_disable_unprepare(new_dram_core_parent);
+out:
+       return ret;
+}
+
+static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+       struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+       struct imx8m_ddrc_freq *freq_info;
+       struct dev_pm_opp *new_opp;
+       unsigned long old_freq, new_freq;
+       int ret;
+
+       new_opp = devfreq_recommended_opp(dev, freq, flags);
+       if (IS_ERR(new_opp)) {
+               ret = PTR_ERR(new_opp);
+               dev_err(dev, "failed to get recommended opp: %d\n", ret);
+               return ret;
+       }
+       dev_pm_opp_put(new_opp);
+
+       old_freq = clk_get_rate(priv->dram_core);
+       if (*freq == old_freq)
+               return 0;
+
+       freq_info = imx8m_ddrc_find_freq(priv, *freq);
+       if (!freq_info)
+               return -EINVAL;
+
+       /*
+        * Read back the clk rate to verify switch was correct and so that
+        * we can report it on all error paths.
+        */
+       ret = imx8m_ddrc_set_freq(dev, freq_info);
+
+       new_freq = clk_get_rate(priv->dram_core);
+       if (ret)
+               dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. now at %lu\n",
+                       *freq, old_freq, ret, new_freq);
+       else if (*freq != new_freq)
+               dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n",
+                       *freq, old_freq, new_freq);
+       else
+               dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n",
+                       *freq, old_freq);
+
+       return ret;
+}
+
+static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+       struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+       *freq = clk_get_rate(priv->dram_core);
+
+       return 0;
+}
+
+static int imx8m_ddrc_get_dev_status(struct device *dev,
+                                    struct devfreq_dev_status *stat)
+{
+       struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+       stat->busy_time = 0;
+       stat->total_time = 0;
+       stat->current_frequency = clk_get_rate(priv->dram_core);
+
+       return 0;
+}
+
+static int imx8m_ddrc_init_freq_info(struct device *dev)
+{
+       struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+       struct arm_smccc_res res;
+       int index;
+
+       /* An error here means DDR DVFS API not supported by firmware */
+       arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT,
+                       0, 0, 0, 0, 0, 0, &res);
+       priv->freq_count = res.a0;
+       if (priv->freq_count <= 0 ||
+                       priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT)
+               return -ENODEV;
+
+       for (index = 0; index < priv->freq_count; ++index) {
+               struct imx8m_ddrc_freq *freq = &priv->freq_table[index];
+
+               arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO,
+                             index, 0, 0, 0, 0, 0, &res);
+               /* Result should be strictly positive */
+               if ((long)res.a0 <= 0)
+                       return -ENODEV;
+
+               freq->rate = res.a0;
+               freq->smcarg = index;
+               freq->dram_core_parent_index = res.a1;
+               freq->dram_alt_parent_index = res.a2;
+               freq->dram_apb_parent_index = res.a3;
+
+               /* dram_core has 2 options: dram_pll or dram_alt_root */
+               if (freq->dram_core_parent_index != 1 &&
+                               freq->dram_core_parent_index != 2)
+                       return -ENODEV;
+               /* dram_apb and dram_alt have exactly 8 possible parents */
+               if (freq->dram_alt_parent_index > 8 ||
+                               freq->dram_apb_parent_index > 8)
+                       return -ENODEV;
+               /* dram_core from alt requires explicit dram_alt parent */
+               if (freq->dram_core_parent_index == 2 &&
+                               freq->dram_alt_parent_index == 0)
+                       return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int imx8m_ddrc_check_opps(struct device *dev)
+{
+       struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+       struct imx8m_ddrc_freq *freq_info;
+       struct dev_pm_opp *opp;
+       unsigned long freq;
+       int i, opp_count;
+
+       /* Enumerate DT OPPs and disable those not supported by firmware */
+       opp_count = dev_pm_opp_get_opp_count(dev);
+       if (opp_count < 0)
+               return opp_count;
+       for (i = 0, freq = 0; i < opp_count; ++i, ++freq) {
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+               if (IS_ERR(opp)) {
+                       dev_err(dev, "Failed enumerating OPPs: %ld\n",
+                               PTR_ERR(opp));
+                       return PTR_ERR(opp);
+               }
+               dev_pm_opp_put(opp);
+
+               freq_info = imx8m_ddrc_find_freq(priv, freq);
+               if (!freq_info) {
+                       dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n",
+                                       freq, DIV_ROUND_CLOSEST(freq, 250000));
+                       dev_pm_opp_disable(dev, freq);
+               }
+       }
+
+       return 0;
+}
+
+static void imx8m_ddrc_exit(struct device *dev)
+{
+       dev_pm_opp_of_remove_table(dev);
+}
+
+static int imx8m_ddrc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct imx8m_ddrc *priv;
+       const char *gov = DEVFREQ_GOV_USERSPACE;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, priv);
+
+       ret = imx8m_ddrc_init_freq_info(dev);
+       if (ret) {
+               dev_err(dev, "failed to init firmware freq info: %d\n", ret);
+               return ret;
+       }
+
+       priv->dram_core = devm_clk_get(dev, "core");
+       if (IS_ERR(priv->dram_core)) {
+               ret = PTR_ERR(priv->dram_core);
+               dev_err(dev, "failed to fetch core clock: %d\n", ret);
+               return ret;
+       }
+       priv->dram_pll = devm_clk_get(dev, "pll");
+       if (IS_ERR(priv->dram_pll)) {
+               ret = PTR_ERR(priv->dram_pll);
+               dev_err(dev, "failed to fetch pll clock: %d\n", ret);
+               return ret;
+       }
+       priv->dram_alt = devm_clk_get(dev, "alt");
+       if (IS_ERR(priv->dram_alt)) {
+               ret = PTR_ERR(priv->dram_alt);
+               dev_err(dev, "failed to fetch alt clock: %d\n", ret);
+               return ret;
+       }
+       priv->dram_apb = devm_clk_get(dev, "apb");
+       if (IS_ERR(priv->dram_apb)) {
+               ret = PTR_ERR(priv->dram_apb);
+               dev_err(dev, "failed to fetch apb clock: %d\n", ret);
+               return ret;
+       }
+
+       ret = dev_pm_opp_of_add_table(dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to get OPP table\n");
+               return ret;
+       }
+
+       ret = imx8m_ddrc_check_opps(dev);
+       if (ret < 0)
+               goto err;
+
+       priv->profile.polling_ms = 1000;
+       priv->profile.target = imx8m_ddrc_target;
+       priv->profile.get_dev_status = imx8m_ddrc_get_dev_status;
+       priv->profile.exit = imx8m_ddrc_exit;
+       priv->profile.get_cur_freq = imx8m_ddrc_get_cur_freq;
+       priv->profile.initial_freq = clk_get_rate(priv->dram_core);
+
+       priv->devfreq = devm_devfreq_add_device(dev, &priv->profile,
+                                               gov, NULL);
+       if (IS_ERR(priv->devfreq)) {
+               ret = PTR_ERR(priv->devfreq);
+               dev_err(dev, "failed to add devfreq device: %d\n", ret);
+               goto err;
+       }
+
+       return 0;
+
+err:
+       dev_pm_opp_of_remove_table(dev);
+       return ret;
+}
+
+static const struct of_device_id imx8m_ddrc_of_match[] = {
+       { .compatible = "fsl,imx8m-ddrc", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match);
+
+static struct platform_driver imx8m_ddrc_platdrv = {
+       .probe          = imx8m_ddrc_probe,
+       .driver = {
+               .name   = "imx8m-ddrc-devfreq",
+               .of_match_table = of_match_ptr(imx8m_ddrc_of_match),
+       },
+};
+module_platform_driver(imx8m_ddrc_platdrv);
+
+MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
index 2e65d7279d79e24daf42cf6522a4f3ca90546210..24f04f78285b7dd9ade185b07a8a26be96cf9d3e 100644 (file)
@@ -364,7 +364,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
                        if (res.a0) {
                                dev_err(dev, "Failed to set dram param: %ld\n",
                                        res.a0);
-                               return -EINVAL;
+                               ret = -EINVAL;
+                               goto err_edev;
                        }
                }
        }
@@ -372,8 +373,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
        node = of_parse_phandle(np, "rockchip,pmu", 0);
        if (node) {
                data->regmap_pmu = syscon_node_to_regmap(node);
-               if (IS_ERR(data->regmap_pmu))
-                       return PTR_ERR(data->regmap_pmu);
+               of_node_put(node);
+               if (IS_ERR(data->regmap_pmu)) {
+                       ret = PTR_ERR(data->regmap_pmu);
+                       goto err_edev;
+               }
        }
 
        regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
@@ -391,7 +395,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
                data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
                break;
        default:
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_edev;
        };
 
        arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
@@ -425,7 +430,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
         */
        if (dev_pm_opp_of_add_table(dev)) {
                dev_err(dev, "Invalid operating-points in device tree.\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_edev;
        }
 
        of_property_read_u32(np, "upthreshold",
@@ -465,6 +471,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
 
 err_free_opp:
        dev_pm_opp_of_remove_table(&pdev->dev);
+err_edev:
+       devfreq_event_disable_edev(data->edev);
+
        return ret;
 }
 
index 76fb072c22dc403ef5bb34c37472bd3fc1533415..5a5a1da01a00f3fbc8ca6e2e51a8dea69003342a 100644 (file)
@@ -221,7 +221,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
        a_fences = get_fences(a, &a_num_fences);
        b_fences = get_fences(b, &b_num_fences);
        if (a_num_fences > INT_MAX - b_num_fences)
-               return NULL;
+               goto err;
 
        num_fences = a_num_fences + b_num_fences;
 
index 6fa1eba9d4778775c57bdbef8f3c0bbb57b79bfc..5142da401db3fb5939dedd7e14be1ddefdc6621f 100644 (file)
@@ -239,6 +239,14 @@ config FSL_RAID
          the capability to offload memcpy, xor and pq computation
          for raid5/6.
 
+config HISI_DMA
+       tristate "HiSilicon DMA Engine support"
+       depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Support the HiSilicon Kunpeng SoC DMA engine.
+
 config IMG_MDC_DMA
        tristate "IMG MDC support"
        depends on MIPS || COMPILE_TEST
@@ -273,6 +281,19 @@ config INTEL_IDMA64
          Enable DMA support for Intel Low Power Subsystem such as found on
          Intel Skylake PCH.
 
+config INTEL_IDXD
+       tristate "Intel Data Accelerators support"
+       depends on PCI && X86_64
+       select DMA_ENGINE
+       select SBITMAP
+       help
+         Enable support for the Intel(R) data accelerators present
+         in Intel Xeon CPUs.
+
+         Say Y if you have such a platform.
+
+         If unsure, say N.
+
 config INTEL_IOATDMA
        tristate "Intel I/OAT DMA support"
        depends on PCI && X86_64
@@ -497,6 +518,15 @@ config PXA_DMA
          16 to 32 channels for peripheral to memory or memory to memory
          transfers.
 
+config PLX_DMA
+       tristate "PLX ExpressLane PEX Switch DMA Engine Support"
+       depends on PCI
+       select DMA_ENGINE
+       help
+         Some PLX ExpressLane PCI switches support additional DMA engines.
+         These are exposed via extra functions on the switch's
+         upstream port. Each function exposes one DMA channel.
+
 config SIRF_DMA
        tristate "CSR SiRFprimaII/SiRFmarco DMA support"
        depends on ARCH_SIRF
index 42d7e2fc64faf8a4ac7100f78e060a598eb11eec..1d908394fbeac5a4e6c0293b1cbeb89962084e09 100644 (file)
@@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
 obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
 obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HISI_DMA) += hisi_dma.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_INTEL_IDMA64) += idma64.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IDXD) += idxd/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
@@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_OWL_DMA) += owl-dma.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PLX_DMA) += plx_dma.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_PXA_DMA) += pxa_dma.o
 obj-$(CONFIG_RENESAS_DMA) += sh/
index 832aefbe7af96d321e41b4261d966b44beffa3fe..539e785039cacfa610be9a8ff5786e6637abb460 100644 (file)
@@ -772,10 +772,10 @@ static int request_and_map(struct platform_device *pdev, const char *name,
                return -EBUSY;
        }
 
-       *ptr = devm_ioremap_nocache(device, region->start,
+       *ptr = devm_ioremap(device, region->start,
                                    resource_size(region));
        if (*ptr == NULL) {
-               dev_err(device, "ioremap_nocache of %s failed!", name);
+               dev_err(device, "ioremap of %s failed!", name);
                return -ENOMEM;
        }
 
index e4c593f48575c22b6a22c1d14cb05aa03fe675c8..4768ef26013b209edf02ab6bb6eabbaf6af1463c 100644 (file)
@@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 
        /* stop DMA activity */
        if (c->desc) {
-               if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
-                       vchan_terminate_vdesc(&c->desc->vd);
-               else
-                       vchan_vdesc_fini(&c->desc->vd);
+               vchan_terminate_vdesc(&c->desc->vd);
                c->desc = NULL;
                bcm2835_dma_abort(c);
        }
index a0ee404b736ed966098c94e170fd62a71bfb6a5e..f1d149e328395635fc2a427a7921f22c04672f87 100644 (file)
@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
        struct dma_device *dma_dev;
        struct axi_dmac *dmac;
        struct resource *res;
+       struct regmap *regmap;
        int ret;
 
        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dmac);
 
-       devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
+       regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
+                                      &axi_dmac_regmap_config);
+       if (IS_ERR(regmap)) {
+               ret = PTR_ERR(regmap);
+               goto err_free_irq;
+       }
 
        return 0;
 
+err_free_irq:
+       free_irq(dmac->irq, dmac);
 err_unregister_of:
        of_dma_controller_free(pdev->dev.of_node);
 err_unregister_device:
index fa626acdc9b961919208a316d719ad9d2f25118b..448f663da89c69e7bd0b4d9bfa135174a28e0bc0 100644 (file)
@@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
 static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
        .nb_channels = 6,
        .transfer_ord_max = 5,
-       .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+       .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
+                JZ_SOC_DATA_BREAK_LINKS,
 };
 
 static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
@@ -1020,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
        .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
 };
 
+static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
+       .nb_channels = 32,
+       .transfer_ord_max = 7,
+       .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
 static const struct of_device_id jz4780_dma_dt_match[] = {
        { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
        { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
        { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
        { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
        { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
+       { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
        {},
 };
 MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
index 03ac4b96117cd8dbf423998c6fa3059cdb532dfa..f3ef4edd4de178dea5e920cb8df4d467d937b800 100644 (file)
@@ -60,6 +60,8 @@ static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
 
+#define DMA_SLAVE_NAME "slave"
+
 /**
  * dev_to_dma_chan - convert a device pointer to its sysfs container object
  * @dev - device node
@@ -164,11 +166,152 @@ static struct class dma_devclass = {
 
 /* --- client and device registration --- */
 
-#define dma_device_satisfies_mask(device, mask) \
-       __dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
-                           const dma_cap_mask_t *want)
+/**
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+       struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
+ */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
+{
+       enum dma_transaction_type cap;
+       int err = 0;
+
+       bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+       /* 'interrupt', 'private', and 'slave' are channel capabilities,
+        * but are not associated with an operation so they do not need
+        * an entry in the channel_table
+        */
+       clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+       clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+       clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+       for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+               channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+               if (!channel_table[cap]) {
+                       err = -ENOMEM;
+                       break;
+               }
+       }
+
+       if (err) {
+               pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+               for_each_dma_cap_mask(cap, dma_cap_mask_all)
+                       free_percpu(channel_table[cap]);
+       }
+
+       return err;
+}
+arch_initcall(dma_channel_table_init);
+
+/**
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as
+ *     the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+       int node = dev_to_node(chan->device->dev);
+       return node == NUMA_NO_NODE ||
+               cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as
+ *     the cpu
+ * @cap: capability to match
+ * @cpu: cpu index which the channel should be close to
+ *
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+       struct dma_device *device;
+       struct dma_chan *chan;
+       struct dma_chan *min = NULL;
+       struct dma_chan *localmin = NULL;
+
+       list_for_each_entry(device, &dma_device_list, global_node) {
+               if (!dma_has_cap(cap, device->cap_mask) ||
+                   dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node) {
+                       if (!chan->client_count)
+                               continue;
+                       if (!min || chan->table_count < min->table_count)
+                               min = chan;
+
+                       if (dma_chan_is_local(chan, cpu))
+                               if (!localmin ||
+                                   chan->table_count < localmin->table_count)
+                                       localmin = chan;
+               }
+       }
+
+       chan = localmin ? localmin : min;
+
+       if (chan)
+               chan->table_count++;
+
+       return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.  Must be called under
+ * dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+       struct dma_chan *chan;
+       struct dma_device *device;
+       int cpu;
+       int cap;
+
+       /* undo the last distribution */
+       for_each_dma_cap_mask(cap, dma_cap_mask_all)
+               for_each_possible_cpu(cpu)
+                       per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+       list_for_each_entry(device, &dma_device_list, global_node) {
+               if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node)
+                       chan->table_count = 0;
+       }
+
+       /* don't populate the channel_table if no clients are available */
+       if (!dmaengine_ref_count)
+               return;
+
+       /* redistribute available channels */
+       for_each_dma_cap_mask(cap, dma_cap_mask_all)
+               for_each_online_cpu(cpu) {
+                       chan = min_chan(cap, cpu);
+                       per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+               }
+}
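
Editor's note: the channel_table that dma_channel_rebalance() populates is exactly what
dma_find_channel() consults on the hot path. A hedged sketch of a memcpy-offload user
(foo_get_memcpy_chan() is illustrative; error handling is minimal):

/* Hedged sketch: look up this CPU's preassigned memcpy channel. The table
 * is only populated while clients hold a dmaengine_get() reference. */
static struct dma_chan *foo_get_memcpy_chan(void)
{
	struct dma_chan *chan;

	dmaengine_get();			/* take a client reference */
	chan = dma_find_channel(DMA_MEMCPY);	/* lockless per-cpu table lookup */
	if (!chan)
		dmaengine_put();		/* no public memcpy channels */
	return chan;
}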
+
+static int dma_device_satisfies_mask(struct dma_device *device,
+                                    const dma_cap_mask_t *want)
 {
        dma_cap_mask_t has;
 
@@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
 
 static struct module *dma_chan_to_owner(struct dma_chan *chan)
 {
-       return chan->device->dev->driver->owner;
+       return chan->device->owner;
 }
 
 /**
@@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan)
        }
 }
 
+static void dma_device_release(struct kref *ref)
+{
+       struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+       list_del_rcu(&device->global_node);
+       dma_channel_rebalance();
+
+       if (device->device_release)
+               device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+       lockdep_assert_held(&dma_list_mutex);
+       kref_put(&device->ref, dma_device_release);
+}
+
 /**
  * dma_chan_get - try to grab a dma channel's parent driver module
  * @chan - channel to grab
@@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan)
        if (!try_module_get(owner))
                return -ENODEV;
 
+       ret = kref_get_unless_zero(&chan->device->ref);
+       if (!ret) {
+               ret = -ENODEV;
+               goto module_put_out;
+       }
+
        /* allocate upon first client reference */
        if (chan->device->device_alloc_chan_resources) {
                ret = chan->device->device_alloc_chan_resources(chan);
@@ -233,6 +399,8 @@ out:
        return 0;
 
 err_out:
+       dma_device_put(chan->device);
+module_put_out:
        module_put(owner);
        return ret;
 }
@@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan)
                return;
 
        chan->client_count--;
-       module_put(dma_chan_to_owner(chan));
 
        /* This channel is not in use anymore, free it */
        if (!chan->client_count && chan->device->device_free_chan_resources) {
@@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan)
                chan->router = NULL;
                chan->route_data = NULL;
        }
+
+       dma_device_put(chan->device);
+       module_put(dma_chan_to_owner(chan));
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -288,57 +458,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 }
 EXPORT_SYMBOL(dma_sync_wait);
 
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
- */
-struct dma_chan_tbl_ent {
-       struct dma_chan *chan;
-};
-
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
-static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
-
-static int __init dma_channel_table_init(void)
-{
-       enum dma_transaction_type cap;
-       int err = 0;
-
-       bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
-       /* 'interrupt', 'private', and 'slave' are channel capabilities,
-        * but are not associated with an operation so they do not need
-        * an entry in the channel_table
-        */
-       clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
-       clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
-       clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
-
-       for_each_dma_cap_mask(cap, dma_cap_mask_all) {
-               channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
-               if (!channel_table[cap]) {
-                       err = -ENOMEM;
-                       break;
-               }
-       }
-
-       if (err) {
-               pr_err("initialization failure\n");
-               for_each_dma_cap_mask(cap, dma_cap_mask_all)
-                       free_percpu(channel_table[cap]);
-       }
-
-       return err;
-}
-arch_initcall(dma_channel_table_init);
-
 /**
  * dma_find_channel - find a channel to carry out the operation
  * @tx_type: transaction type
@@ -369,97 +488,6 @@ void dma_issue_pending_all(void)
 }
 EXPORT_SYMBOL(dma_issue_pending_all);
 
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
- */
-static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
-{
-       int node = dev_to_node(chan->device->dev);
-       return node == NUMA_NO_NODE ||
-               cpumask_test_cpu(cpu, cpumask_of_node(node));
-}
-
-/**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
- *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
- * reference count is taken into account.
- * Must be called under dma_list_mutex.
- */
-static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
-{
-       struct dma_device *device;
-       struct dma_chan *chan;
-       struct dma_chan *min = NULL;
-       struct dma_chan *localmin = NULL;
-
-       list_for_each_entry(device, &dma_device_list, global_node) {
-               if (!dma_has_cap(cap, device->cap_mask) ||
-                   dma_has_cap(DMA_PRIVATE, device->cap_mask))
-                       continue;
-               list_for_each_entry(chan, &device->channels, device_node) {
-                       if (!chan->client_count)
-                               continue;
-                       if (!min || chan->table_count < min->table_count)
-                               min = chan;
-
-                       if (dma_chan_is_local(chan, cpu))
-                               if (!localmin ||
-                                   chan->table_count < localmin->table_count)
-                                       localmin = chan;
-               }
-       }
-
-       chan = localmin ? localmin : min;
-
-       if (chan)
-               chan->table_count++;
-
-       return chan;
-}
-
-/**
- * dma_channel_rebalance - redistribute the available channels
- *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case,  and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case.  Must be called under
- * dma_list_mutex.
- */
-static void dma_channel_rebalance(void)
-{
-       struct dma_chan *chan;
-       struct dma_device *device;
-       int cpu;
-       int cap;
-
-       /* undo the last distribution */
-       for_each_dma_cap_mask(cap, dma_cap_mask_all)
-               for_each_possible_cpu(cpu)
-                       per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
-
-       list_for_each_entry(device, &dma_device_list, global_node) {
-               if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
-                       continue;
-               list_for_each_entry(chan, &device->channels, device_node)
-                       chan->table_count = 0;
-       }
-
-       /* don't populate the channel_table if no clients are available */
-       if (!dmaengine_ref_count)
-               return;
-
-       /* redistribute available channels */
-       for_each_dma_cap_mask(cap, dma_cap_mask_all)
-               for_each_online_cpu(cpu) {
-                       chan = min_chan(cap, cpu);
-                       per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
-               }
-}
-
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 {
        struct dma_device *device;
@@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 {
        struct dma_chan *chan;
 
-       if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+       if (mask && !dma_device_satisfies_mask(dev, mask)) {
                dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
                return NULL;
        }
@@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
        if (has_acpi_companion(dev) && !chan)
                chan = acpi_dma_request_slave_chan_by_name(dev, name);
 
-       if (chan) {
-               /* Valid channel found or requester needs to be deferred */
-               if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
-                       return chan;
-       }
+       if (PTR_ERR(chan) == -EPROBE_DEFER)
+               return chan;
+
+       if (!IS_ERR_OR_NULL(chan))
+               goto found;
 
        /* Try to find the channel via the DMA filter map(s) */
        mutex_lock(&dma_list_mutex);
@@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
        }
        mutex_unlock(&dma_list_mutex);
 
-       return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+       if (!IS_ERR_OR_NULL(chan))
+               goto found;
+
+       return ERR_PTR(-EPROBE_DEFER);
+
+found:
+       chan->slave = dev;
+       chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+       if (!chan->name)
+               return ERR_PTR(-ENOMEM);
+
+       if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+                             DMA_SLAVE_NAME))
+               dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+       if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+               dev_err(dev, "Cannot create DMA %s symlink\n", chan->name);
+       return chan;
 }
 EXPORT_SYMBOL_GPL(dma_request_chan);
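
Editor's note: with this change a successful dma_request_chan() also creates the two
sysfs symlinks, and dma_release_channel() tears them down. For context, a typical slave
consumer looks roughly like the sketch below; foo_probe() and the "rx" channel name are
illustrative placeholders for a driver's DT/ACPI binding, not taken from this patch:

/* Hedged sketch of a probe-time consumer of dma_request_chan(). */
static int foo_probe(struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	/* ... configure and use the channel ... */

	dma_release_channel(chan);	/* also removes the new sysfs links */
	return 0;
}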
 
@@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan)
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+       if (chan->slave) {
+               sysfs_remove_link(&chan->slave->kobj, chan->name);
+               kfree(chan->name);
+               chan->name = NULL;
+               chan->slave = NULL;
+       }
+       sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
        mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get);
  */
 void dmaengine_put(void)
 {
-       struct dma_device *device;
+       struct dma_device *device, *_d;
        struct dma_chan *chan;
 
        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
-       list_for_each_entry(device, &dma_device_list, global_node) {
+       list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
@@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device)
        return 0;
 }
 
+static int __dma_async_device_channel_register(struct dma_device *device,
+                                              struct dma_chan *chan,
+                                              int chan_id)
+{
+       int rc = 0;
+       int chancnt = device->chancnt;
+       atomic_t *idr_ref;
+       struct dma_chan *tchan;
+
+       tchan = list_first_entry_or_null(&device->channels,
+                                        struct dma_chan, device_node);
+       if (tchan->dev) {
+               idr_ref = tchan->dev->idr_ref;
+       } else {
+               idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+               if (!idr_ref)
+                       return -ENOMEM;
+               atomic_set(idr_ref, 0);
+       }
+
+       chan->local = alloc_percpu(typeof(*chan->local));
+       if (!chan->local)
+               goto err_out;
+       chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+       if (!chan->dev) {
+               free_percpu(chan->local);
+               chan->local = NULL;
+               goto err_out;
+       }
+
+       /*
+        * When the chan_id is a negative value, we are dynamically adding
+        * the channel. Otherwise we are static enumerating.
+        */
+       chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+       chan->dev->device.class = &dma_devclass;
+       chan->dev->device.parent = device->dev;
+       chan->dev->chan = chan;
+       chan->dev->idr_ref = idr_ref;
+       chan->dev->dev_id = device->dev_id;
+       atomic_inc(idr_ref);
+       dev_set_name(&chan->dev->device, "dma%dchan%d",
+                    device->dev_id, chan->chan_id);
+
+       rc = device_register(&chan->dev->device);
+       if (rc)
+               goto err_out;
+       chan->client_count = 0;
+       device->chancnt = chan->chan_id + 1;
+
+       return 0;
+
+ err_out:
+       free_percpu(chan->local);
+       kfree(chan->dev);
+       if (atomic_dec_return(idr_ref) == 0)
+               kfree(idr_ref);
+       return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+                                     struct dma_chan *chan)
+{
+       int rc;
+
+       rc = __dma_async_device_channel_register(device, chan, -1);
+       if (rc < 0)
+               return rc;
+
+       dma_channel_rebalance();
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+                                                 struct dma_chan *chan)
+{
+       WARN_ONCE(!device->device_release && chan->client_count,
+                 "%s called while %d clients hold a reference\n",
+                 __func__, chan->client_count);
+       mutex_lock(&dma_list_mutex);
+       list_del(&chan->device_node);
+       device->chancnt--;
+       chan->dev->chan = NULL;
+       mutex_unlock(&dma_list_mutex);
+       device_unregister(&chan->dev->device);
+       free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+                                        struct dma_chan *chan)
+{
+       __dma_async_device_channel_unregister(device, chan);
+       dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
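
Editor's note: these new hooks let a driver add or remove individual channels after
dma_async_device_register(). A hedged sketch of dynamic addition; the caller is expected
to have linked the channel into the device's channel list first, since
__dma_async_device_channel_register() only walks that list (foo_add_channel() and the
setup of new_chan beyond the list linkage are illustrative):

/* Hedged sketch: expose one dynamically discovered hardware queue as a
 * DMA channel on an already-registered dma_device. */
static int foo_add_channel(struct dma_device *dma, struct dma_chan *new_chan)
{
	new_chan->device = dma;
	list_add_tail(&new_chan->device_node, &dma->channels);
	return dma_async_device_channel_register(dma, new_chan);
}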
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
+ *
+ * After calling this routine the structure should not be freed except in the
+ * device_release() callback which will be called after
+ * dma_async_device_unregister() is called and no further references are taken.
  */
 int dma_async_device_register(struct dma_device *device)
 {
-       int chancnt = 0, rc;
+       int rc, i = 0;
        struct dma_chan* chan;
-       atomic_t *idr_ref;
 
        if (!device)
                return -ENODEV;
@@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device)
                return -EIO;
        }
 
+       device->owner = device->dev->driver->owner;
+
        if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
@@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device)
                return -EIO;
        }
 
+       if (!device->device_release)
+               dev_warn(device->dev,
+                        "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+       kref_init(&device->ref);
+
        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 
-       idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-       if (!idr_ref)
-               return -ENOMEM;
        rc = get_dma_id(device);
-       if (rc != 0) {
-               kfree(idr_ref);
+       if (rc != 0)
                return rc;
-       }
-
-       atomic_set(idr_ref, 0);
 
        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
-               rc = -ENOMEM;
-               chan->local = alloc_percpu(typeof(*chan->local));
-               if (chan->local == NULL)
-                       goto err_out;
-               chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
-               if (chan->dev == NULL) {
-                       free_percpu(chan->local);
-                       chan->local = NULL;
+               rc = __dma_async_device_channel_register(device, chan, i++);
+               if (rc < 0)
                        goto err_out;
-               }
-
-               chan->chan_id = chancnt++;
-               chan->dev->device.class = &dma_devclass;
-               chan->dev->device.parent = device->dev;
-               chan->dev->chan = chan;
-               chan->dev->idr_ref = idr_ref;
-               chan->dev->dev_id = device->dev_id;
-               atomic_inc(idr_ref);
-               dev_set_name(&chan->dev->device, "dma%dchan%d",
-                            device->dev_id, chan->chan_id);
-
-               rc = device_register(&chan->dev->device);
-               if (rc) {
-                       free_percpu(chan->local);
-                       chan->local = NULL;
-                       kfree(chan->dev);
-                       atomic_dec(idr_ref);
-                       goto err_out;
-               }
-               chan->client_count = 0;
-       }
-
-       if (!chancnt) {
-               dev_err(device->dev, "%s: device has no channels!\n", __func__);
-               rc = -ENODEV;
-               goto err_out;
        }
 
-       device->chancnt = chancnt;
-
        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device)
 
 err_out:
        /* if we never registered a channel just release the idr */
-       if (atomic_read(idr_ref) == 0) {
+       if (!device->chancnt) {
                ida_free(&dma_ida, device->dev_id);
-               kfree(idr_ref);
                return rc;
        }
 
@@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register);
  */
 void dma_async_device_unregister(struct dma_device *device)
 {
-       struct dma_chan *chan;
+       struct dma_chan *chan, *n;
+
+       list_for_each_entry_safe(chan, n, &device->channels, device_node)
+               __dma_async_device_channel_unregister(device, chan);
 
        mutex_lock(&dma_list_mutex);
-       list_del_rcu(&device->global_node);
+       /*
+        * setting DMA_PRIVATE ensures the device being torn down will not
+        * be used in the channel_table
+        */
+       dma_cap_set(DMA_PRIVATE, device->cap_mask);
        dma_channel_rebalance();
+       dma_device_put(device);
        mutex_unlock(&dma_list_mutex);
-
-       list_for_each_entry(chan, &device->channels, device_node) {
-               WARN_ONCE(chan->client_count,
-                         "%s called while %d clients hold a reference\n",
-                         __func__, chan->client_count);
-               mutex_lock(&dma_list_mutex);
-               chan->dev->chan = NULL;
-               mutex_unlock(&dma_list_mutex);
-               device_unregister(&chan->dev->device);
-               free_percpu(chan->local);
-       }
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
@@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
+static inline int desc_check_and_set_metadata_mode(
+       struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+       /* Make sure that the metadata mode is not mixed */
+       if (!desc->desc_metadata_mode) {
+               if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+                       desc->desc_metadata_mode = mode;
+               else
+                       return -ENOTSUPP;
+       } else if (desc->desc_metadata_mode != mode) {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+                                  void *data, size_t len)
+{
+       int ret;
+
+       if (!desc)
+               return -EINVAL;
+
+       ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+       if (ret)
+               return ret;
+
+       if (!desc->metadata_ops || !desc->metadata_ops->attach)
+               return -ENOTSUPP;
+
+       return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+                                     size_t *payload_len, size_t *max_len)
+{
+       int ret;
+
+       if (!desc)
+               return ERR_PTR(-EINVAL);
+
+       ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+               return ERR_PTR(-ENOTSUPP);
+
+       return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+                                   size_t payload_len)
+{
+       int ret;
+
+       if (!desc)
+               return -EINVAL;
+
+       ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+       if (ret)
+               return ret;
+
+       if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+               return -ENOTSUPP;
+
+       return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
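
Editor's note: taken together, these helpers implement two mutually exclusive metadata
modes: in DESC_METADATA_CLIENT the client attaches its own buffer before submit, while
in DESC_METADATA_ENGINE it borrows the engine's buffer via get_ptr()/set_len(). A hedged
client-mode sketch (foo_submit_with_metadata() is illustrative):

/* Hedged sketch of DESC_METADATA_CLIENT usage: attach a caller-owned
 * metadata buffer to a prepared descriptor, then submit it. */
static int foo_submit_with_metadata(struct dma_async_tx_descriptor *desc,
				    void *md_buf, size_t md_len)
{
	int ret;

	/* Fails with -ENOTSUPP when the channel lacks client-metadata
	 * support, or -EINVAL if the other mode was already selected. */
	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
	if (ret)
		return ret;

	return dma_submit_error(dmaengine_submit(desc)) ? -EIO : 0;
}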
+
 /* dma_wait_for_async_tx - spin wait for a transaction to complete
  * @tx: in-flight transaction to wait on
  */
@@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void)
        return class_register(&dma_devclass);
 }
 arch_initcall(dma_bus_init);
-
-
index 501c0b063f852d9a38a619940699d71b146399f4..e8a320c9e57c2d7448c2d67e20bda82178ac4d35 100644 (file)
@@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
                state->last = complete;
                state->used = used;
                state->residue = 0;
+               state->in_flight_bytes = 0;
        }
        return dma_async_is_complete(cookie, complete, used);
 }
@@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
                state->residue = residue;
 }
 
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+                                          u32 in_flight_bytes)
+{
+       if (state)
+               state->in_flight_bytes = in_flight_bytes;
+}
+
 struct dmaengine_desc_callback {
        dma_async_tx_callback callback;
        dma_async_tx_callback_result callback_result;
@@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
        return (cb->callback) ? true : false;
 }
 
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
 #endif
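
Editor's note: a driver that can report bytes already fetched into hardware FIFOs would
typically pair the new dma_set_in_flight_bytes() helper with dma_set_residue() in its
device_tx_status() hook. A hedged sketch, where foo_residue() and foo_in_flight() stand
in for driver-specific bookkeeping:

/* Hedged sketch of a device_tx_status() implementation reporting both
 * residue and in-flight bytes for transactions still in progress. */
static enum dma_status foo_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	dma_set_residue(txstate, foo_residue(chan, cookie));
	dma_set_in_flight_bytes(txstate, foo_in_flight(chan));
	return ret;
}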
index a1ce307c502fa02f4d5106d21993176811360cee..14c1ac26f8664a5002005af7298488d725979107 100644 (file)
@@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
 
        vchan_get_all_descriptors(&chan->vc, &head);
 
-       /*
-        * As vchan_dma_desc_free_list can access to desc_allocated list
-        * we need to call it in vc.lock context.
-        */
-       vchan_dma_desc_free_list(&chan->vc, &head);
-
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&chan->vc, &head);
+
        dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
 
        return 0;
index b1a7ca91701a80365e74e2f0d6669b8816297629..5697c3622699bd64093541a971193bed495cc932 100644 (file)
@@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
        u32 ch = fsl_chan->vchan.chan.chan_id;
        void __iomem *muxaddr;
        unsigned int chans_per_mux, ch_off;
+       int endian_diff[4] = {3, 1, -1, -3};
        u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
 
        chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
        ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
+       if (fsl_chan->edma->drvdata->mux_swap)
+               ch_off += endian_diff[ch_off % 4];
+
        muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
        slot = EDMAMUX_CHCFG_SOURCE(slot);
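
Editor's note: the endian_diff[] table above mirrors channel offsets within each 32-bit
mux register: offsets 0, 1, 2, 3 map to 3, 2, 1, 0, compensating for the byte-swapped
register layout that mux_swap flags on LS1028A. A tiny standalone illustration of the
remap (foo_swap_ch_off() is ours):

/* Hedged illustration of the mux_swap remap: within each group of four
 * channels the byte offsets are mirrored (0<->3, 1<->2). */
static unsigned int foo_swap_ch_off(unsigned int ch_off)
{
	static const int endian_diff[4] = {3, 1, -1, -3};

	return ch_off + endian_diff[ch_off % 4];
}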
 
index 5eaa2902ed3919e370f6c77cb401b2ca21f5781f..67e422590c9aedcfbbeedfa4a9ecf8a557a6dbba 100644 (file)
@@ -147,6 +147,7 @@ struct fsl_edma_drvdata {
        enum edma_version       version;
        u32                     dmamuxs;
        bool                    has_dmaclk;
+       bool                    mux_swap;
        int                     (*setup_irq)(struct platform_device *pdev,
                                             struct fsl_edma_engine *fsl_edma);
 };
index b626c06ac2e0bb3d502c5e9625caa9b26e11f9c1..eff7ebd8cf356172f8c5d4b15215d192b8821dff 100644 (file)
@@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = {
        .setup_irq = fsl_edma_irq_init,
 };
 
+static struct fsl_edma_drvdata ls1028a_data = {
+       .version = v1,
+       .dmamuxs = DMAMUX_NR,
+       .mux_swap = true,
+       .setup_irq = fsl_edma_irq_init,
+};
+
 static struct fsl_edma_drvdata imx7ulp_data = {
        .version = v3,
        .dmamuxs = 1,
@@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = {
 
 static const struct of_device_id fsl_edma_dt_ids[] = {
        { .compatible = "fsl,vf610-edma", .data = &vf610_data},
+       { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
        { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
        { /* sentinel */ }
 };
index 89792083d62c51749023dada82b34e48ab7c07c3..95cc0256b38782983845216bafcb502a094354c4 100644 (file)
@@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
 
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
 
-       if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
+       if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
                return;
 
        list_for_each_entry_safe(comp_temp, _comp_temp,
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
new file mode 100644 (file)
index 0000000..ed36192
--- /dev/null
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2019 HiSilicon Limited. */
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "virt-dma.h"
+
+#define HISI_DMA_SQ_BASE_L             0x0
+#define HISI_DMA_SQ_BASE_H             0x4
+#define HISI_DMA_SQ_DEPTH              0x8
+#define HISI_DMA_SQ_TAIL_PTR           0xc
+#define HISI_DMA_CQ_BASE_L             0x10
+#define HISI_DMA_CQ_BASE_H             0x14
+#define HISI_DMA_CQ_DEPTH              0x18
+#define HISI_DMA_CQ_HEAD_PTR           0x1c
+#define HISI_DMA_CTRL0                 0x20
+#define HISI_DMA_CTRL0_QUEUE_EN_S      0
+#define HISI_DMA_CTRL0_QUEUE_PAUSE_S   4
+#define HISI_DMA_CTRL1                 0x24
+#define HISI_DMA_CTRL1_QUEUE_RESET_S   0
+#define HISI_DMA_Q_FSM_STS             0x30
+#define HISI_DMA_FSM_STS_MASK          GENMASK(3, 0)
+#define HISI_DMA_INT_STS               0x40
+#define HISI_DMA_INT_STS_MASK          GENMASK(12, 0)
+#define HISI_DMA_INT_MSK               0x44
+#define HISI_DMA_MODE                  0x217c
+#define HISI_DMA_OFFSET                        0x100
+
+#define HISI_DMA_MSI_NUM               30
+#define HISI_DMA_CHAN_NUM              30
+#define HISI_DMA_Q_DEPTH_VAL           1024
+
+#define PCI_BAR_2                      2
+
+enum hisi_dma_mode {
+       EP = 0,
+       RC,
+};
+
+enum hisi_dma_chan_status {
+       DISABLE = -1,
+       IDLE = 0,
+       RUN,
+       CPL,
+       PAUSE,
+       HALT,
+       ABORT,
+       WAIT,
+       BUFFCLR,
+};
+
+struct hisi_dma_sqe {
+       __le32 dw0;
+#define OPCODE_MASK                    GENMASK(3, 0)
+#define OPCODE_SMALL_PACKAGE           0x1
+#define OPCODE_M2M                     0x4
+#define LOCAL_IRQ_EN                   BIT(8)
+#define ATTR_SRC_MASK                  GENMASK(14, 12)
+       __le32 dw1;
+       __le32 dw2;
+#define ATTR_DST_MASK                  GENMASK(26, 24)
+       __le32 length;
+       __le64 src_addr;
+       __le64 dst_addr;
+};
+
+struct hisi_dma_cqe {
+       __le32 rsv0;
+       __le32 rsv1;
+       __le16 sq_head;
+       __le16 rsv2;
+       __le16 rsv3;
+       __le16 w0;
+#define STATUS_MASK                    GENMASK(15, 1)
+#define STATUS_SUCC                    0x0
+#define VALID_BIT                      BIT(0)
+};
+
+struct hisi_dma_desc {
+       struct virt_dma_desc vd;
+       struct hisi_dma_sqe sqe;
+};
+
+struct hisi_dma_chan {
+       struct virt_dma_chan vc;
+       struct hisi_dma_dev *hdma_dev;
+       struct hisi_dma_sqe *sq;
+       struct hisi_dma_cqe *cq;
+       dma_addr_t sq_dma;
+       dma_addr_t cq_dma;
+       u32 sq_tail;
+       u32 cq_head;
+       u32 qp_num;
+       enum hisi_dma_chan_status status;
+       struct hisi_dma_desc *desc;
+};
+
+struct hisi_dma_dev {
+       struct pci_dev *pdev;
+       void __iomem *base;
+       struct dma_device dma_dev;
+       u32 chan_num;
+       u32 chan_depth;
+       struct hisi_dma_chan chan[];
+};
+
+static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct hisi_dma_chan, vc.chan);
+}
+
+static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct hisi_dma_desc, vd);
+}
+
+static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
+                                      u32 val)
+{
+       writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
+}
+
+static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
+{
+       u32 tmp;
+
+       tmp = readl_relaxed(addr);
+       tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
+       writel_relaxed(tmp, addr);
+}
+
+static void hisi_dma_free_irq_vectors(void *data)
+{
+       pci_free_irq_vectors(data);
+}
+
+static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+                              bool pause)
+{
+       void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+                            HISI_DMA_OFFSET;
+
+       hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
+}
+
+static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+                               bool enable)
+{
+       void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+                            HISI_DMA_OFFSET;
+
+       hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
+}
+
+static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+       hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
+                           HISI_DMA_INT_STS_MASK);
+}
+
+static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+       void __iomem *base = hdma_dev->base;
+
+       hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
+                           HISI_DMA_INT_STS_MASK);
+       hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
+}
+
+static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+       void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
+                            HISI_DMA_OFFSET;
+
+       hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
+}
+
+static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+       hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+       hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+{
+       struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+       u32 index = chan->qp_num, tmp;
+       int ret;
+
+       hisi_dma_pause_dma(hdma_dev, index, true);
+       hisi_dma_enable_dma(hdma_dev, index, false);
+       hisi_dma_mask_irq(hdma_dev, index);
+
+       ret = readl_relaxed_poll_timeout(hdma_dev->base +
+               HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+               FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
+       if (ret) {
+               dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
+               WARN_ON(1);
+       }
+
+       hisi_dma_do_reset(hdma_dev, index);
+       hisi_dma_reset_qp_point(hdma_dev, index);
+       hisi_dma_pause_dma(hdma_dev, index, false);
+       hisi_dma_enable_dma(hdma_dev, index, true);
+       hisi_dma_unmask_irq(hdma_dev, index);
+
+       ret = readl_relaxed_poll_timeout(hdma_dev->base +
+               HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+               FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
+       if (ret) {
+               dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
+               WARN_ON(1);
+       }
+}
+
+static void hisi_dma_free_chan_resources(struct dma_chan *c)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+       struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+
+       hisi_dma_reset_hw_chan(chan);
+       vchan_free_chan_resources(&chan->vc);
+
+       memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
+       memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
+       chan->sq_tail = 0;
+       chan->cq_head = 0;
+       chan->status = DISABLE;
+}
+
+static void hisi_dma_desc_free(struct virt_dma_desc *vd)
+{
+       kfree(to_hisi_dma_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *
+hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
+                        size_t len, unsigned long flags)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+       struct hisi_dma_desc *desc;
+
+       desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+
+       desc->sqe.length = cpu_to_le32(len);
+       desc->sqe.src_addr = cpu_to_le64(src);
+       desc->sqe.dst_addr = cpu_to_le64(dst);
+
+       return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+}
+
+static enum dma_status
+hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+                  struct dma_tx_state *txstate)
+{
+       return dma_cookie_status(c, cookie, txstate);
+}
+
+static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
+{
+       struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
+       struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+       struct hisi_dma_desc *desc;
+       struct virt_dma_desc *vd;
+
+       vd = vchan_next_desc(&chan->vc);
+       if (!vd) {
+               dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
+               chan->desc = NULL;
+               return;
+       }
+       list_del(&vd->node);
+       desc = to_hisi_dma_desc(vd);
+       chan->desc = desc;
+
+       memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));
+
+       /* update the remaining fields in the sqe */
+       sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
+       sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);
+
+       /* make sure data has been updated in sqe */
+       wmb();
+
+       /* update sq tail, point to new sqe position */
+       chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;
+
+       /* update sq_tail to trigger a new task */
+       hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
+                           chan->sq_tail);
+}
+
+static void hisi_dma_issue_pending(struct dma_chan *c)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       if (vchan_issue_pending(&chan->vc))
+               hisi_dma_start_transfer(chan);
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static int hisi_dma_terminate_all(struct dma_chan *c)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
+       if (chan->desc) {
+               vchan_terminate_vdesc(&chan->desc->vd);
+               chan->desc = NULL;
+       }
+
+       vchan_get_all_descriptors(&chan->vc, &head);
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+       vchan_dma_desc_free_list(&chan->vc, &head);
+       hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);
+
+       return 0;
+}
+
+static void hisi_dma_synchronize(struct dma_chan *c)
+{
+       struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+
+       vchan_synchronize(&chan->vc);
+}
+
+static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
+{
+       size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
+       size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
+       struct device *dev = &hdma_dev->pdev->dev;
+       struct hisi_dma_chan *chan;
+       int i;
+
+       for (i = 0; i < hdma_dev->chan_num; i++) {
+               chan = &hdma_dev->chan[i];
+               chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
+                                              GFP_KERNEL);
+               if (!chan->sq)
+                       return -ENOMEM;
+
+               chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
+                                              GFP_KERNEL);
+               if (!chan->cq)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+       struct hisi_dma_chan *chan = &hdma_dev->chan[index];
+       u32 hw_depth = hdma_dev->chan_depth - 1;
+       void __iomem *base = hdma_dev->base;
+
+       /* set sq, cq base */
+       hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
+                           lower_32_bits(chan->sq_dma));
+       hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
+                           upper_32_bits(chan->sq_dma));
+       hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
+                           lower_32_bits(chan->cq_dma));
+       hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
+                           upper_32_bits(chan->cq_dma));
+
+       /* set sq, cq depth */
+       hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
+       hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);
+
+       /* init sq tail and cq head */
+       hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+       hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+       hisi_dma_init_hw_qp(hdma_dev, qp_index);
+       hisi_dma_unmask_irq(hdma_dev, qp_index);
+       hisi_dma_enable_dma(hdma_dev, qp_index, true);
+}
+
+static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+       hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+}
+
+static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
+{
+       int i;
+
+       for (i = 0; i < hdma_dev->chan_num; i++) {
+               hdma_dev->chan[i].qp_num = i;
+               hdma_dev->chan[i].hdma_dev = hdma_dev;
+               hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
+               vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
+               hisi_dma_enable_qp(hdma_dev, i);
+       }
+}
+
+static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
+{
+       int i;
+
+       for (i = 0; i < hdma_dev->chan_num; i++) {
+               hisi_dma_disable_qp(hdma_dev, i);
+               tasklet_kill(&hdma_dev->chan[i].vc.task);
+       }
+}
+
+static irqreturn_t hisi_dma_irq(int irq, void *data)
+{
+       struct hisi_dma_chan *chan = data;
+       struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+       struct hisi_dma_desc *desc;
+       struct hisi_dma_cqe *cqe;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       desc = chan->desc;
+       cqe = chan->cq + chan->cq_head;
+       if (desc) {
+               if (FIELD_GET(STATUS_MASK, le16_to_cpu(cqe->w0)) == STATUS_SUCC) {
+                       chan->cq_head = (chan->cq_head + 1) %
+                                       hdma_dev->chan_depth;
+                       hisi_dma_chan_write(hdma_dev->base,
+                                           HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
+                                           chan->cq_head);
+                       vchan_cookie_complete(&desc->vd);
+               } else {
+                       dev_err(&hdma_dev->pdev->dev, "task error!\n");
+               }
+
+               chan->desc = NULL;
+       }
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
+{
+       struct pci_dev *pdev = hdma_dev->pdev;
+       int i, ret;
+
+       for (i = 0; i < hdma_dev->chan_num; i++) {
+               ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+                                      hisi_dma_irq, IRQF_SHARED, "hisi_dma",
+                                      &hdma_dev->chan[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* This function enables all hw channels in a device */
+static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
+{
+       int ret;
+
+       ret = hisi_dma_alloc_qps_mem(hdma_dev);
+       if (ret) {
+               dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
+               return ret;
+       }
+
+       ret = hisi_dma_request_qps_irq(hdma_dev);
+       if (ret) {
+               dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
+               return ret;
+       }
+
+       hisi_dma_enable_qps(hdma_dev);
+
+       return 0;
+}
+
+static void hisi_dma_disable_hw_channels(void *data)
+{
+       hisi_dma_disable_qps(data);
+}
+
+static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
+                             enum hisi_dma_mode mode)
+{
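+       /* program 1 for RC mode, 0 otherwise */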
+       writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
+}
+
+static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct device *dev = &pdev->dev;
+       struct hisi_dma_dev *hdma_dev;
+       struct dma_device *dma_dev;
+       size_t dev_size;
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret) {
+               dev_err(dev, "failed to enable device mem!\n");
+               return ret;
+       }
+
+       ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
+       if (ret) {
+               dev_err(dev, "failed to remap I/O region!\n");
+               return ret;
+       }
+
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (ret)
+               return ret;
+
+       ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (ret)
+               return ret;
+
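+       /* allocate the device struct together with its trailing channel array */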
+       dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM +
+                  sizeof(*hdma_dev);
+       hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL);
+       if (!hdma_dev)
+               return -ENOMEM;
+
+       hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
+       hdma_dev->pdev = pdev;
+       hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
+       hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
+
+       pci_set_drvdata(pdev, hdma_dev);
+       pci_set_master(pdev);
+
+       ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
+                                   PCI_IRQ_MSI);
+       if (ret < 0) {
+               dev_err(dev, "Failed to allocate MSI vectors!\n");
+               return ret;
+       }
+
+       ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
+       if (ret)
+               return ret;
+
+       dma_dev = &hdma_dev->dma_dev;
+       dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+       dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
+       dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
+       dma_dev->device_tx_status = hisi_dma_tx_status;
+       dma_dev->device_issue_pending = hisi_dma_issue_pending;
+       dma_dev->device_terminate_all = hisi_dma_terminate_all;
+       dma_dev->device_synchronize = hisi_dma_synchronize;
+       dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+       dma_dev->dev = dev;
+       INIT_LIST_HEAD(&dma_dev->channels);
+
+       hisi_dma_set_mode(hdma_dev, RC);
+
+       ret = hisi_dma_enable_hw_channels(hdma_dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to enable hw channel!\n");
+               return ret;
+       }
+
+       ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
+                                      hdma_dev);
+       if (ret)
+               return ret;
+
+       ret = dmaenginem_async_device_register(dma_dev);
+       if (ret < 0)
+               dev_err(dev, "failed to register device!\n");
+
+       return ret;
+}
+
+static const struct pci_device_id hisi_dma_pci_tbl[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
+       { 0, }
+};
+
+static struct pci_driver hisi_dma_pci_driver = {
+       .name           = "hisi_dma",
+       .id_table       = hisi_dma_pci_tbl,
+       .probe          = hisi_dma_probe,
+};
+
+module_pci_driver(hisi_dma_pci_driver);
+
+MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
+MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
new file mode 100644 (file)
index 0000000..8978b89
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IDXD) += idxd.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
new file mode 100644 (file)
index 0000000..1d73478
--- /dev/null
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/sched/task.h>
+#include <linux/intel-svm.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+struct idxd_cdev_context {
+       const char *name;
+       dev_t devt;
+       struct ida minor_ida;
+};
+
+/*
+ * ictx is an array of per-accelerator-type contexts, indexed by
+ * enum idxd_type.
+ */
+static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
+       { .name = "dsa" },
+};
+
+struct idxd_user_context {
+       struct idxd_wq *wq;
+       struct task_struct *task;
+       unsigned int flags;
+};
+
+enum idxd_cdev_cleanup {
+       CDEV_NORMAL = 0,
+       CDEV_FAILED,
+};
+
+static void idxd_cdev_dev_release(struct device *dev)
+{
+       dev_dbg(dev, "releasing cdev device\n");
+       kfree(dev);
+}
+
+static struct device_type idxd_cdev_device_type = {
+       .name = "idxd_cdev",
+       .release = idxd_cdev_dev_release,
+};
+
+static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+{
+       struct cdev *cdev = inode->i_cdev;
+
+       return container_of(cdev, struct idxd_cdev, cdev);
+}
+
+static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+{
+       return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+}
+
+static inline struct idxd_wq *inode_wq(struct inode *inode)
+{
+       return idxd_cdev_wq(inode_idxd_cdev(inode));
+}
+
+static int idxd_cdev_open(struct inode *inode, struct file *filp)
+{
+       struct idxd_user_context *ctx;
+       struct idxd_device *idxd;
+       struct idxd_wq *wq;
+       struct device *dev;
+       struct idxd_cdev *idxd_cdev;
+
+       wq = inode_wq(inode);
+       idxd = wq->idxd;
+       dev = &idxd->pdev->dev;
+       idxd_cdev = &wq->idxd_cdev;
+
+       dev_dbg(dev, "%s called\n", __func__);
+
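+       /* a dedicated WQ admits only a single user context at a time */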
+       if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
+               return -EBUSY;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->wq = wq;
+       filp->private_data = ctx;
+       idxd_wq_get(wq);
+       return 0;
+}
+
+static int idxd_cdev_release(struct inode *node, struct file *filep)
+{
+       struct idxd_user_context *ctx = filep->private_data;
+       struct idxd_wq *wq = ctx->wq;
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+
+       dev_dbg(dev, "%s called\n", __func__);
+       filep->private_data = NULL;
+
+       kfree(ctx);
+       idxd_wq_put(wq);
+       return 0;
+}
+
+static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
+                    const char *func)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+               dev_info_ratelimited(dev,
+                                    "%s: %s: mapping too large: %lu\n",
+                                    current->comm, func,
+                                    vma->vm_end - vma->vm_start);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct idxd_user_context *ctx = filp->private_data;
+       struct idxd_wq *wq = ctx->wq;
+       struct idxd_device *idxd = wq->idxd;
+       struct pci_dev *pdev = idxd->pdev;
+       phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
+       unsigned long pfn;
+       int rc;
+
+       dev_dbg(&pdev->dev, "%s called\n", __func__);
+       rc = check_vma(wq, vma, __func__);
+       if (rc < 0)
+               return rc;
+
+       vma->vm_flags |= VM_DONTCOPY;
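+       /* map the WQ's limited-privilege portal page into the user's address space */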
+       pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
+                               IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       vma->vm_private_data = ctx;
+
+       return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+                       vma->vm_page_prot);
+}
+
+static __poll_t idxd_cdev_poll(struct file *filp,
+                              struct poll_table_struct *wait)
+{
+       struct idxd_user_context *ctx = filp->private_data;
+       struct idxd_wq *wq = ctx->wq;
+       struct idxd_device *idxd = wq->idxd;
+       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+       unsigned long flags;
+       __poll_t out = 0;
+
+       poll_wait(filp, &idxd_cdev->err_queue, wait);
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       if (idxd->sw_err.valid)
+               out = EPOLLIN | EPOLLRDNORM;
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+       return out;
+}
+
+static const struct file_operations idxd_cdev_fops = {
+       .owner = THIS_MODULE,
+       .open = idxd_cdev_open,
+       .release = idxd_cdev_release,
+       .mmap = idxd_cdev_mmap,
+       .poll = idxd_cdev_poll,
+};
+
+int idxd_cdev_get_major(struct idxd_device *idxd)
+{
+       return MAJOR(ictx[idxd->type].devt);
+}
+
+static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+       struct idxd_cdev_context *cdev_ctx;
+       struct device *dev;
+       int minor, rc;
+
+       idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
+       if (!idxd_cdev->dev)
+               return -ENOMEM;
+
+       dev = idxd_cdev->dev;
+       dev->parent = &idxd->pdev->dev;
+       dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+                    idxd->id, wq->id);
+       dev->bus = idxd_get_bus_type(idxd);
+
+       cdev_ctx = &ictx[wq->idxd->type];
+       minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+       if (minor < 0) {
+               rc = minor;
+               goto ida_err;
+       }
+
+       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+       dev->type = &idxd_cdev_device_type;
+       rc = device_register(dev);
+       if (rc < 0) {
+               dev_err(&idxd->pdev->dev, "device register failed\n");
+               ida_simple_remove(&cdev_ctx->minor_ida, minor);
+               /* release() frees the struct device; don't touch it afterwards */
+               put_device(dev);
+               idxd_cdev->dev = NULL;
+               return rc;
+       }
+       idxd_cdev->minor = minor;
+
+       return 0;
+
+ ida_err:
+       kfree(dev);
+       idxd_cdev->dev = NULL;
+       return rc;
+}
+
+static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
+                                enum idxd_cdev_cleanup cdev_state)
+{
+       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+       struct idxd_cdev_context *cdev_ctx;
+
+       cdev_ctx = &ictx[wq->idxd->type];
+       if (cdev_state == CDEV_NORMAL)
+               cdev_del(&idxd_cdev->cdev);
+       device_unregister(idxd_cdev->dev);
+       /*
+        * device_unregister() drops the last reference, so the
+        * device_type->release() callback frees the allocated struct
+        * device; nothing further to clean up here.
+        */
+       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       idxd_cdev->dev = NULL;
+       idxd_cdev->minor = -1;
+}
+
+int idxd_wq_add_cdev(struct idxd_wq *wq)
+{
+       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+       struct cdev *cdev = &idxd_cdev->cdev;
+       struct device *dev;
+       int rc;
+
+       rc = idxd_wq_cdev_dev_setup(wq);
+       if (rc < 0)
+               return rc;
+
+       dev = idxd_cdev->dev;
+       cdev_init(cdev, &idxd_cdev_fops);
+       cdev_set_parent(cdev, &dev->kobj);
+       rc = cdev_add(cdev, dev->devt, 1);
+       if (rc) {
+               dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
+               idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
+               return rc;
+       }
+
+       init_waitqueue_head(&idxd_cdev->err_queue);
+       return 0;
+}
+
+void idxd_wq_del_cdev(struct idxd_wq *wq)
+{
+       idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
+}
+
+int idxd_cdev_register(void)
+{
+       int rc, i;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++) {
+               ida_init(&ictx[i].minor_ida);
+               rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
+                                        ictx[i].name);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+void idxd_cdev_remove(void)
+{
+       int i;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++) {
+               unregister_chrdev_region(ictx[i].devt, MINORMASK);
+               ida_destroy(&ictx[i].minor_ida);
+       }
+}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
new file mode 100644 (file)
index 0000000..ada69e7
--- /dev/null
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
+
+/* Interrupt control bits */
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       int msixcnt = pci_msix_vec_count(pdev);
+       union msix_perm perm;
+       u32 offset;
+
+       if (vec_id < 0 || vec_id >= msixcnt)
+               return -EINVAL;
+
+       offset = idxd->msix_perm_offset + vec_id * 8;
+       perm.bits = ioread32(idxd->reg_base + offset);
+       perm.ignore = 1;
+       iowrite32(perm.bits, idxd->reg_base + offset);
+
+       return 0;
+}
+
+void idxd_mask_msix_vectors(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       int msixcnt = pci_msix_vec_count(pdev);
+       int i, rc;
+
+       for (i = 0; i < msixcnt; i++) {
+               rc = idxd_mask_msix_vector(idxd, i);
+               if (rc < 0)
+                       dev_warn(&pdev->dev,
+                                "Failed disabling msix vec %d\n", i);
+       }
+}
+
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       int msixcnt = pci_msix_vec_count(pdev);
+       union msix_perm perm;
+       u32 offset;
+
+       if (vec_id < 0 || vec_id >= msixcnt)
+               return -EINVAL;
+
+       offset = idxd->msix_perm_offset + vec_id * 8;
+       perm.bits = ioread32(idxd->reg_base + offset);
+       perm.ignore = 0;
+       iowrite32(perm.bits, idxd->reg_base + offset);
+
+       return 0;
+}
+
+void idxd_unmask_error_interrupts(struct idxd_device *idxd)
+{
+       union genctrl_reg genctrl;
+
+       genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+       genctrl.softerr_int_en = 1;
+       iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+void idxd_mask_error_interrupts(struct idxd_device *idxd)
+{
+       union genctrl_reg genctrl;
+
+       genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+       genctrl.softerr_int_en = 0;
+       iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+static void free_hw_descs(struct idxd_wq *wq)
+{
+       int i;
+
+       for (i = 0; i < wq->num_descs; i++)
+               kfree(wq->hw_descs[i]);
+
+       kfree(wq->hw_descs);
+}
+
+static int alloc_hw_descs(struct idxd_wq *wq, int num)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+       int i;
+       int node = dev_to_node(dev);
+
+       wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
+                                   GFP_KERNEL, node);
+       if (!wq->hw_descs)
+               return -ENOMEM;
+
+       for (i = 0; i < num; i++) {
+               wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
+                                              GFP_KERNEL, node);
+               if (!wq->hw_descs[i]) {
+                       free_hw_descs(wq);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+static void free_descs(struct idxd_wq *wq)
+{
+       int i;
+
+       for (i = 0; i < wq->num_descs; i++)
+               kfree(wq->descs[i]);
+
+       kfree(wq->descs);
+}
+
+static int alloc_descs(struct idxd_wq *wq, int num)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+       int i;
+       int node = dev_to_node(dev);
+
+       wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
+                                GFP_KERNEL, node);
+       if (!wq->descs)
+               return -ENOMEM;
+
+       for (i = 0; i < num; i++) {
+               wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
+                                           GFP_KERNEL, node);
+               if (!wq->descs[i]) {
+                       free_descs(wq);
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+/* WQ control bits */
+int idxd_wq_alloc_resources(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct idxd_group *group = wq->group;
+       struct device *dev = &idxd->pdev->dev;
+       int rc, num_descs, i;
+
+       if (wq->type != IDXD_WQT_KERNEL)
+               return 0;
+
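+       /* size for the full WQ plus the most descriptors every engine in the group can hold in flight */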
+       num_descs = wq->size +
+               idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
+       wq->num_descs = num_descs;
+
+       rc = alloc_hw_descs(wq, num_descs);
+       if (rc < 0)
+               return rc;
+
+       wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
+       wq->compls = dma_alloc_coherent(dev, wq->compls_size,
+                                       &wq->compls_addr, GFP_KERNEL);
+       if (!wq->compls) {
+               rc = -ENOMEM;
+               goto fail_alloc_compls;
+       }
+
+       rc = alloc_descs(wq, num_descs);
+       if (rc < 0)
+               goto fail_alloc_descs;
+
+       rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
+                              dev_to_node(dev));
+       if (rc < 0)
+               goto fail_sbitmap_init;
+
+       for (i = 0; i < num_descs; i++) {
+               struct idxd_desc *desc = wq->descs[i];
+
+               desc->hw = wq->hw_descs[i];
+               desc->completion = &wq->compls[i];
+               desc->compl_dma  = wq->compls_addr +
+                       sizeof(struct dsa_completion_record) * i;
+               desc->id = i;
+               desc->wq = wq;
+
+               dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
+               desc->txd.tx_submit = idxd_dma_tx_submit;
+       }
+
+       return 0;
+
+ fail_sbitmap_init:
+       free_descs(wq);
+ fail_alloc_descs:
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ fail_alloc_compls:
+       free_hw_descs(wq);
+       return rc;
+}
+
+void idxd_wq_free_resources(struct idxd_wq *wq)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       if (wq->type != IDXD_WQT_KERNEL)
+               return;
+
+       free_hw_descs(wq);
+       free_descs(wq);
+       dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+       sbitmap_free(&wq->sbmap);
+}
+
+int idxd_wq_enable(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       u32 status;
+       int rc;
+
+       lockdep_assert_held(&idxd->dev_lock);
+
+       if (wq->state == IDXD_WQ_ENABLED) {
+               dev_dbg(dev, "WQ %d already enabled\n", wq->id);
+               return -ENXIO;
+       }
+
+       rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       if (status != IDXD_CMDSTS_SUCCESS &&
+           status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
+               dev_dbg(dev, "WQ enable failed: %#x\n", status);
+               return -ENXIO;
+       }
+
+       wq->state = IDXD_WQ_ENABLED;
+       dev_dbg(dev, "WQ %d enabled\n", wq->id);
+       return 0;
+}
+
+int idxd_wq_disable(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       u32 status, operand;
+       int rc;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       dev_dbg(dev, "Disabling WQ %d\n", wq->id);
+
+       if (wq->state != IDXD_WQ_ENABLED) {
+               dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+               return 0;
+       }
+
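+       /* operand: one-hot WQ bit (id % 16) in the low half, 16-WQ block index in the high half */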
+       operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+       rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       if (status != IDXD_CMDSTS_SUCCESS) {
+               dev_dbg(dev, "WQ disable failed: %#x\n", status);
+               return -ENXIO;
+       }
+
+       wq->state = IDXD_WQ_DISABLED;
+       dev_dbg(dev, "WQ %d disabled\n", wq->id);
+       return 0;
+}
+
+int idxd_wq_map_portal(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct pci_dev *pdev = idxd->pdev;
+       struct device *dev = &pdev->dev;
+       resource_size_t start;
+
+       start = pci_resource_start(pdev, IDXD_WQ_BAR);
+       start = start + wq->id * IDXD_PORTAL_SIZE;
+
+       wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+       if (!wq->dportal)
+               return -ENOMEM;
+       dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
+
+       return 0;
+}
+
+void idxd_wq_unmap_portal(struct idxd_wq *wq)
+{
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       devm_iounmap(dev, wq->dportal);
+}
+
+/* Device control bits */
+static inline bool idxd_is_enabled(struct idxd_device *idxd)
+{
+       union gensts_reg gensts;
+
+       gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+       return gensts.state == IDXD_DEVICE_STATE_ENABLED;
+}
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
+{
+       u32 sts, to = timeout;
+
+       lockdep_assert_held(&idxd->dev_lock);
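+       /* busy-poll the command status register until ACTIVE clears or the timeout expires */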
+       sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+       while (sts & IDXD_CMDSTS_ACTIVE && --to) {
+               cpu_relax();
+               sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+       }
+
+       if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
+               dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
+               *status = 0;
+               return -EBUSY;
+       }
+
+       *status = sts;
+       return 0;
+}
+
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
+{
+       union idxd_command_reg cmd;
+       int rc;
+       u32 status;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.cmd = cmd_code;
+       cmd.operand = operand;
+       dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
+               __func__, cmd_code, operand);
+       iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+
+       return 0;
+}
+
+int idxd_device_enable(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int rc;
+       u32 status;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       if (idxd_is_enabled(idxd)) {
+               dev_dbg(dev, "Device already enabled\n");
+               return -ENXIO;
+       }
+
+       rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       /* succeed if the command completed or the device was already enabled */
+       if (status != IDXD_CMDSTS_SUCCESS &&
+           status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
+               dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+               return -ENXIO;
+       }
+
+       idxd->state = IDXD_DEV_ENABLED;
+       return 0;
+}
+
+int idxd_device_disable(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int rc;
+       u32 status;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       if (!idxd_is_enabled(idxd)) {
+               dev_dbg(dev, "Device is not enabled\n");
+               return 0;
+       }
+
+       rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       /* succeed if the command completed or the device was already disabled */
+       if (status != IDXD_CMDSTS_SUCCESS &&
+           !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
+               dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+               rc = -ENXIO;
+               return rc;
+       }
+
+       idxd->state = IDXD_DEV_CONF_READY;
+       return 0;
+}
+
+int __idxd_device_reset(struct idxd_device *idxd)
+{
+       u32 status;
+       int rc;
+
+       rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
+       if (rc < 0)
+               return rc;
+       rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+       if (rc < 0)
+               return rc;
+
+       return 0;
+}
+
+int idxd_device_reset(struct idxd_device *idxd)
+{
+       unsigned long flags;
+       int rc;
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       rc = __idxd_device_reset(idxd);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+       return rc;
+}
+
+/* Device configuration bits */
+static void idxd_group_config_write(struct idxd_group *group)
+{
+       struct idxd_device *idxd = group->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       int i;
+       u32 grpcfg_offset;
+
+       dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
+
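+       /* each group's GRPCFG spans 64 bytes: a 32-byte WQ bitmap, the engine mask at +32, flags at +40 */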
+       /* setup GRPWQCFG */
+       for (i = 0; i < 4; i++) {
+               grpcfg_offset = idxd->grpcfg_offset +
+                       group->id * 64 + i * sizeof(u64);
+               iowrite64(group->grpcfg.wqs[i],
+                         idxd->reg_base + grpcfg_offset);
+               dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+                       group->id, i, grpcfg_offset,
+                       ioread64(idxd->reg_base + grpcfg_offset));
+       }
+
+       /* setup GRPENGCFG */
+       grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
+       iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
+       dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+               grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
+
+       /* setup GRPFLAGS */
+       grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
+       iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+       dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+               group->id, grpcfg_offset,
+               ioread32(idxd->reg_base + grpcfg_offset));
+}
+
+static int idxd_groups_config_write(struct idxd_device *idxd)
+{
+       union gencfg_reg reg;
+       int i;
+       struct device *dev = &idxd->pdev->dev;
+
+       /* Setup bandwidth token limit */
+       if (idxd->token_limit) {
+               reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+               reg.token_limit = idxd->token_limit;
+               iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+       }
+
+       dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
+               ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               idxd_group_config_write(group);
+       }
+
+       return 0;
+}
+
+static int idxd_wq_config_write(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       u32 wq_offset;
+       int i;
+
+       if (!wq->group)
+               return 0;
+
+       memset(&wq->wqcfg, 0, sizeof(union wqcfg));
+
+       /* bytes 0-3 */
+       wq->wqcfg.wq_size = wq->size;
+
+       if (wq->size == 0) {
+               dev_warn(dev, "Incorrect work queue size: 0\n");
+               return -EINVAL;
+       }
+
+       /* bytes 4-7 */
+       wq->wqcfg.wq_thresh = wq->threshold;
+
+       /* bytes 8-11 */
+       wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
+       wq->wqcfg.mode = 1;
+
+       wq->wqcfg.priority = wq->priority;
+
+       /* bytes 12-15 */
+       wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+       wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+
+       dev_dbg(dev, "WQ %d CFGs\n", wq->id);
+       for (i = 0; i < 8; i++) {
+               wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+               iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
+               dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+                       wq->id, i, wq_offset,
+                       ioread32(idxd->reg_base + wq_offset));
+       }
+
+       return 0;
+}
+
+static int idxd_wqs_config_write(struct idxd_device *idxd)
+{
+       int i, rc;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               rc = idxd_wq_config_write(wq);
+               if (rc < 0)
+                       return rc;
+       }
+
+       return 0;
+}
+
+static void idxd_group_flags_setup(struct idxd_device *idxd)
+{
+       int i;
+
+       /* TC-A 0 and TC-B 1 should be defaults */
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               if (group->tc_a == -1)
+                       group->grpcfg.flags.tc_a = 0;
+               else
+                       group->grpcfg.flags.tc_a = group->tc_a;
+               if (group->tc_b == -1)
+                       group->grpcfg.flags.tc_b = 1;
+               else
+                       group->grpcfg.flags.tc_b = group->tc_b;
+               group->grpcfg.flags.use_token_limit = group->use_token_limit;
+               group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
+               if (group->tokens_allowed)
+                       group->grpcfg.flags.tokens_allowed =
+                               group->tokens_allowed;
+               else
+                       group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+       }
+}
+
+static int idxd_engines_setup(struct idxd_device *idxd)
+{
+       int i, engines = 0;
+       struct idxd_engine *eng;
+       struct idxd_group *group;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               group = &idxd->groups[i];
+               group->grpcfg.engines = 0;
+       }
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               eng = &idxd->engines[i];
+               group = eng->group;
+
+               if (!group)
+                       continue;
+
+               group->grpcfg.engines |= BIT(eng->id);
+               engines++;
+       }
+
+       if (!engines)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int idxd_wqs_setup(struct idxd_device *idxd)
+{
+       struct idxd_wq *wq;
+       struct idxd_group *group;
+       int i, j, configured = 0;
+       struct device *dev = &idxd->pdev->dev;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               group = &idxd->groups[i];
+               for (j = 0; j < 4; j++)
+                       group->grpcfg.wqs[j] = 0;
+       }
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               wq = &idxd->wqs[i];
+               group = wq->group;
+
+               if (!wq->group)
+                       continue;
+               if (!wq->size)
+                       continue;
+
+               if (!wq_dedicated(wq)) {
+                       dev_warn(dev, "No shared workqueue support.\n");
+                       return -EINVAL;
+               }
+
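+               /* mark this WQ in the group's 64-bit WQ bitmap */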
+               group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
+               configured++;
+       }
+
+       if (configured == 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+int idxd_device_config(struct idxd_device *idxd)
+{
+       int rc;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       rc = idxd_wqs_setup(idxd);
+       if (rc < 0)
+               return rc;
+
+       rc = idxd_engines_setup(idxd);
+       if (rc < 0)
+               return rc;
+
+       idxd_group_flags_setup(idxd);
+
+       rc = idxd_wqs_config_write(idxd);
+       if (rc < 0)
+               return rc;
+
+       rc = idxd_groups_config_write(idxd);
+       if (rc < 0)
+               return rc;
+
+       return 0;
+}
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
new file mode 100644 (file)
index 0000000..c64c142
--- /dev/null
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+{
+       return container_of(c, struct idxd_wq, dma_chan);
+}
+
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+                          enum idxd_complete_type comp_type)
+{
+       struct dma_async_tx_descriptor *tx;
+       struct dmaengine_result res;
+       int complete = 1;
+
+       if (desc->completion->status == DSA_COMP_SUCCESS)
+               res.result = DMA_TRANS_NOERROR;
+       else if (desc->completion->status)
+               res.result = DMA_TRANS_WRITE_FAILED;
+       else if (comp_type == IDXD_COMPLETE_ABORT)
+               res.result = DMA_TRANS_ABORTED;
+       else
+               complete = 0;
+
+       tx = &desc->txd;
+       if (complete && tx->cookie) {
+               dma_cookie_complete(tx);
+               dma_descriptor_unmap(tx);
+               dmaengine_desc_get_callback_invoke(tx, &res);
+               tx->callback = NULL;
+               tx->callback_result = NULL;
+       }
+}
+
+static void op_flag_setup(unsigned long flags, u32 *desc_flags)
+{
+       *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
+       if (flags & DMA_PREP_INTERRUPT)
+               *desc_flags |= IDXD_OP_FLAG_RCI;
+}
+
+static inline void set_completion_address(struct idxd_desc *desc,
+                                         u64 *compl_addr)
+{
+       *compl_addr = desc->compl_dma;
+}
+
+static inline void idxd_prep_desc_common(struct idxd_wq *wq,
+                                        struct dsa_hw_desc *hw, char opcode,
+                                        u64 addr_f1, u64 addr_f2, u64 len,
+                                        u64 compl, u32 flags)
+{
+       struct idxd_device *idxd = wq->idxd;
+
+       hw->flags = flags;
+       hw->opcode = opcode;
+       hw->src_addr = addr_f1;
+       hw->dst_addr = addr_f2;
+       hw->xfer_size = len;
+       hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+       hw->completion_addr = compl;
+
+       /*
+        * Descriptor completion vectors are 1-8 for MSI-X. We
+        * round-robin through the 8 vectors.
+        */
+       wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+       hw->int_handle = wq->vec_ptr;
+}
+
+static struct dma_async_tx_descriptor *
+idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+                      dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+       struct idxd_wq *wq = to_idxd_wq(c);
+       u32 desc_flags;
+       struct idxd_device *idxd = wq->idxd;
+       struct idxd_desc *desc;
+
+       if (wq->state != IDXD_WQ_ENABLED)
+               return NULL;
+
+       if (len > idxd->max_xfer_bytes)
+               return NULL;
+
+       op_flag_setup(flags, &desc_flags);
+       desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+       if (IS_ERR(desc))
+               return NULL;
+
+       idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
+                             dma_src, dma_dest, len, desc->compl_dma,
+                             desc_flags);
+
+       desc->txd.flags = flags;
+
+       return &desc->txd;
+}
+
+static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct idxd_wq *wq = to_idxd_wq(chan);
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       idxd_wq_get(wq);
+       dev_dbg(dev, "%s: client_count: %d\n", __func__,
+               idxd_wq_refcount(wq));
+       return 0;
+}
+
+static void idxd_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct idxd_wq *wq = to_idxd_wq(chan);
+       struct device *dev = &wq->idxd->pdev->dev;
+
+       idxd_wq_put(wq);
+       dev_dbg(dev, "%s: client_count: %d\n", __func__,
+               idxd_wq_refcount(wq));
+}
+
+static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
+                                         dma_cookie_t cookie,
+                                         struct dma_tx_state *txstate)
+{
+       return dma_cookie_status(dma_chan, cookie, txstate);
+}
+
+/*
+ * issue_pending() does not need to do anything since tx_submit() does the job
+ * already.
+ */
+static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+{
+}
+
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct dma_chan *c = tx->chan;
+       struct idxd_wq *wq = to_idxd_wq(c);
+       dma_cookie_t cookie;
+       int rc;
+       struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);
+
+       cookie = dma_cookie_assign(tx);
+
+       rc = idxd_submit_desc(wq, desc);
+       if (rc < 0) {
+               idxd_free_desc(wq, desc);
+               return rc;
+       }
+
+       return cookie;
+}
+
+static void idxd_dma_release(struct dma_device *device)
+{
+}
+
+int idxd_register_dma_device(struct idxd_device *idxd)
+{
+       struct dma_device *dma = &idxd->dma_dev;
+
+       INIT_LIST_HEAD(&dma->channels);
+       dma->dev = &idxd->pdev->dev;
+
+       dma->device_release = idxd_dma_release;
+
+       if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
+               dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+               dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
+       }
+
+       dma->device_tx_status = idxd_dma_tx_status;
+       dma->device_issue_pending = idxd_dma_issue_pending;
+       dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+       dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+
+       return dma_async_device_register(&idxd->dma_dev);
+}
+
+void idxd_unregister_dma_device(struct idxd_device *idxd)
+{
+       dma_async_device_unregister(&idxd->dma_dev);
+}
+
+int idxd_register_dma_channel(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct dma_device *dma = &idxd->dma_dev;
+       struct dma_chan *chan = &wq->dma_chan;
+       int rc;
+
+       memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+       chan->device = dma;
+       list_add_tail(&chan->device_node, &dma->channels);
+       rc = dma_async_device_channel_register(dma, chan);
+       if (rc < 0)
+               return rc;
+
+       return 0;
+}
+
+void idxd_unregister_dma_channel(struct idxd_wq *wq)
+{
+       dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
new file mode 100644 (file)
index 0000000..b8f8a36
--- /dev/null
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_H_
+#define _IDXD_H_
+
+#include <linux/sbitmap.h>
+#include <linux/dmaengine.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include "registers.h"
+
+#define IDXD_DRIVER_VERSION    "1.00"
+
+extern struct kmem_cache *idxd_desc_pool;
+
+#define IDXD_REG_TIMEOUT       50
+#define IDXD_DRAIN_TIMEOUT     5000
+
+enum idxd_type {
+       IDXD_TYPE_UNKNOWN = -1,
+       IDXD_TYPE_DSA = 0,
+       IDXD_TYPE_MAX
+};
+
+#define IDXD_NAME_SIZE         128
+
+struct idxd_device_driver {
+       struct device_driver drv;
+};
+
+struct idxd_irq_entry {
+       struct idxd_device *idxd;
+       int id;
+       struct llist_head pending_llist;
+       struct list_head work_list;
+};
+
+struct idxd_group {
+       struct device conf_dev;
+       struct idxd_device *idxd;
+       struct grpcfg grpcfg;
+       int id;
+       int num_engines;
+       int num_wqs;
+       bool use_token_limit;
+       u8 tokens_allowed;
+       u8 tokens_reserved;
+       int tc_a;
+       int tc_b;
+};
+
+#define IDXD_MAX_PRIORITY      0xf
+
+enum idxd_wq_state {
+       IDXD_WQ_DISABLED = 0,
+       IDXD_WQ_ENABLED,
+};
+
+enum idxd_wq_flag {
+       WQ_FLAG_DEDICATED = 0,
+};
+
+enum idxd_wq_type {
+       IDXD_WQT_NONE = 0,
+       IDXD_WQT_KERNEL,
+       IDXD_WQT_USER,
+};
+
+struct idxd_cdev {
+       struct cdev cdev;
+       struct device *dev;
+       int minor;
+       struct wait_queue_head err_queue;
+};
+
+#define IDXD_ALLOCATED_BATCH_SIZE      128U
+#define WQ_NAME_SIZE   1024
+#define WQ_TYPE_SIZE   10
+
+enum idxd_op_type {
+       IDXD_OP_BLOCK = 0,
+       IDXD_OP_NONBLOCK = 1,
+};
+
+enum idxd_complete_type {
+       IDXD_COMPLETE_NORMAL = 0,
+       IDXD_COMPLETE_ABORT,
+};
+
+struct idxd_wq {
+       void __iomem *dportal;
+       struct device conf_dev;
+       struct idxd_cdev idxd_cdev;
+       struct idxd_device *idxd;
+       int id;
+       enum idxd_wq_type type;
+       struct idxd_group *group;
+       int client_count;
+       struct mutex wq_lock;   /* mutex for workqueue */
+       u32 size;
+       u32 threshold;
+       u32 priority;
+       enum idxd_wq_state state;
+       unsigned long flags;
+       union wqcfg wqcfg;
+       atomic_t dq_count;      /* dedicated queue flow control */
+       u32 vec_ptr;            /* interrupt steering */
+       struct dsa_hw_desc **hw_descs;
+       int num_descs;
+       struct dsa_completion_record *compls;
+       dma_addr_t compls_addr;
+       int compls_size;
+       struct idxd_desc **descs;
+       struct sbitmap sbmap;
+       struct dma_chan dma_chan;
+       struct percpu_rw_semaphore submit_lock;
+       wait_queue_head_t submit_waitq;
+       char name[WQ_NAME_SIZE + 1];
+};
+
+struct idxd_engine {
+       struct device conf_dev;
+       int id;
+       struct idxd_group *group;
+       struct idxd_device *idxd;
+};
+
+/* shadow registers */
+struct idxd_hw {
+       u32 version;
+       union gen_cap_reg gen_cap;
+       union wq_cap_reg wq_cap;
+       union group_cap_reg group_cap;
+       union engine_cap_reg engine_cap;
+       struct opcap opcap;
+};
+
+enum idxd_device_state {
+       IDXD_DEV_HALTED = -1,
+       IDXD_DEV_DISABLED = 0,
+       IDXD_DEV_CONF_READY,
+       IDXD_DEV_ENABLED,
+};
+
+enum idxd_device_flag {
+       IDXD_FLAG_CONFIGURABLE = 0,
+};
+
+struct idxd_device {
+       enum idxd_type type;
+       struct device conf_dev;
+       struct list_head list;
+       struct idxd_hw hw;
+       enum idxd_device_state state;
+       unsigned long flags;
+       int id;
+       int major;
+
+       struct pci_dev *pdev;
+       void __iomem *reg_base;
+
+       spinlock_t dev_lock;    /* spinlock for device */
+       struct idxd_group *groups;
+       struct idxd_wq *wqs;
+       struct idxd_engine *engines;
+
+       int num_groups;
+
+       u32 msix_perm_offset;
+       u32 wqcfg_offset;
+       u32 grpcfg_offset;
+       u32 perfmon_offset;
+
+       u64 max_xfer_bytes;
+       u32 max_batch_size;
+       int max_groups;
+       int max_engines;
+       int max_tokens;
+       int max_wqs;
+       int max_wq_size;
+       int token_limit;
+       int nr_tokens;          /* non-reserved tokens */
+
+       union sw_err_reg sw_err;
+
+       struct msix_entry *msix_entries;
+       int num_wq_irqs;
+       struct idxd_irq_entry *irq_entries;
+
+       struct dma_device dma_dev;
+};
+
+/* IDXD software descriptor */
+struct idxd_desc {
+       struct dsa_hw_desc *hw;
+       dma_addr_t desc_dma;
+       struct dsa_completion_record *completion;
+       dma_addr_t compl_dma;
+       struct dma_async_tx_descriptor txd;
+       struct llist_node llnode;
+       struct list_head list;
+       int id;
+       struct idxd_wq *wq;
+};
+
+#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
+#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+
+extern struct bus_type dsa_bus_type;
+
+static inline bool wq_dedicated(struct idxd_wq *wq)
+{
+       return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+enum idxd_portal_prot {
+       IDXD_PORTAL_UNLIMITED = 0,
+       IDXD_PORTAL_LIMITED,
+};
+
+static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
+{
+       return prot * 0x1000;
+}
+
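+/* each WQ owns four portal pages; prot selects the limited (+0x1000) or unlimited page */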
+static inline int idxd_get_wq_portal_full_offset(int wq_id,
+                                                enum idxd_portal_prot prot)
+{
+       return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+}
+
+static inline void idxd_set_type(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+
+       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
+               idxd->type = IDXD_TYPE_DSA;
+       else
+               idxd->type = IDXD_TYPE_UNKNOWN;
+}
+
+static inline void idxd_wq_get(struct idxd_wq *wq)
+{
+       wq->client_count++;
+}
+
+static inline void idxd_wq_put(struct idxd_wq *wq)
+{
+       wq->client_count--;
+}
+
+static inline int idxd_wq_refcount(struct idxd_wq *wq)
+{
+       return wq->client_count;
+}
+
+const char *idxd_get_dev_name(struct idxd_device *idxd);
+int idxd_register_bus_type(void);
+void idxd_unregister_bus_type(void);
+int idxd_setup_sysfs(struct idxd_device *idxd);
+void idxd_cleanup_sysfs(struct idxd_device *idxd);
+int idxd_register_driver(void);
+void idxd_unregister_driver(void);
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+
+/* device interrupt control */
+irqreturn_t idxd_irq_handler(int vec, void *data);
+irqreturn_t idxd_misc_thread(int vec, void *data);
+irqreturn_t idxd_wq_thread(int irq, void *data);
+void idxd_mask_error_interrupts(struct idxd_device *idxd);
+void idxd_unmask_error_interrupts(struct idxd_device *idxd);
+void idxd_mask_msix_vectors(struct idxd_device *idxd);
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
+
+/* device control */
+int idxd_device_enable(struct idxd_device *idxd);
+int idxd_device_disable(struct idxd_device *idxd);
+int idxd_device_reset(struct idxd_device *idxd);
+int __idxd_device_reset(struct idxd_device *idxd);
+void idxd_device_cleanup(struct idxd_device *idxd);
+int idxd_device_config(struct idxd_device *idxd);
+void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+
+/* work queue control */
+int idxd_wq_alloc_resources(struct idxd_wq *wq);
+void idxd_wq_free_resources(struct idxd_wq *wq);
+int idxd_wq_enable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_map_portal(struct idxd_wq *wq);
+void idxd_wq_unmap_portal(struct idxd_wq *wq);
+
+/* submission */
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+
+/* dmaengine */
+int idxd_register_dma_device(struct idxd_device *idxd);
+void idxd_unregister_dma_device(struct idxd_device *idxd);
+int idxd_register_dma_channel(struct idxd_wq *wq);
+void idxd_unregister_dma_channel(struct idxd_wq *wq);
+void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+                          enum idxd_complete_type comp_type);
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+
+/* cdev */
+int idxd_cdev_register(void);
+void idxd_cdev_remove(void);
+int idxd_cdev_get_major(struct idxd_device *idxd);
+int idxd_wq_add_cdev(struct idxd_wq *wq);
+void idxd_wq_del_cdev(struct idxd_wq *wq);
+
+#endif
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
new file mode 100644 (file)
index 0000000..7778c05
--- /dev/null
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/aer.h>
+#include <linux/fs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <uapi/linux/idxd.h>
+#include <linux/dmaengine.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+MODULE_VERSION(IDXD_DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+
+#define DRV_NAME "idxd"
+
+static struct idr idxd_idrs[IDXD_TYPE_MAX];
+static struct mutex idxd_idr_lock;
+
+static const struct pci_device_id idxd_pci_tbl[] = {
+       /* DSA ver 1.0 platforms */
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
+
+static char *idxd_name[] = {
+       "dsa",
+};
+
+const char *idxd_get_dev_name(struct idxd_device *idxd)
+{
+       return idxd_name[idxd->type];
+}
+
+static int idxd_setup_interrupts(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       struct device *dev = &pdev->dev;
+       struct msix_entry *msix;
+       struct idxd_irq_entry *irq_entry;
+       int i, msixcnt;
+       int rc = 0;
+
+       msixcnt = pci_msix_vec_count(pdev);
+       if (msixcnt < 0) {
+               dev_err(dev, "Not MSI-X interrupt capable.\n");
+               goto err_no_irq;
+       }
+
+       idxd->msix_entries = devm_kcalloc(dev, msixcnt,
+                       sizeof(struct msix_entry), GFP_KERNEL);
+       if (!idxd->msix_entries) {
+               rc = -ENOMEM;
+               goto err_no_irq;
+       }
+
+       for (i = 0; i < msixcnt; i++)
+               idxd->msix_entries[i].entry = i;
+
+       rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
+       if (rc) {
+               dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
+               goto err_no_irq;
+       }
+       dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
+
+       /*
+        * We implement 1 completion list per MSI-X entry except for
+        * entry 0, which is for errors and others.
+        */
+       idxd->irq_entries = devm_kcalloc(dev, msixcnt,
+                                        sizeof(struct idxd_irq_entry),
+                                        GFP_KERNEL);
+       if (!idxd->irq_entries) {
+               rc = -ENOMEM;
+               goto err_no_irq;
+       }
+
+       for (i = 0; i < msixcnt; i++) {
+               idxd->irq_entries[i].id = i;
+               idxd->irq_entries[i].idxd = idxd;
+       }
+
+       msix = &idxd->msix_entries[0];
+       irq_entry = &idxd->irq_entries[0];
+       rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
+                                      idxd_misc_thread, 0, "idxd-misc",
+                                      irq_entry);
+       if (rc < 0) {
+               dev_err(dev, "Failed to allocate misc interrupt.\n");
+               goto err_no_irq;
+       }
+
+       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
+               msix->vector);
+
+       /* first MSI-X entry is not for wq interrupts */
+       idxd->num_wq_irqs = msixcnt - 1;
+
+       for (i = 1; i < msixcnt; i++) {
+               msix = &idxd->msix_entries[i];
+               irq_entry = &idxd->irq_entries[i];
+
+               init_llist_head(&idxd->irq_entries[i].pending_llist);
+               INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
+               rc = devm_request_threaded_irq(dev, msix->vector,
+                                              idxd_irq_handler,
+                                              idxd_wq_thread, 0,
+                                              "idxd-portal", irq_entry);
+               if (rc < 0) {
+                       dev_err(dev, "Failed to allocate irq %d.\n",
+                               msix->vector);
+                       goto err_no_irq;
+               }
+               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
+                       i, msix->vector);
+       }
+
+       idxd_unmask_error_interrupts(idxd);
+
+       return 0;
+
+ err_no_irq:
+       /* Disable error interrupt generation */
+       idxd_mask_error_interrupts(idxd);
+       pci_disable_msix(pdev);
+       dev_err(dev, "No usable interrupts\n");
+       return rc;
+}
+
+static void idxd_wqs_free_lock(struct idxd_device *idxd)
+{
+       int i;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               percpu_free_rwsem(&wq->submit_lock);
+       }
+}
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i;
+
+       idxd->groups = devm_kcalloc(dev, idxd->max_groups,
+                                   sizeof(struct idxd_group), GFP_KERNEL);
+       if (!idxd->groups)
+               return -ENOMEM;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               idxd->groups[i].idxd = idxd;
+               idxd->groups[i].id = i;
+               idxd->groups[i].tc_a = -1;
+               idxd->groups[i].tc_b = -1;
+       }
+
+       idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
+                                GFP_KERNEL);
+       if (!idxd->wqs)
+               return -ENOMEM;
+
+       idxd->engines = devm_kcalloc(dev, idxd->max_engines,
+                                    sizeof(struct idxd_engine), GFP_KERNEL);
+       if (!idxd->engines)
+               return -ENOMEM;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+               int rc;
+
+               wq->id = i;
+               wq->idxd = idxd;
+               mutex_init(&wq->wq_lock);
+               atomic_set(&wq->dq_count, 0);
+               init_waitqueue_head(&wq->submit_waitq);
+               wq->idxd_cdev.minor = -1;
+               rc = percpu_init_rwsem(&wq->submit_lock);
+               if (rc < 0) {
+                       idxd_wqs_free_lock(idxd);
+                       return rc;
+               }
+       }
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               idxd->engines[i].idxd = idxd;
+               idxd->engines[i].id = i;
+       }
+
+       return 0;
+}
+
+static void idxd_read_table_offsets(struct idxd_device *idxd)
+{
+       union offsets_reg offsets;
+       struct device *dev = &idxd->pdev->dev;
+
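+       /* table locations are encoded in the offsets register in units of 0x100 bytes */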
+       offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
+       offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
+                       + sizeof(u64));
+       idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+       dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
+       idxd->wqcfg_offset = offsets.wqcfg * 0x100;
+       dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
+               idxd->wqcfg_offset);
+       idxd->msix_perm_offset = offsets.msix_perm * 0x100;
+       dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
+               idxd->msix_perm_offset);
+       idxd->perfmon_offset = offsets.perfmon * 0x100;
+       dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
+}
+
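+/*
+ * Read the capability registers once at probe time and cache the derived
+ * limits (max transfer and batch size, number of groups, engines, tokens,
+ * and workqueues) in the idxd context for later configuration checks.
+ */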
+static void idxd_read_caps(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i;
+
+       /* reading generic capabilities */
+       idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
+       dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
+       idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
+       dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
+       idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+       dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
+       if (idxd->hw.gen_cap.config_en)
+               set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
+
+       /* reading group capabilities */
+       idxd->hw.group_cap.bits =
+               ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
+       dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
+       idxd->max_groups = idxd->hw.group_cap.num_groups;
+       dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
+       idxd->max_tokens = idxd->hw.group_cap.total_tokens;
+       dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
+       idxd->nr_tokens = idxd->max_tokens;
+
+       /* read engine capabilities */
+       idxd->hw.engine_cap.bits =
+               ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
+       dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
+       idxd->max_engines = idxd->hw.engine_cap.num_engines;
+       dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
+
+       /* read workqueue capabilities */
+       idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
+       dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
+       idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
+       dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
+       idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
+       dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+
+       /* reading operation capabilities */
+       for (i = 0; i < 4; i++) {
+               idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
+                               IDXD_OPCAP_OFFSET + i * sizeof(u64));
+               dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
+       }
+}
+
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
+                                     void __iomem * const *iomap)
+{
+       struct device *dev = &pdev->dev;
+       struct idxd_device *idxd;
+
+       idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+       if (!idxd)
+               return NULL;
+
+       idxd->pdev = pdev;
+       idxd->reg_base = iomap[IDXD_MMIO_BAR];
+       spin_lock_init(&idxd->dev_lock);
+
+       return idxd;
+}
+
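+/*
+ * Device-level probe: reset the device, read its capabilities and table
+ * offsets, set up the software contexts and MSI-X interrupts, and then
+ * allocate a device ID from the per-type IDR.
+ */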
+static int idxd_probe(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       struct device *dev = &pdev->dev;
+       int rc;
+
+       dev_dbg(dev, "%s entered and resetting device\n", __func__);
+       rc = idxd_device_reset(idxd);
+       if (rc < 0)
+               return rc;
+       dev_dbg(dev, "IDXD reset complete\n");
+
+       idxd_read_caps(idxd);
+       idxd_read_table_offsets(idxd);
+
+       rc = idxd_setup_internals(idxd);
+       if (rc)
+               goto err_setup;
+
+       rc = idxd_setup_interrupts(idxd);
+       if (rc)
+               goto err_setup;
+
+       dev_dbg(dev, "IDXD interrupt setup complete.\n");
+
+       mutex_lock(&idxd_idr_lock);
+       idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
+       mutex_unlock(&idxd_idr_lock);
+       if (idxd->id < 0) {
+               rc = -ENOMEM;
+               goto err_idr_fail;
+       }
+
+       idxd->major = idxd_cdev_get_major(idxd);
+
+       dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
+       return 0;
+
+ err_idr_fail:
+       idxd_mask_error_interrupts(idxd);
+       idxd_mask_msix_vectors(idxd);
+ err_setup:
+       return rc;
+}
+
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       void __iomem * const *iomap;
+       struct device *dev = &pdev->dev;
+       struct idxd_device *idxd;
+       int rc;
+       unsigned int mask;
+
+       rc = pcim_enable_device(pdev);
+       if (rc)
+               return rc;
+
+       dev_dbg(dev, "Mapping BARs\n");
+       mask = (1 << IDXD_MMIO_BAR);
+       rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
+       if (rc)
+               return rc;
+
+       iomap = pcim_iomap_table(pdev);
+       if (!iomap)
+               return -ENOMEM;
+
+       dev_dbg(dev, "Set DMA masks\n");
+       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (rc)
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (rc)
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       dev_dbg(dev, "Alloc IDXD context\n");
+       idxd = idxd_alloc(pdev, iomap);
+       if (!idxd)
+               return -ENOMEM;
+
+       idxd_set_type(idxd);
+
+       dev_dbg(dev, "Set PCI master\n");
+       pci_set_master(pdev);
+       pci_set_drvdata(pdev, idxd);
+
+       idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+       rc = idxd_probe(idxd);
+       if (rc) {
+               dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+               return -ENODEV;
+       }
+
+       rc = idxd_setup_sysfs(idxd);
+       if (rc) {
+               dev_err(dev, "IDXD sysfs setup failed\n");
+               return -ENODEV;
+       }
+
+       idxd->state = IDXD_DEV_CONF_READY;
+
+       dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+                idxd->hw.version);
+
+       return 0;
+}
+
+static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+{
+       struct idxd_desc *desc, *itr;
+       struct llist_node *head;
+
+       head = llist_del_all(&ie->pending_llist);
+       if (!head)
+               return;
+
+       llist_for_each_entry_safe(desc, itr, head, llnode) {
+               idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+               idxd_free_desc(desc->wq, desc);
+       }
+}
+
+static void idxd_flush_work_list(struct idxd_irq_entry *ie)
+{
+       struct idxd_desc *desc, *iter;
+
+       list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
+               list_del(&desc->list);
+               idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+               idxd_free_desc(desc->wq, desc);
+       }
+}
+
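+/*
+ * On shutdown, disable the device and interrupts, then wait for any
+ * in-flight handlers and abort the descriptors still queued on the I/O
+ * vectors (vector 0 is the misc interrupt and carries no descriptor
+ * lists, so it is only synchronized, not flushed).
+ */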
+static void idxd_shutdown(struct pci_dev *pdev)
+{
+       struct idxd_device *idxd = pci_get_drvdata(pdev);
+       int rc, i;
+       struct idxd_irq_entry *irq_entry;
+       int msixcnt = pci_msix_vec_count(pdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       rc = idxd_device_disable(idxd);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+       if (rc)
+               dev_err(&pdev->dev, "Disabling device failed\n");
+
+       dev_dbg(&pdev->dev, "%s called\n", __func__);
+       idxd_mask_msix_vectors(idxd);
+       idxd_mask_error_interrupts(idxd);
+
+       for (i = 0; i < msixcnt; i++) {
+               irq_entry = &idxd->irq_entries[i];
+               synchronize_irq(idxd->msix_entries[i].vector);
+               if (i == 0)
+                       continue;
+               idxd_flush_pending_llist(irq_entry);
+               idxd_flush_work_list(irq_entry);
+       }
+}
+
+static void idxd_remove(struct pci_dev *pdev)
+{
+       struct idxd_device *idxd = pci_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s called\n", __func__);
+       idxd_cleanup_sysfs(idxd);
+       idxd_shutdown(pdev);
+       idxd_wqs_free_lock(idxd);
+       mutex_lock(&idxd_idr_lock);
+       idr_remove(&idxd_idrs[idxd->type], idxd->id);
+       mutex_unlock(&idxd_idr_lock);
+}
+
+static struct pci_driver idxd_pci_driver = {
+       .name           = DRV_NAME,
+       .id_table       = idxd_pci_tbl,
+       .probe          = idxd_pci_probe,
+       .remove         = idxd_remove,
+       .shutdown       = idxd_shutdown,
+};
+
+static int __init idxd_init_module(void)
+{
+       int err, i;
+
+       /*
+        * If the CPU does not support MOVDIR64B (a 512-bit write), there is
+        * no point in enumerating the device: we cannot submit to it.
+        */
+       if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+               pr_warn("idxd driver failed to load without MOVDIR64B.\n");
+               return -ENODEV;
+       }
+
+       pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
+               DRV_NAME, IDXD_DRIVER_VERSION);
+
+       mutex_init(&idxd_idr_lock);
+       for (i = 0; i < IDXD_TYPE_MAX; i++)
+               idr_init(&idxd_idrs[i]);
+
+       err = idxd_register_bus_type();
+       if (err < 0)
+               return err;
+
+       err = idxd_register_driver();
+       if (err < 0)
+               goto err_idxd_driver_register;
+
+       err = idxd_cdev_register();
+       if (err)
+               goto err_cdev_register;
+
+       err = pci_register_driver(&idxd_pci_driver);
+       if (err)
+               goto err_pci_register;
+
+       return 0;
+
+err_pci_register:
+       idxd_cdev_remove();
+err_cdev_register:
+       idxd_unregister_driver();
+err_idxd_driver_register:
+       idxd_unregister_bus_type();
+       return err;
+}
+module_init(idxd_init_module);
+
+static void __exit idxd_exit_module(void)
+{
+       pci_unregister_driver(&idxd_pci_driver);
+       idxd_cdev_remove();
+       idxd_unregister_bus_type();
+}
+module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
new file mode 100644
index 0000000..d6fcd2e
--- /dev/null
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+       int i;
+
+       lockdep_assert_held(&idxd->dev_lock);
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               wq->state = IDXD_WQ_DISABLED;
+       }
+}
+
+static int idxd_restart(struct idxd_device *idxd)
+{
+       int i, rc;
+
+       lockdep_assert_held(&idxd->dev_lock);
+
+       rc = __idxd_device_reset(idxd);
+       if (rc < 0)
+               goto out;
+
+       rc = idxd_device_config(idxd);
+       if (rc < 0)
+               goto out;
+
+       rc = idxd_device_enable(idxd);
+       if (rc < 0)
+               goto out;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               if (wq->state == IDXD_WQ_ENABLED) {
+                       rc = idxd_wq_enable(wq);
+                       if (rc < 0) {
+                               dev_warn(&idxd->pdev->dev,
+                                        "Unable to re-enable wq %s\n",
+                                        dev_name(&wq->conf_dev));
+                       }
+               }
+       }
+
+       return 0;
+
+ out:
+       idxd_device_wqs_clear_state(idxd);
+       idxd->state = IDXD_DEV_HALTED;
+       return rc;
+}
+
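+/*
+ * Hard irq half of the threaded interrupt: mask the vector so it cannot
+ * fire again until the thread function has run, then wake the thread.
+ * The thread functions unmask the vector when they are done.
+ */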
+irqreturn_t idxd_irq_handler(int vec, void *data)
+{
+       struct idxd_irq_entry *irq_entry = data;
+       struct idxd_device *idxd = irq_entry->idxd;
+
+       idxd_mask_msix_vector(idxd, irq_entry->id);
+       return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+       struct idxd_irq_entry *irq_entry = data;
+       struct idxd_device *idxd = irq_entry->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       union gensts_reg gensts;
+       u32 cause, val = 0;
+       int i, rc;
+       bool err = false;
+
+       cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+       if (cause & IDXD_INTC_ERR) {
+               spin_lock_bh(&idxd->dev_lock);
+               for (i = 0; i < 4; i++)
+                       idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+                                       IDXD_SWERR_OFFSET + i * sizeof(u64));
+               iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+               if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+                       int id = idxd->sw_err.wq_idx;
+                       struct idxd_wq *wq = &idxd->wqs[id];
+
+                       if (wq->type == IDXD_WQT_USER)
+                               wake_up_interruptible(&wq->idxd_cdev.err_queue);
+               } else {
+                       for (i = 0; i < idxd->max_wqs; i++) {
+                               struct idxd_wq *wq = &idxd->wqs[i];
+
+                               if (wq->type == IDXD_WQT_USER)
+                                       wake_up_interruptible(&wq->idxd_cdev.err_queue);
+                       }
+               }
+
+               spin_unlock_bh(&idxd->dev_lock);
+               val |= IDXD_INTC_ERR;
+
+               for (i = 0; i < 4; i++)
+                       dev_warn(dev, "err[%d]: %#16.16llx\n",
+                                i, idxd->sw_err.bits[i]);
+               err = true;
+       }
+
+       if (cause & IDXD_INTC_CMD) {
+               /* Driver does not use command interrupts; just ack the bit */
+               val |= IDXD_INTC_CMD;
+       }
+
+       if (cause & IDXD_INTC_OCCUPY) {
+               /* Driver does not utilize occupancy interrupt */
+               val |= IDXD_INTC_OCCUPY;
+       }
+
+       if (cause & IDXD_INTC_PERFMON_OVFL) {
+               /*
+                * Driver does not utilize perfmon counter overflow interrupt
+                * yet.
+                */
+               val |= IDXD_INTC_PERFMON_OVFL;
+       }
+
+       val ^= cause;
+       if (val)
+               dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
+                             val);
+
+       iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+       if (!err)
+               return IRQ_HANDLED;
+
+       gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+       if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+               spin_lock_bh(&idxd->dev_lock);
+               if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+                       rc = idxd_restart(idxd);
+                       if (rc < 0)
+                               dev_err(&idxd->pdev->dev,
+                                       "idxd restart failed, device halted\n");
+               } else {
+                       idxd_device_wqs_clear_state(idxd);
+                       idxd->state = IDXD_DEV_HALTED;
+                       dev_err(&idxd->pdev->dev,
+                               "idxd halted, need %s.\n",
+                               gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
+                               "FLR" : "system reset");
+               }
+               spin_unlock_bh(&idxd->dev_lock);
+       }
+
+       idxd_unmask_msix_vector(idxd, irq_entry->id);
+       return IRQ_HANDLED;
+}
+
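+/*
+ * Drain the lockless pending list: completed descriptors are handed to
+ * the dmaengine completion callback and freed, while descriptors still
+ * in flight are moved to the irq_entry's work_list. Returns the number
+ * of entries that remain queued.
+ */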
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+                                    int *processed)
+{
+       struct idxd_desc *desc, *t;
+       struct llist_node *head;
+       int queued = 0;
+
+       head = llist_del_all(&irq_entry->pending_llist);
+       if (!head)
+               return 0;
+
+       llist_for_each_entry_safe(desc, t, head, llnode) {
+               if (desc->completion->status) {
+                       idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+                       idxd_free_desc(desc->wq, desc);
+                       (*processed)++;
+               } else {
+                       list_add_tail(&desc->list, &irq_entry->work_list);
+                       queued++;
+               }
+       }
+
+       return queued;
+}
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+                                int *processed)
+{
+       struct list_head *node, *next;
+       int queued = 0;
+
+       if (list_empty(&irq_entry->work_list))
+               return 0;
+
+       list_for_each_safe(node, next, &irq_entry->work_list) {
+               struct idxd_desc *desc =
+                       container_of(node, struct idxd_desc, list);
+
+               if (desc->completion->status) {
+                       list_del(&desc->list);
+                       /* process and callback */
+                       idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+                       idxd_free_desc(desc->wq, desc);
+                       (*processed)++;
+               } else {
+                       queued++;
+               }
+       }
+
+       return queued;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+       struct idxd_irq_entry *irq_entry = data;
+       int rc, processed = 0, retry = 0;
+
+       /*
+        * There are two lists we are processing. The pending_llist is where
+        * the submitter adds each descriptor after sending it to the
+        * workqueue. It's a lockless singly linked list. The work_list is
+        * a regular Linux doubly linked list. We are in a scenario of
+        * multiple producers and a single consumer. The producers are all
+        * the kernel submitters of descriptors, and the consumer is the
+        * kernel irq handler thread for the msix vector when using threaded
+        * irq. To work within the restrictions of llist and remain
+        * lockless, we take the following steps:
+        * 1. Iterate through the work_list and process any completed
+        *    descriptors. Delete the completed entries during iteration.
+        * 2. llist_del_all() from the pending list.
+        * 3. Iterate through the llist that was deleted from the pending
+        *    list and process the completed entries.
+        * 4. If an entry is still waiting on hardware, list_add_tail() it
+        *    to the work_list.
+        * 5. Repeat until no more descriptors remain.
+        */
+       do {
+               rc = irq_process_work_list(irq_entry, &processed);
+               if (rc != 0) {
+                       retry++;
+                       continue;
+               }
+
+               rc = irq_process_pending_llist(irq_entry, &processed);
+       } while (rc != 0 && retry != 10);
+
+       idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+
+       if (processed == 0)
+               return IRQ_NONE;
+
+       return IRQ_HANDLED;
+}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
new file mode 100644
index 0000000..a39e7ae
--- /dev/null
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_REGISTERS_H_
+#define _IDXD_REGISTERS_H_
+
+/* PCI Config */
+#define PCI_DEVICE_ID_INTEL_DSA_SPR0   0x0b25
+
+#define IDXD_MMIO_BAR          0
+#define IDXD_WQ_BAR            2
+#define IDXD_PORTAL_SIZE       0x4000
+
+/* MMIO Device BAR0 Registers */
+#define IDXD_VER_OFFSET                        0x00
+#define IDXD_VER_MAJOR_MASK            0xf0
+#define IDXD_VER_MINOR_MASK            0x0f
+#define GET_IDXD_VER_MAJOR(x)          (((x) & IDXD_VER_MAJOR_MASK) >> 4)
+#define GET_IDXD_VER_MINOR(x)          ((x) & IDXD_VER_MINOR_MASK)
+
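+/*
+ * Register layouts are expressed as unions: a bit-field struct view for
+ * field access overlaid on a raw integer view ("bits") that is used for
+ * the actual MMIO reads and writes.
+ */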
+union gen_cap_reg {
+       struct {
+               u64 block_on_fault:1;
+               u64 overlap_copy:1;
+               u64 cache_control_mem:1;
+               u64 cache_control_cache:1;
+               u64 rsvd:3;
+               u64 int_handle_req:1;
+               u64 dest_readback:1;
+               u64 drain_readback:1;
+               u64 rsvd2:6;
+               u64 max_xfer_shift:5;
+               u64 max_batch_shift:4;
+               u64 max_ims_mult:6;
+               u64 config_en:1;
+               u64 max_descs_per_engine:8;
+               u64 rsvd3:24;
+       };
+       u64 bits;
+} __packed;
+#define IDXD_GENCAP_OFFSET             0x10
+
+union wq_cap_reg {
+       struct {
+               u64 total_wq_size:16;
+               u64 num_wqs:8;
+               u64 rsvd:24;
+               u64 shared_mode:1;
+               u64 dedicated_mode:1;
+               u64 rsvd2:1;
+               u64 priority:1;
+               u64 occupancy:1;
+               u64 occupancy_int:1;
+               u64 rsvd3:10;
+       };
+       u64 bits;
+} __packed;
+#define IDXD_WQCAP_OFFSET              0x20
+
+union group_cap_reg {
+       struct {
+               u64 num_groups:8;
+               u64 total_tokens:8;
+               u64 token_en:1;
+               u64 token_limit:1;
+               u64 rsvd:46;
+       };
+       u64 bits;
+} __packed;
+#define IDXD_GRPCAP_OFFSET             0x30
+
+union engine_cap_reg {
+       struct {
+               u64 num_engines:8;
+               u64 rsvd:56;
+       };
+       u64 bits;
+} __packed;
+
+#define IDXD_ENGCAP_OFFSET             0x38
+
+#define IDXD_OPCAP_NOOP                        0x0001
+#define IDXD_OPCAP_BATCH                       0x0002
+#define IDXD_OPCAP_MEMMOVE             0x0008
+struct opcap {
+       u64 bits[4];
+};
+
+#define IDXD_OPCAP_OFFSET              0x40
+
+#define IDXD_TABLE_OFFSET              0x60
+union offsets_reg {
+       struct {
+               u64 grpcfg:16;
+               u64 wqcfg:16;
+               u64 msix_perm:16;
+               u64 ims:16;
+               u64 perfmon:16;
+               u64 rsvd:48;
+       };
+       u64 bits[2];
+} __packed;
+
+#define IDXD_GENCFG_OFFSET             0x80
+union gencfg_reg {
+       struct {
+               u32 token_limit:8;
+               u32 rsvd:4;
+               u32 user_int_en:1;
+               u32 rsvd2:19;
+       };
+       u32 bits;
+} __packed;
+
+#define IDXD_GENCTRL_OFFSET            0x88
+union genctrl_reg {
+       struct {
+               u32 softerr_int_en:1;
+               u32 rsvd:31;
+       };
+       u32 bits;
+} __packed;
+
+#define IDXD_GENSTATS_OFFSET           0x90
+union gensts_reg {
+       struct {
+               u32 state:2;
+               u32 reset_type:2;
+               u32 rsvd:28;
+       };
+       u32 bits;
+} __packed;
+
+enum idxd_device_status_state {
+       IDXD_DEVICE_STATE_DISABLED = 0,
+       IDXD_DEVICE_STATE_ENABLED,
+       IDXD_DEVICE_STATE_DRAIN,
+       IDXD_DEVICE_STATE_HALT,
+};
+
+enum idxd_device_reset_type {
+       IDXD_DEVICE_RESET_SOFTWARE = 0,
+       IDXD_DEVICE_RESET_FLR,
+       IDXD_DEVICE_RESET_WARM,
+       IDXD_DEVICE_RESET_COLD,
+};
+
+#define IDXD_INTCAUSE_OFFSET           0x98
+#define IDXD_INTC_ERR                  0x01
+#define IDXD_INTC_CMD                  0x02
+#define IDXD_INTC_OCCUPY                       0x04
+#define IDXD_INTC_PERFMON_OVFL         0x08
+
+#define IDXD_CMD_OFFSET                        0xa0
+union idxd_command_reg {
+       struct {
+               u32 operand:20;
+               u32 cmd:5;
+               u32 rsvd:6;
+               u32 int_req:1;
+       };
+       u32 bits;
+} __packed;
+
+enum idxd_cmd {
+       IDXD_CMD_ENABLE_DEVICE = 1,
+       IDXD_CMD_DISABLE_DEVICE,
+       IDXD_CMD_DRAIN_ALL,
+       IDXD_CMD_ABORT_ALL,
+       IDXD_CMD_RESET_DEVICE,
+       IDXD_CMD_ENABLE_WQ,
+       IDXD_CMD_DISABLE_WQ,
+       IDXD_CMD_DRAIN_WQ,
+       IDXD_CMD_ABORT_WQ,
+       IDXD_CMD_RESET_WQ,
+       IDXD_CMD_DRAIN_PASID,
+       IDXD_CMD_ABORT_PASID,
+       IDXD_CMD_REQUEST_INT_HANDLE,
+};
+
+#define IDXD_CMDSTS_OFFSET             0xa8
+union cmdsts_reg {
+       struct {
+               u8 err;
+               u16 result;
+               u8 rsvd:7;
+               u8 active:1;
+       };
+       u32 bits;
+} __packed;
+#define IDXD_CMDSTS_ACTIVE             0x80000000
+
+enum idxd_cmdsts_err {
+       IDXD_CMDSTS_SUCCESS = 0,
+       IDXD_CMDSTS_INVAL_CMD,
+       IDXD_CMDSTS_INVAL_WQIDX,
+       IDXD_CMDSTS_HW_ERR,
+       /* enable device errors */
+       IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
+       IDXD_CMDSTS_ERR_CONFIG,
+       IDXD_CMDSTS_ERR_BUSMASTER_EN,
+       IDXD_CMDSTS_ERR_PASID_INVAL,
+       IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
+       IDXD_CMDSTS_ERR_GRP_CONFIG,
+       IDXD_CMDSTS_ERR_GRP_CONFIG2,
+       IDXD_CMDSTS_ERR_GRP_CONFIG3,
+       IDXD_CMDSTS_ERR_GRP_CONFIG4,
+       /* enable wq errors */
+       IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
+       IDXD_CMDSTS_ERR_WQ_ENABLED,
+       IDXD_CMDSTS_ERR_WQ_SIZE,
+       IDXD_CMDSTS_ERR_WQ_PRIOR,
+       IDXD_CMDSTS_ERR_WQ_MODE,
+       IDXD_CMDSTS_ERR_BOF_EN,
+       IDXD_CMDSTS_ERR_PASID_EN,
+       IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
+       IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
+       /* disable device errors */
+       IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
+       /* disable WQ, drain WQ, abort WQ, reset WQ */
+       IDXD_CMDSTS_ERR_DEV_NOT_EN,
+       /* request interrupt handle */
+       IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
+       IDXD_CMDSTS_ERR_NO_HANDLE,
+};
+
+#define IDXD_SWERR_OFFSET              0xc0
+#define IDXD_SWERR_VALID               0x00000001
+#define IDXD_SWERR_OVERFLOW            0x00000002
+#define IDXD_SWERR_ACK                 (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
+union sw_err_reg {
+       struct {
+               u64 valid:1;
+               u64 overflow:1;
+               u64 desc_valid:1;
+               u64 wq_idx_valid:1;
+               u64 batch:1;
+               u64 fault_rw:1;
+               u64 priv:1;
+               u64 rsvd:1;
+               u64 error:8;
+               u64 wq_idx:8;
+               u64 rsvd2:8;
+               u64 operation:8;
+               u64 pasid:20;
+               u64 rsvd3:4;
+
+               u64 batch_idx:16;
+               u64 rsvd4:16;
+               u64 invalid_flags:32;
+
+               u64 fault_addr;
+
+               u64 rsvd5;
+       };
+       u64 bits[4];
+} __packed;
+
+union msix_perm {
+       struct {
+               u32 rsvd:2;
+               u32 ignore:1;
+               u32 pasid_en:1;
+               u32 rsvd2:8;
+               u32 pasid:20;
+       };
+       u32 bits;
+} __packed;
+
+union group_flags {
+       struct {
+               u32 tc_a:3;
+               u32 tc_b:3;
+               u32 rsvd:1;
+               u32 use_token_limit:1;
+               u32 tokens_reserved:8;
+               u32 rsvd2:4;
+               u32 tokens_allowed:8;
+               u32 rsvd3:4;
+       };
+       u32 bits;
+} __packed;
+
+struct grpcfg {
+       u64 wqs[4];
+       u64 engines;
+       union group_flags flags;
+} __packed;
+
+union wqcfg {
+       struct {
+               /* bytes 0-3 */
+               u16 wq_size;
+               u16 rsvd;
+
+               /* bytes 4-7 */
+               u16 wq_thresh;
+               u16 rsvd1;
+
+               /* bytes 8-11 */
+               u32 mode:1;     /* shared or dedicated */
+               u32 bof:1;      /* block on fault */
+               u32 rsvd2:2;
+               u32 priority:4;
+               u32 pasid:20;
+               u32 pasid_en:1;
+               u32 priv:1;
+               u32 rsvd3:2;
+
+               /* bytes 12-15 */
+               u32 max_xfer_shift:5;
+               u32 max_batch_shift:4;
+               u32 rsvd4:23;
+
+               /* bytes 16-19 */
+               u16 occupancy_inth;
+               u16 occupancy_table_sel:1;
+               u16 rsvd5:15;
+
+               /* bytes 20-23 */
+               u16 occupancy_limit;
+               u16 occupancy_int_en:1;
+               u16 rsvd6:15;
+
+               /* bytes 24-27 */
+               u16 occupancy;
+               u16 occupancy_int:1;
+               u16 rsvd7:12;
+               u16 mode_support:1;
+               u16 wq_state:2;
+
+               /* bytes 28-31 */
+               u32 rsvd8;
+       };
+       u32 bits[8];
+} __packed;
+#endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
new file mode 100644
index 0000000..45a0c58
--- /dev/null
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
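+/*
+ * Descriptor allocation bounds the queue depth with dq_count against
+ * wq->size. The per-cpu rwsem is taken for read on the fast path; when
+ * the queue is full and the caller may block, the write side serializes
+ * sleepers on submit_waitq until a slot frees up. A free descriptor slot
+ * is then claimed from the scalable bitmap (sbitmap).
+ */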
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
+{
+       struct idxd_desc *desc;
+       int idx;
+       struct idxd_device *idxd = wq->idxd;
+
+       if (idxd->state != IDXD_DEV_ENABLED)
+               return ERR_PTR(-EIO);
+
+       if (optype == IDXD_OP_BLOCK)
+               percpu_down_read(&wq->submit_lock);
+       else if (!percpu_down_read_trylock(&wq->submit_lock))
+               return ERR_PTR(-EBUSY);
+
+       if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
+               int rc;
+
+               if (optype == IDXD_OP_NONBLOCK) {
+                       percpu_up_read(&wq->submit_lock);
+                       return ERR_PTR(-EAGAIN);
+               }
+
+               percpu_up_read(&wq->submit_lock);
+               percpu_down_write(&wq->submit_lock);
+               rc = wait_event_interruptible(wq->submit_waitq,
+                                             atomic_add_unless(&wq->dq_count,
+                                                               1, wq->size) ||
+                                              idxd->state != IDXD_DEV_ENABLED);
+               percpu_up_write(&wq->submit_lock);
+               if (rc < 0)
+                       return ERR_PTR(-EINTR);
+               if (idxd->state != IDXD_DEV_ENABLED)
+                       return ERR_PTR(-EIO);
+       } else {
+               percpu_up_read(&wq->submit_lock);
+       }
+
+       idx = sbitmap_get(&wq->sbmap, 0, false);
+       if (idx < 0) {
+               atomic_dec(&wq->dq_count);
+               return ERR_PTR(-EAGAIN);
+       }
+
+       desc = wq->descs[idx];
+       memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
+       memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+       return desc;
+}
+
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+       atomic_dec(&wq->dq_count);
+
+       sbitmap_clear_bit(&wq->sbmap, desc->id);
+       wake_up(&wq->submit_waitq);
+}
+
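+/*
+ * Submission writes the 64-byte hardware descriptor to the device portal
+ * with iosubmit_cmds512(), which uses the MOVDIR64B instruction that the
+ * module checked for at init time.
+ */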
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+       struct idxd_device *idxd = wq->idxd;
+       int vec = desc->hw->int_handle;
+       void __iomem *portal;
+
+       if (idxd->state != IDXD_DEV_ENABLED)
+               return -EIO;
+
+       portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+       /*
+        * The wmb() flushes writes to coherent DMA data before possibly
+        * triggering a DMA read. The wmb() is necessary even on UP because
+        * the recipient is a device.
+        */
+       wmb();
+       iosubmit_cmds512(portal, desc->hw, 1);
+
+       /*
+        * Add the descriptor to the pending lockless list of the irq_entry
+        * that this descriptor was assigned to.
+        */
+       if (desc->hw->flags & IDXD_OP_FLAG_RCI)
+               llist_add(&desc->llnode,
+                         &idxd->irq_entries[vec].pending_llist);
+
+       return 0;
+}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
new file mode 100644
index 0000000..849c50a
--- /dev/null
@@ -0,0 +1,1528 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+static char *idxd_wq_type_names[] = {
+       [IDXD_WQT_NONE]         = "none",
+       [IDXD_WQT_KERNEL]       = "kernel",
+       [IDXD_WQT_USER]         = "user",
+};
+
+static void idxd_conf_device_release(struct device *dev)
+{
+       dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
+}
+
+static struct device_type idxd_group_device_type = {
+       .name = "group",
+       .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_wq_device_type = {
+       .name = "wq",
+       .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_engine_device_type = {
+       .name = "engine",
+       .release = idxd_conf_device_release,
+};
+
+static struct device_type dsa_device_type = {
+       .name = "dsa",
+       .release = idxd_conf_device_release,
+};
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+       return dev ? dev->type == &dsa_device_type : false;
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+       return is_dsa_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+       return dev ? dev->type == &idxd_wq_device_type : false;
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+       if (wq->type == IDXD_WQT_KERNEL &&
+           strcmp(wq->name, "dmaengine") == 0)
+               return true;
+       return false;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+       return wq->type == IDXD_WQT_USER;
+}
+
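+/*
+ * Bus match callback for the dsa bus: only an idxd device that is ready
+ * for configuration, or a workqueue that is currently disabled, is
+ * allowed to bind to the "dsa" driver.
+ */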
+static int idxd_config_bus_match(struct device *dev,
+                                struct device_driver *drv)
+{
+       int matched = 0;
+
+       if (is_idxd_dev(dev)) {
+               struct idxd_device *idxd = confdev_to_idxd(dev);
+
+               if (idxd->state != IDXD_DEV_CONF_READY)
+                       return 0;
+               matched = 1;
+       } else if (is_idxd_wq_dev(dev)) {
+               struct idxd_wq *wq = confdev_to_wq(dev);
+               struct idxd_device *idxd = wq->idxd;
+
+               if (idxd->state < IDXD_DEV_CONF_READY)
+                       return 0;
+
+               if (wq->state != IDXD_WQ_DISABLED) {
+                       dev_dbg(dev, "%s not disabled\n", dev_name(dev));
+                       return 0;
+               }
+               matched = 1;
+       }
+
+       if (matched)
+               dev_dbg(dev, "%s matched\n", dev_name(dev));
+
+       return matched;
+}
+
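+/*
+ * Bus probe callback: binding the idxd device to the dsa driver writes
+ * the accumulated configuration to hardware and enables the device;
+ * binding a wq allocates its resources, enables it, maps its portal,
+ * and registers it as a dmaengine channel or a char device depending
+ * on its type.
+ */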
+static int idxd_config_bus_probe(struct device *dev)
+{
+       int rc;
+       unsigned long flags;
+
+       dev_dbg(dev, "%s called\n", __func__);
+
+       if (is_idxd_dev(dev)) {
+               struct idxd_device *idxd = confdev_to_idxd(dev);
+
+               if (idxd->state != IDXD_DEV_CONF_READY) {
+                       dev_warn(dev, "Device not ready for config\n");
+                       return -EBUSY;
+               }
+
+               if (!try_module_get(THIS_MODULE))
+                       return -ENXIO;
+
+               spin_lock_irqsave(&idxd->dev_lock, flags);
+
+               /* Perform IDXD configuration and enabling */
+               rc = idxd_device_config(idxd);
+               if (rc < 0) {
+                       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+                       dev_warn(dev, "Device config failed: %d\n", rc);
+                       return rc;
+               }
+
+               /* start device */
+               rc = idxd_device_enable(idxd);
+               if (rc < 0) {
+                       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+                       dev_warn(dev, "Device enable failed: %d\n", rc);
+                       return rc;
+               }
+
+               spin_unlock_irqrestore(&idxd->dev_lock, flags);
+               dev_info(dev, "Device %s enabled\n", dev_name(dev));
+
+               rc = idxd_register_dma_device(idxd);
+               if (rc < 0) {
+                       dev_dbg(dev, "Failed to register dmaengine device\n");
+                       return rc;
+               }
+               return 0;
+       } else if (is_idxd_wq_dev(dev)) {
+               struct idxd_wq *wq = confdev_to_wq(dev);
+               struct idxd_device *idxd = wq->idxd;
+
+               mutex_lock(&wq->wq_lock);
+
+               if (idxd->state != IDXD_DEV_ENABLED) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "Enabling while device not enabled.\n");
+                       return -EPERM;
+               }
+
+               if (wq->state != IDXD_WQ_DISABLED) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+                       return -EBUSY;
+               }
+
+               if (!wq->group) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ not attached to group.\n");
+                       return -EINVAL;
+               }
+
+               if (strlen(wq->name) == 0) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ name not set.\n");
+                       return -EINVAL;
+               }
+
+               rc = idxd_wq_alloc_resources(wq);
+               if (rc < 0) {
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ resource alloc failed\n");
+                       return rc;
+               }
+
+               spin_lock_irqsave(&idxd->dev_lock, flags);
+               rc = idxd_device_config(idxd);
+               if (rc < 0) {
+                       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "Writing WQ %d config failed: %d\n",
+                                wq->id, rc);
+                       return rc;
+               }
+
+               rc = idxd_wq_enable(wq);
+               if (rc < 0) {
+                       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+                       mutex_unlock(&wq->wq_lock);
+                       dev_warn(dev, "WQ %d enabling failed: %d\n",
+                                wq->id, rc);
+                       return rc;
+               }
+               spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+               rc = idxd_wq_map_portal(wq);
+               if (rc < 0) {
+                       dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+                       if (idxd_wq_disable(wq) < 0)
+                               dev_warn(dev, "IDXD wq disable failed\n");
+                       mutex_unlock(&wq->wq_lock);
+                       return rc;
+               }
+
+               wq->client_count = 0;
+
+               dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+               if (is_idxd_wq_dmaengine(wq)) {
+                       rc = idxd_register_dma_channel(wq);
+                       if (rc < 0) {
+                               dev_dbg(dev, "DMA channel register failed\n");
+                               mutex_unlock(&wq->wq_lock);
+                               return rc;
+                       }
+               } else if (is_idxd_wq_cdev(wq)) {
+                       rc = idxd_wq_add_cdev(wq);
+                       if (rc < 0) {
+                               dev_dbg(dev, "Cdev creation failed\n");
+                               mutex_unlock(&wq->wq_lock);
+                               return rc;
+                       }
+               }
+
+               mutex_unlock(&wq->wq_lock);
+               return 0;
+       }
+
+       return -ENODEV;
+}
+
+static void disable_wq(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       unsigned long flags;
+       int rc;
+
+       mutex_lock(&wq->wq_lock);
+       dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+       if (wq->state == IDXD_WQ_DISABLED) {
+               mutex_unlock(&wq->wq_lock);
+               return;
+       }
+
+       if (is_idxd_wq_dmaengine(wq))
+               idxd_unregister_dma_channel(wq);
+       else if (is_idxd_wq_cdev(wq))
+               idxd_wq_del_cdev(wq);
+
+       if (idxd_wq_refcount(wq))
+               dev_warn(dev, "Clients has claim on wq %d: %d\n",
+                        wq->id, idxd_wq_refcount(wq));
+
+       idxd_wq_unmap_portal(wq);
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       rc = idxd_wq_disable(wq);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+       idxd_wq_free_resources(wq);
+       wq->client_count = 0;
+       mutex_unlock(&wq->wq_lock);
+
+       if (rc < 0)
+               dev_warn(dev, "Failed to disable %s: %d\n",
+                        dev_name(&wq->conf_dev), rc);
+       else
+               dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+}
+
+static int idxd_config_bus_remove(struct device *dev)
+{
+       int rc;
+       unsigned long flags;
+
+       dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
+
+       /* disable workqueue here */
+       if (is_idxd_wq_dev(dev)) {
+               struct idxd_wq *wq = confdev_to_wq(dev);
+
+               disable_wq(wq);
+       } else if (is_idxd_dev(dev)) {
+               struct idxd_device *idxd = confdev_to_idxd(dev);
+               int i;
+
+               dev_dbg(dev, "%s removing dev %s\n", __func__,
+                       dev_name(&idxd->conf_dev));
+               for (i = 0; i < idxd->max_wqs; i++) {
+                       struct idxd_wq *wq = &idxd->wqs[i];
+
+                       if (wq->state == IDXD_WQ_DISABLED)
+                               continue;
+                       dev_warn(dev, "Active wq %d on disable %s.\n", i,
+                                dev_name(&idxd->conf_dev));
+                       device_release_driver(&wq->conf_dev);
+               }
+
+               idxd_unregister_dma_device(idxd);
+               spin_lock_irqsave(&idxd->dev_lock, flags);
+               rc = idxd_device_disable(idxd);
+               spin_unlock_irqrestore(&idxd->dev_lock, flags);
+               module_put(THIS_MODULE);
+               if (rc < 0)
+                       dev_warn(dev, "Device disable failed\n");
+               else
+                       dev_info(dev, "Device %s disabled\n", dev_name(dev));
+
+       }
+
+       return 0;
+}
+
+static void idxd_config_bus_shutdown(struct device *dev)
+{
+       dev_dbg(dev, "%s called\n", __func__);
+}
+
+struct bus_type dsa_bus_type = {
+       .name = "dsa",
+       .match = idxd_config_bus_match,
+       .probe = idxd_config_bus_probe,
+       .remove = idxd_config_bus_remove,
+       .shutdown = idxd_config_bus_shutdown,
+};
+
+static struct bus_type *idxd_bus_types[] = {
+       &dsa_bus_type
+};
+
+static struct idxd_device_driver dsa_drv = {
+       .drv = {
+               .name = "dsa",
+               .bus = &dsa_bus_type,
+               .owner = THIS_MODULE,
+               .mod_name = KBUILD_MODNAME,
+       },
+};
+
+static struct idxd_device_driver *idxd_drvs[] = {
+       &dsa_drv
+};
+
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
+{
+       return idxd_bus_types[idxd->type];
+}
+
+static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
+{
+       if (idxd->type == IDXD_TYPE_DSA)
+               return &dsa_device_type;
+       else
+               return NULL;
+}
+
+/* IDXD generic driver setup */
+int idxd_register_driver(void)
+{
+       int i, rc;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++) {
+               rc = driver_register(&idxd_drvs[i]->drv);
+               if (rc < 0)
+                       goto drv_fail;
+       }
+
+       return 0;
+
+drv_fail:
+       while (--i >= 0)
+               driver_unregister(&idxd_drvs[i]->drv);
+       return rc;
+}
+
+void idxd_unregister_driver(void)
+{
+       int i;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++)
+               driver_unregister(&idxd_drvs[i]->drv);
+}
+
+/* IDXD engine attributes */
+static ssize_t engine_group_id_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct idxd_engine *engine =
+               container_of(dev, struct idxd_engine, conf_dev);
+
+       if (engine->group)
+               return sprintf(buf, "%d\n", engine->group->id);
+       else
+               return sprintf(buf, "%d\n", -1);
+}
+
+static ssize_t engine_group_id_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct idxd_engine *engine =
+               container_of(dev, struct idxd_engine, conf_dev);
+       struct idxd_device *idxd = engine->idxd;
+       long id;
+       int rc;
+       struct idxd_group *prevg, *group;
+
+       rc = kstrtol(buf, 10, &id);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (id > idxd->max_groups - 1 || id < -1)
+               return -EINVAL;
+
+       if (id == -1) {
+               if (engine->group) {
+                       engine->group->num_engines--;
+                       engine->group = NULL;
+               }
+               return count;
+       }
+
+       group = &idxd->groups[id];
+       prevg = engine->group;
+
+       if (prevg)
+               prevg->num_engines--;
+       engine->group = group;
+       engine->group->num_engines++;
+
+       return count;
+}
+
+static struct device_attribute dev_attr_engine_group =
+               __ATTR(group_id, 0644, engine_group_id_show,
+                      engine_group_id_store);
+
+static struct attribute *idxd_engine_attributes[] = {
+       &dev_attr_engine_group.attr,
+       NULL,
+};
+
+static const struct attribute_group idxd_engine_attribute_group = {
+       .attrs = idxd_engine_attributes,
+};
+
+static const struct attribute_group *idxd_engine_attribute_groups[] = {
+       &idxd_engine_attribute_group,
+       NULL,
+};
+
+/* Group attributes */
+
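+/*
+ * Recompute the pool of unreserved tokens: the device maximum minus the
+ * sum of every group's reservation.
+ */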
+static void idxd_set_free_tokens(struct idxd_device *idxd)
+{
+       int i, tokens;
+
+       for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *g = &idxd->groups[i];
+
+               tokens += g->tokens_reserved;
+       }
+
+       idxd->nr_tokens = idxd->max_tokens - tokens;
+}
+
+static ssize_t group_tokens_reserved_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%u\n", group->tokens_reserved);
+}
+
+static ssize_t group_tokens_reserved_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (idxd->token_limit == 0)
+               return -EPERM;
+
+       if (val > idxd->max_tokens)
+               return -EINVAL;
+
+       if (val > idxd->nr_tokens)
+               return -EINVAL;
+
+       group->tokens_reserved = val;
+       idxd_set_free_tokens(idxd);
+       return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_reserved =
+               __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
+                      group_tokens_reserved_store);
+
+static ssize_t group_tokens_allowed_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%u\n", group->tokens_allowed);
+}
+
+static ssize_t group_tokens_allowed_store(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (idxd->token_limit == 0)
+               return -EPERM;
+       if (val < 4 * group->num_engines ||
+           val > group->tokens_reserved + idxd->nr_tokens)
+               return -EINVAL;
+
+       group->tokens_allowed = val;
+       return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_allowed =
+               __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
+                      group_tokens_allowed_store);
+
+static ssize_t group_use_token_limit_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%u\n", group->use_token_limit);
+}
+
+static ssize_t group_use_token_limit_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (idxd->token_limit == 0)
+               return -EPERM;
+
+       group->use_token_limit = !!val;
+       return count;
+}
+
+static struct device_attribute dev_attr_group_use_token_limit =
+               __ATTR(use_token_limit, 0644, group_use_token_limit_show,
+                      group_use_token_limit_store);
+
+static ssize_t group_engines_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       int i, rc = 0;
+       char *tmp = buf;
+       struct idxd_device *idxd = group->idxd;
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               struct idxd_engine *engine = &idxd->engines[i];
+
+               if (!engine->group)
+                       continue;
+
+               if (engine->group->id == group->id)
+                       rc += sprintf(tmp + rc, "engine%d.%d ",
+                                       idxd->id, engine->id);
+       }
+
+       if (rc)
+               rc--;   /* drop the trailing space of the last entry */
+       rc += sprintf(tmp + rc, "\n");
+
+       return rc;
+}
+
+static struct device_attribute dev_attr_group_engines =
+               __ATTR(engines, 0444, group_engines_show, NULL);
+
+static ssize_t group_work_queues_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       int i, rc = 0;
+       char *tmp = buf;
+       struct idxd_device *idxd = group->idxd;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               if (!wq->group)
+                       continue;
+
+               if (wq->group->id == group->id)
+                       rc += sprintf(tmp + rc, "wq%d.%d ",
+                                       idxd->id, wq->id);
+       }
+
+       if (rc)
+               rc--;   /* drop the trailing space of the last entry */
+       rc += sprintf(tmp + rc, "\n");
+
+       return rc;
+}
+
+static struct device_attribute dev_attr_group_work_queues =
+               __ATTR(work_queues, 0444, group_work_queues_show, NULL);
+
+static ssize_t group_traffic_class_a_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%d\n", group->tc_a);
+}
+
+static ssize_t group_traffic_class_a_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       long val;
+       int rc;
+
+       rc = kstrtol(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (val < 0 || val > 7)
+               return -EINVAL;
+
+       group->tc_a = val;
+       return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_a =
+               __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
+                      group_traffic_class_a_store);
+
+static ssize_t group_traffic_class_b_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+
+       return sprintf(buf, "%d\n", group->tc_b);
+}
+
+static ssize_t group_traffic_class_b_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+       struct idxd_group *group =
+               container_of(dev, struct idxd_group, conf_dev);
+       struct idxd_device *idxd = group->idxd;
+       long val;
+       int rc;
+
+       rc = kstrtol(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (val < 0 || val > 7)
+               return -EINVAL;
+
+       group->tc_b = val;
+       return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_b =
+               __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
+                      group_traffic_class_b_store);
+
+static struct attribute *idxd_group_attributes[] = {
+       &dev_attr_group_work_queues.attr,
+       &dev_attr_group_engines.attr,
+       &dev_attr_group_use_token_limit.attr,
+       &dev_attr_group_tokens_allowed.attr,
+       &dev_attr_group_tokens_reserved.attr,
+       &dev_attr_group_traffic_class_a.attr,
+       &dev_attr_group_traffic_class_b.attr,
+       NULL,
+};
+
+static const struct attribute_group idxd_group_attribute_group = {
+       .attrs = idxd_group_attributes,
+};
+
+static const struct attribute_group *idxd_group_attribute_groups[] = {
+       &idxd_group_attribute_group,
+       NULL,
+};
+
+/* IDXD work queue attribs */
+static ssize_t wq_clients_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%d\n", wq->client_count);
+}
+
+static struct device_attribute dev_attr_wq_clients =
+               __ATTR(clients, 0444, wq_clients_show, NULL);
+
+static ssize_t wq_state_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       switch (wq->state) {
+       case IDXD_WQ_DISABLED:
+               return sprintf(buf, "disabled\n");
+       case IDXD_WQ_ENABLED:
+               return sprintf(buf, "enabled\n");
+       }
+
+       return sprintf(buf, "unknown\n");
+}
+
+static struct device_attribute dev_attr_wq_state =
+               __ATTR(state, 0444, wq_state_show, NULL);
+
+static ssize_t wq_group_id_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       if (wq->group)
+               return sprintf(buf, "%u\n", wq->group->id);
+       else
+               return sprintf(buf, "-1\n");
+}
+
+static ssize_t wq_group_id_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       struct idxd_device *idxd = wq->idxd;
+       long id;
+       int rc;
+       struct idxd_group *prevg, *group;
+
+       rc = kstrtol(buf, 10, &id);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (id > idxd->max_groups - 1 || id < -1)
+               return -EINVAL;
+
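+       /* Writing -1 detaches the wq from its current group */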
+       if (id == -1) {
+               if (wq->group) {
+                       wq->group->num_wqs--;
+                       wq->group = NULL;
+               }
+               return count;
+       }
+
+       group = &idxd->groups[id];
+       prevg = wq->group;
+
+       if (prevg)
+               prevg->num_wqs--;
+       wq->group = group;
+       group->num_wqs++;
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_group_id =
+               __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
+
+static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%s\n",
+                       wq_dedicated(wq) ? "dedicated" : "shared");
+}
+
+static ssize_t wq_mode_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       struct idxd_device *idxd = wq->idxd;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
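+       /* Only dedicated mode is supported; anything else is rejected */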
+       if (sysfs_streq(buf, "dedicated")) {
+               set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+               wq->threshold = 0;
+       } else {
+               return -EINVAL;
+       }
+
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_mode =
+               __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
+
+static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%u\n", wq->size);
+}
+
+static ssize_t wq_size_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       unsigned long size;
+       struct idxd_device *idxd = wq->idxd;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &size);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (size > idxd->max_wq_size)
+               return -EINVAL;
+
+       wq->size = size;
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_size =
+               __ATTR(size, 0644, wq_size_show, wq_size_store);
+
+static ssize_t wq_priority_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%u\n", wq->priority);
+}
+
+static ssize_t wq_priority_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       unsigned long prio;
+       struct idxd_device *idxd = wq->idxd;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &prio);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (prio > IDXD_MAX_PRIORITY)
+               return -EINVAL;
+
+       wq->priority = prio;
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_priority =
+               __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
+
+static ssize_t wq_type_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       switch (wq->type) {
+       case IDXD_WQT_KERNEL:
+               return sprintf(buf, "%s\n",
+                              idxd_wq_type_names[IDXD_WQT_KERNEL]);
+       case IDXD_WQT_USER:
+               return sprintf(buf, "%s\n",
+                              idxd_wq_type_names[IDXD_WQT_USER]);
+       case IDXD_WQT_NONE:
+       default:
+               return sprintf(buf, "%s\n",
+                              idxd_wq_type_names[IDXD_WQT_NONE]);
+       }
+}
+
+static ssize_t wq_type_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+       enum idxd_wq_type old_type;
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       old_type = wq->type;
+       if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+               wq->type = IDXD_WQT_KERNEL;
+       else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
+               wq->type = IDXD_WQT_USER;
+       else
+               wq->type = IDXD_WQT_NONE;
+
+       /* If we are changing queue type, clear the name */
+       if (wq->type != old_type)
+               memset(wq->name, 0, WQ_NAME_SIZE + 1);
+
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_type =
+               __ATTR(type, 0644, wq_type_show, wq_type_store);
+
+static ssize_t wq_name_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%s\n", wq->name);
+}
+
+static ssize_t wq_name_store(struct device *dev,
+                            struct device_attribute *attr, const char *buf,
+                            size_t count)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       if (wq->state != IDXD_WQ_DISABLED)
+               return -EPERM;
+
+       if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
+               return -EINVAL;
+
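+       /* Zero the buffer and strip the trailing newline from the sysfs input */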
+       memset(wq->name, 0, WQ_NAME_SIZE + 1);
+       strncpy(wq->name, buf, WQ_NAME_SIZE);
+       strreplace(wq->name, '\n', '\0');
+       return count;
+}
+
+static struct device_attribute dev_attr_wq_name =
+               __ATTR(name, 0644, wq_name_show, wq_name_store);
+
+static ssize_t wq_cdev_minor_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+       return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
+}
+
+static struct device_attribute dev_attr_wq_cdev_minor =
+               __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
+
+static struct attribute *idxd_wq_attributes[] = {
+       &dev_attr_wq_clients.attr,
+       &dev_attr_wq_state.attr,
+       &dev_attr_wq_group_id.attr,
+       &dev_attr_wq_mode.attr,
+       &dev_attr_wq_size.attr,
+       &dev_attr_wq_priority.attr,
+       &dev_attr_wq_type.attr,
+       &dev_attr_wq_name.attr,
+       &dev_attr_wq_cdev_minor.attr,
+       NULL,
+};
+
+static const struct attribute_group idxd_wq_attribute_group = {
+       .attrs = idxd_wq_attributes,
+};
+
+static const struct attribute_group *idxd_wq_attribute_groups[] = {
+       &idxd_wq_attribute_group,
+       NULL,
+};
+
+/* IDXD device attribs */
+static ssize_t max_work_queues_size_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_wq_size);
+}
+static DEVICE_ATTR_RO(max_work_queues_size);
+
+static ssize_t max_groups_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_groups);
+}
+static DEVICE_ATTR_RO(max_groups);
+
+static ssize_t max_work_queues_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_wqs);
+}
+static DEVICE_ATTR_RO(max_work_queues);
+
+static ssize_t max_engines_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_engines);
+}
+static DEVICE_ATTR_RO(max_engines);
+
+static ssize_t numa_node_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
+static ssize_t max_batch_size_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_batch_size);
+}
+static DEVICE_ATTR_RO(max_batch_size);
+
+static ssize_t max_transfer_size_show(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
+}
+static DEVICE_ATTR_RO(max_transfer_size);
+
+static ssize_t op_cap_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+}
+static DEVICE_ATTR_RO(op_cap);
+
+static ssize_t configurable_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n",
+                       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
+}
+static DEVICE_ATTR_RO(configurable);
+
+static ssize_t clients_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+       unsigned long flags;
+       int count = 0, i;
+
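+       /* Sum the client counts of all wqs under the device lock */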
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               count += wq->client_count;
+       }
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+       return sprintf(buf, "%d\n", count);
+}
+static DEVICE_ATTR_RO(clients);
+
+static ssize_t state_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       switch (idxd->state) {
+       case IDXD_DEV_DISABLED:
+       case IDXD_DEV_CONF_READY:
+               return sprintf(buf, "disabled\n");
+       case IDXD_DEV_ENABLED:
+               return sprintf(buf, "enabled\n");
+       case IDXD_DEV_HALTED:
+               return sprintf(buf, "halted\n");
+       }
+
+       return sprintf(buf, "unknown\n");
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t errors_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+       int i, out = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&idxd->dev_lock, flags);
+       for (i = 0; i < 4; i++)
+               out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
+       spin_unlock_irqrestore(&idxd->dev_lock, flags);
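+       /* Replace the trailing space with a newline */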
+       out--;
+       out += sprintf(buf + out, "\n");
+       return out;
+}
+static DEVICE_ATTR_RO(errors);
+
+static ssize_t max_tokens_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->max_tokens);
+}
+static DEVICE_ATTR_RO(max_tokens);
+
+static ssize_t token_limit_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->token_limit);
+}
+
+static ssize_t token_limit_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 10, &val);
+       if (rc < 0)
+               return -EINVAL;
+
+       if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
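+       /* The token limit is only writable when the device advertises support */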
+       if (!idxd->hw.group_cap.token_limit)
+               return -EPERM;
+
+       if (val > idxd->hw.group_cap.total_tokens)
+               return -EINVAL;
+
+       idxd->token_limit = val;
+       return count;
+}
+static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t cdev_major_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
+
+       return sprintf(buf, "%u\n", idxd->major);
+}
+static DEVICE_ATTR_RO(cdev_major);
+
+static struct attribute *idxd_device_attributes[] = {
+       &dev_attr_max_groups.attr,
+       &dev_attr_max_work_queues.attr,
+       &dev_attr_max_work_queues_size.attr,
+       &dev_attr_max_engines.attr,
+       &dev_attr_numa_node.attr,
+       &dev_attr_max_batch_size.attr,
+       &dev_attr_max_transfer_size.attr,
+       &dev_attr_op_cap.attr,
+       &dev_attr_configurable.attr,
+       &dev_attr_clients.attr,
+       &dev_attr_state.attr,
+       &dev_attr_errors.attr,
+       &dev_attr_max_tokens.attr,
+       &dev_attr_token_limit.attr,
+       &dev_attr_cdev_major.attr,
+       NULL,
+};
+
+static const struct attribute_group idxd_device_attribute_group = {
+       .attrs = idxd_device_attributes,
+};
+
+static const struct attribute_group *idxd_attribute_groups[] = {
+       &idxd_device_attribute_group,
+       NULL,
+};
+
+static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i, rc;
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               struct idxd_engine *engine = &idxd->engines[i];
+
+               engine->conf_dev.parent = &idxd->conf_dev;
+               dev_set_name(&engine->conf_dev, "engine%d.%d",
+                            idxd->id, engine->id);
+               engine->conf_dev.bus = idxd_get_bus_type(idxd);
+               engine->conf_dev.groups = idxd_engine_attribute_groups;
+               engine->conf_dev.type = &idxd_engine_device_type;
+               dev_dbg(dev, "Engine device register: %s\n",
+                       dev_name(&engine->conf_dev));
+               rc = device_register(&engine->conf_dev);
+               if (rc < 0) {
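+                       /* device_register() requires put_device() even on failure */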
+                       put_device(&engine->conf_dev);
+                       goto cleanup;
+               }
+       }
+
+       return 0;
+
+cleanup:
+       while (i--) {
+               struct idxd_engine *engine = &idxd->engines[i];
+
+               device_unregister(&engine->conf_dev);
+       }
+       return rc;
+}
+
+static int idxd_setup_group_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i, rc;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               group->conf_dev.parent = &idxd->conf_dev;
+               dev_set_name(&group->conf_dev, "group%d.%d",
+                            idxd->id, group->id);
+               group->conf_dev.bus = idxd_get_bus_type(idxd);
+               group->conf_dev.groups = idxd_group_attribute_groups;
+               group->conf_dev.type = &idxd_group_device_type;
+               dev_dbg(dev, "Group device register: %s\n",
+                       dev_name(&group->conf_dev));
+               rc = device_register(&group->conf_dev);
+               if (rc < 0) {
+                       put_device(&group->conf_dev);
+                       goto cleanup;
+               }
+       }
+
+       return 0;
+
+cleanup:
+       while (i--) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               device_unregister(&group->conf_dev);
+       }
+       return rc;
+}
+
+static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int i, rc;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               wq->conf_dev.parent = &idxd->conf_dev;
+               dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+               wq->conf_dev.bus = idxd_get_bus_type(idxd);
+               wq->conf_dev.groups = idxd_wq_attribute_groups;
+               wq->conf_dev.type = &idxd_wq_device_type;
+               dev_dbg(dev, "WQ device register: %s\n",
+                       dev_name(&wq->conf_dev));
+               rc = device_register(&wq->conf_dev);
+               if (rc < 0) {
+                       put_device(&wq->conf_dev);
+                       goto cleanup;
+               }
+       }
+
+       return 0;
+
+cleanup:
+       while (i--) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               device_unregister(&wq->conf_dev);
+       }
+       return rc;
+}
+
+static int idxd_setup_device_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int rc;
+       char devname[IDXD_NAME_SIZE];
+
+       sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+       idxd->conf_dev.parent = dev;
+       dev_set_name(&idxd->conf_dev, "%s", devname);
+       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+       idxd->conf_dev.groups = idxd_attribute_groups;
+       idxd->conf_dev.type = idxd_get_device_type(idxd);
+
+       dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+       rc = device_register(&idxd->conf_dev);
+       if (rc < 0) {
+               put_device(&idxd->conf_dev);
+               return rc;
+       }
+
+       return 0;
+}
+
+int idxd_setup_sysfs(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       int rc;
+
+       rc = idxd_setup_device_sysfs(idxd);
+       if (rc < 0) {
+               dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+               return rc;
+       }
+
+       rc = idxd_setup_wq_sysfs(idxd);
+       if (rc < 0) {
+               /* unregister conf dev */
+               dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+               return rc;
+       }
+
+       rc = idxd_setup_group_sysfs(idxd);
+       if (rc < 0) {
+               /* unregister conf dev */
+               dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+               return rc;
+       }
+
+       rc = idxd_setup_engine_sysfs(idxd);
+       if (rc < 0) {
+               /* unregister conf dev */
+               dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+void idxd_cleanup_sysfs(struct idxd_device *idxd)
+{
+       int i;
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = &idxd->wqs[i];
+
+               device_unregister(&wq->conf_dev);
+       }
+
+       for (i = 0; i < idxd->max_engines; i++) {
+               struct idxd_engine *engine = &idxd->engines[i];
+
+               device_unregister(&engine->conf_dev);
+       }
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = &idxd->groups[i];
+
+               device_unregister(&group->conf_dev);
+       }
+
+       device_unregister(&idxd->conf_dev);
+}
+
+int idxd_register_bus_type(void)
+{
+       int i, rc;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++) {
+               rc = bus_register(idxd_bus_types[i]);
+               if (rc < 0)
+                       goto bus_err;
+       }
+
+       return 0;
+
+bus_err:
+       while (--i >= 0)
+               bus_unregister(idxd_bus_types[i]);
+       return rc;
+}
+
+void idxd_unregister_bus_type(void)
+{
+       int i;
+
+       for (i = 0; i < IDXD_TYPE_MAX; i++)
+               bus_unregister(idxd_bus_types[i]);
+}
index c27e206a764c3599ef8e548c4d9491399ce70c13..066b21a3223261f26a73a71266d3b0a3e67bb51d 100644 (file)
@@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
                return;
        }
        sdmac->desc = desc = to_sdma_desc(&vd->tx);
-       /*
-        * Do not delete the node in desc_issued list in cyclic mode, otherwise
-        * the desc allocated will never be freed in vchan_dma_desc_free_list
-        */
-       if (!(sdmac->flags & IMX_DMA_SG_LOOP))
-               list_del(&vd->node);
+
+       list_del(&vd->node);
 
        sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
        sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 
        spin_lock_irqsave(&sdmac->vc.lock, flags);
        vchan_get_all_descriptors(&sdmac->vc, &head);
-       sdmac->desc = NULL;
        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
        vchan_dma_desc_free_list(&sdmac->vc, &head);
        sdmac->context_loaded = false;
 }
 
-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&sdmac->vc.lock, flags);
 
        sdma_disable_channel(chan);
 
-       if (sdmac->desc)
+       if (sdmac->desc) {
+               vchan_terminate_vdesc(&sdmac->desc->vd);
+               sdmac->desc = NULL;
                schedule_work(&sdmac->terminate_worker);
+       }
+
+       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
        return 0;
 }
@@ -1324,7 +1327,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
 
-       sdma_disable_channel_async(chan);
+       sdma_terminate_all(chan);
 
        sdma_channel_synchronize(chan);
 
@@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
                                      struct dma_tx_state *txstate)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
-       struct sdma_desc *desc;
+       struct sdma_desc *desc = NULL;
        u32 residue;
        struct virt_dma_desc *vd;
        enum dma_status ret;
@@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
                return ret;
 
        spin_lock_irqsave(&sdmac->vc.lock, flags);
+
        vd = vchan_find_desc(&sdmac->vc, cookie);
-       if (vd) {
+       if (vd)
                desc = to_sdma_desc(&vd->tx);
+       else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
+               desc = sdmac->desc;
+
+       if (desc) {
                if (sdmac->flags & IMX_DMA_SG_LOOP)
                        residue = (desc->num_bd - desc->buf_ptail) *
                                desc->period_len - desc->chn_real_count;
                else
                        residue = desc->chn_count - desc->chn_real_count;
-       } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
-               residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
        } else {
                residue = 0;
        }
+
        spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
        dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
@@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev)
        sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
        sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
        sdma->dma_device.device_config = sdma_config;
-       sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+       sdma->dma_device.device_terminate_all = sdma_terminate_all;
        sdma->dma_device.device_synchronize = sdma_channel_synchronize;
        sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
        sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
index 1a422a8b43cfb29806026b0be2996dc1dee4aaa5..18c011e57592ebc838a30d118b7465d8c8a09a46 100644 (file)
@@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 
                descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
                                                 SZ_2M, &descs->hw, flags);
-               if (!descs->virt && (i > 0)) {
+               if (!descs->virt) {
                        int idx;
 
                        for (idx = 0; idx < i; idx++) {
+                               descs = &ioat_chan->descs[idx];
                                dma_free_coherent(to_dev(ioat_chan), SZ_2M,
                                                  descs->virt, descs->hw);
                                descs->virt = NULL;
index a6a6dc432db829a97d07ef14286b647755eb1f2a..60e9afbb896c339bb7255b7b56322348e73cbd12 100644 (file)
@@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
        ioat_kobject_del(ioat_dma);
 
        dma_async_device_unregister(dma);
-
-       dma_pool_destroy(ioat_dma->completion_pool);
-
-       INIT_LIST_HEAD(&dma->channels);
 }
 
 /**
@@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
        dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
 
        for (i = 0; i < dma->chancnt; i++) {
-               ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+               ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan)
                        break;
 
@@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c)
                return;
 
        ioat_stop(ioat_chan);
-       ioat_reset_hw(ioat_chan);
 
-       /* Put LTR to idle */
-       if (ioat_dma->version >= IOAT_VER_3_4)
-               writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
-                       ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+       if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+               ioat_reset_hw(ioat_chan);
+
+               /* Put LTR to idle */
+               if (ioat_dma->version >= IOAT_VER_3_4)
+                       writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+                              ioat_chan->reg_base +
+                              IOAT_CHAN_LTR_SWSEL_OFFSET);
+       }
 
        spin_lock_bh(&ioat_chan->cleanup_lock);
        spin_lock_bh(&ioat_chan->prep_lock);
@@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = {
        .err_handler    = &ioat_err_handler,
 };
 
+static void release_ioatdma(struct dma_device *device)
+{
+       struct ioatdma_device *d = to_ioatdma_device(device);
+       int i;
+
+       for (i = 0; i < IOAT_MAX_CHANS; i++)
+               kfree(d->idx[i]);
+
+       dma_pool_destroy(d->completion_pool);
+       kfree(d);
+}
+
 static struct ioatdma_device *
 alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
 {
-       struct device *dev = &pdev->dev;
-       struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+       struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
 
        if (!d)
                return NULL;
        d->pdev = pdev;
        d->reg_base = iobase;
+       d->dma_dev.device_release = release_ioatdma;
        return d;
 }
 
@@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev)
        if (!device)
                return;
 
+       ioat_shutdown(pdev);
+
        dev_err(&pdev->dev, "Removing dma and dca services\n");
        if (device->dca) {
                unregister_dca_provider(device->dca, &pdev->dev);
index adecea51814f0a2cb6a26f8593bbf86e9c931e3d..c5c1aa0dcaeddc9c52a5a7f6004e2c20c58dd515 100644 (file)
@@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
                        c = p->vchan;
                        if (c && (tc1 & BIT(i))) {
                                spin_lock_irqsave(&c->vc.lock, flags);
-                               vchan_cookie_complete(&p->ds_run->vd);
-                               p->ds_done = p->ds_run;
-                               p->ds_run = NULL;
+                               if (p->ds_run != NULL) {
+                                       vchan_cookie_complete(&p->ds_run->vd);
+                                       p->ds_done = p->ds_run;
+                                       p->ds_run = NULL;
+                               }
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        if (c && (tc2 & BIT(i))) {
@@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
        if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
                return -EAGAIN;
 
+       /* Avoid losing track of ds_run if a transaction is in flight */
+       if (c->phy->ds_run)
+               return -EAGAIN;
+
        if (vd) {
                struct k3_dma_desc_sw *ds =
                        container_of(vd, struct k3_dma_desc_sw, vd);
index c20e6bd4e29898eefe0cba3026291f05a1e0f7e3..29f1223b285a4d38bb78dc3bd24b65e41d57eeef 100644 (file)
@@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
 
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
-       vchan_dma_desc_free_list(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&c->vc, &head);
+
        return 0;
 }
 
index c2d779daa4b51ac8d5373a1ccfa8185d3b1f0233..b2c2b5e8093cf0d7c64898e06be41f3b79e0f76b 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/of.h>
 #include <linux/of_dma.h>
 
+#include "dmaengine.h"
+
 static LIST_HEAD(of_dma_list);
 static DEFINE_MUTEX(of_dma_lock);
 
index 023f951189a727af61771991c85be0ac650449e3..c683051257fd66a19b2861107c80feffa4891930 100644 (file)
@@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
        }
 
        vchan_get_all_descriptors(&vchan->vc, &head);
-       vchan_dma_desc_free_list(&vchan->vc, &head);
 
        spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&vchan->vc, &head);
+
        return 0;
 }
 
index 6cce9ef61b2944020d67c821606e3451bd2e33bb..88b884cbb7c1b84f5fa92327abd50924670eb613 100644 (file)
@@ -2961,12 +2961,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
 {
        struct amba_device *pcdev = to_amba_device(dev);
 
-       pm_runtime_disable(dev);
-
-       if (!pm_runtime_status_suspended(dev)) {
-               /* amba did not disable the clock */
-               amba_pclk_disable(pcdev);
-       }
+       pm_runtime_force_suspend(dev);
        amba_pclk_unprepare(pcdev);
 
        return 0;
@@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev)
        if (ret)
                return ret;
 
-       if (!pm_runtime_status_suspended(dev))
-               ret = amba_pclk_enable(pcdev);
-
-       pm_runtime_enable(dev);
+       pm_runtime_force_resume(dev);
 
        return ret;
 }
 
-static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+static const struct dev_pm_ops pl330_pm = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
+};
 
 static int
 pl330_probe(struct amba_device *adev, const struct amba_id *id)
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
new file mode 100644 (file)
index 0000000..db4c5fd
--- /dev/null
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLX ExpressLane PEX PCIe Switch DMA Engine Driver
+ * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
+ * Copyright (c) 2019, GigaIO Networks, Inc
+ */
+
+#include "dmaengine.h"
+
+#include <linux/circ_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logan Gunthorpe");
+
+#define PLX_REG_DESC_RING_ADDR                 0x214
+#define PLX_REG_DESC_RING_ADDR_HI              0x218
+#define PLX_REG_DESC_RING_NEXT_ADDR            0x21C
+#define PLX_REG_DESC_RING_COUNT                        0x220
+#define PLX_REG_DESC_RING_LAST_ADDR            0x224
+#define PLX_REG_DESC_RING_LAST_SIZE            0x228
+#define PLX_REG_PREF_LIMIT                     0x234
+#define PLX_REG_CTRL                           0x238
+#define PLX_REG_CTRL2                          0x23A
+#define PLX_REG_INTR_CTRL                      0x23C
+#define PLX_REG_INTR_STATUS                    0x23E
+
+#define PLX_REG_PREF_LIMIT_PREF_FOUR           8
+
+#define PLX_REG_CTRL_GRACEFUL_PAUSE            BIT(0)
+#define PLX_REG_CTRL_ABORT                     BIT(1)
+#define PLX_REG_CTRL_WRITE_BACK_EN             BIT(2)
+#define PLX_REG_CTRL_START                     BIT(3)
+#define PLX_REG_CTRL_RING_STOP_MODE            BIT(4)
+#define PLX_REG_CTRL_DESC_MODE_BLOCK           (0 << 5)
+#define PLX_REG_CTRL_DESC_MODE_ON_CHIP         (1 << 5)
+#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP                (2 << 5)
+#define PLX_REG_CTRL_DESC_INVALID              BIT(8)
+#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE       BIT(9)
+#define PLX_REG_CTRL_ABORT_DONE                        BIT(10)
+#define PLX_REG_CTRL_IMM_PAUSE_DONE            BIT(12)
+#define PLX_REG_CTRL_IN_PROGRESS               BIT(30)
+
+#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \
+                                PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
+                                PLX_REG_CTRL_ABORT_DONE | \
+                                PLX_REG_CTRL_IMM_PAUSE_DONE)
+
+#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \
+                                PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
+                                PLX_REG_CTRL_START | \
+                                PLX_REG_CTRL_RESET_VAL)
+
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B                0
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B       1
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B       2
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B       3
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB                4
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB                5
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B         7
+
+#define PLX_REG_INTR_CTRL_ERROR_EN             BIT(0)
+#define PLX_REG_INTR_CTRL_INV_DESC_EN          BIT(1)
+#define PLX_REG_INTR_CTRL_ABORT_DONE_EN                BIT(3)
+#define PLX_REG_INTR_CTRL_PAUSE_DONE_EN                BIT(4)
+#define PLX_REG_INTR_CTRL_IMM_PAUSE_DONE_EN    BIT(5)
+
+#define PLX_REG_INTR_STATUS_ERROR              BIT(0)
+#define PLX_REG_INTR_STATUS_INV_DESC           BIT(1)
+#define PLX_REG_INTR_STATUS_DESC_DONE          BIT(2)
+#define PLX_REG_INTR_CTRL_ABORT_DONE           BIT(3)
+
+struct plx_dma_hw_std_desc {
+       __le32 flags_and_size;
+       __le16 dst_addr_hi;
+       __le16 src_addr_hi;
+       __le32 dst_addr_lo;
+       __le32 src_addr_lo;
+};
+
+#define PLX_DESC_SIZE_MASK             0x7ffffff
+#define PLX_DESC_FLAG_VALID            BIT(31)
+#define PLX_DESC_FLAG_INT_WHEN_DONE    BIT(30)
+
+#define PLX_DESC_WB_SUCCESS            BIT(30)
+#define PLX_DESC_WB_RD_FAIL            BIT(29)
+#define PLX_DESC_WB_WR_FAIL            BIT(28)
+
+#define PLX_DMA_RING_COUNT             2048
+
+struct plx_dma_desc {
+       struct dma_async_tx_descriptor txd;
+       struct plx_dma_hw_std_desc *hw;
+       u32 orig_size;
+};
+
+struct plx_dma_dev {
+       struct dma_device dma_dev;
+       struct dma_chan dma_chan;
+       struct pci_dev __rcu *pdev;
+       void __iomem *bar;
+       struct tasklet_struct desc_task;
+
+       spinlock_t ring_lock;
+       bool ring_active;
+       int head;
+       int tail;
+       struct plx_dma_hw_std_desc *hw_ring;
+       dma_addr_t hw_ring_dma;
+       struct plx_dma_desc **desc_ring;
+};
+
+static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
+{
+       return container_of(c, struct plx_dma_dev, dma_chan);
+}
+
+static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
+{
+       return container_of(txd, struct plx_dma_desc, txd);
+}
+
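+/* The ring size is a power of two, so masking wraps the head/tail index */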
+static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
+{
+       return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
+}
+
+static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
+{
+       struct dmaengine_result res;
+       struct plx_dma_desc *desc;
+       u32 flags;
+
+       spin_lock_bh(&plxdev->ring_lock);
+
+       while (plxdev->tail != plxdev->head) {
+               desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+               flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));
+
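+               /* A descriptor still flagged valid has not been completed by the hardware */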
+               if (flags & PLX_DESC_FLAG_VALID)
+                       break;
+
+               res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);
+
+               if (flags & PLX_DESC_WB_SUCCESS)
+                       res.result = DMA_TRANS_NOERROR;
+               else if (flags & PLX_DESC_WB_WR_FAIL)
+                       res.result = DMA_TRANS_WRITE_FAILED;
+               else
+                       res.result = DMA_TRANS_READ_FAILED;
+
+               dma_cookie_complete(&desc->txd);
+               dma_descriptor_unmap(&desc->txd);
+               dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+               desc->txd.callback = NULL;
+               desc->txd.callback_result = NULL;
+
+               plxdev->tail++;
+       }
+
+       spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
+{
+       struct dmaengine_result res;
+       struct plx_dma_desc *desc;
+
+       plx_dma_process_desc(plxdev);
+
+       spin_lock_bh(&plxdev->ring_lock);
+
+       while (plxdev->tail != plxdev->head) {
+               desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+               res.residue = desc->orig_size;
+               res.result = DMA_TRANS_ABORTED;
+
+               dma_cookie_complete(&desc->txd);
+               dma_descriptor_unmap(&desc->txd);
+               dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+               desc->txd.callback = NULL;
+               desc->txd.callback_result = NULL;
+
+               plxdev->tail++;
+       }
+
+       spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void __plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+       u32 val;
+
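+       /* Bail out if the engine is already stopped (only the pause bit may be set) */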
+       val = readl(plxdev->bar + PLX_REG_CTRL);
+       if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
+               return;
+
+       writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+              plxdev->bar + PLX_REG_CTRL);
+
+       while (!time_after(jiffies, timeout)) {
+               val = readl(plxdev->bar + PLX_REG_CTRL);
+               if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
+                       break;
+
+               cpu_relax();
+       }
+
+       if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
+               dev_err(plxdev->dma_dev.dev,
+                       "Timeout waiting for graceful pause!\n");
+
+       writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+              plxdev->bar + PLX_REG_CTRL);
+
+       writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+       writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
+       writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+       writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+}
+
+static void plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+       rcu_read_lock();
+       if (!rcu_dereference(plxdev->pdev)) {
+               rcu_read_unlock();
+               return;
+       }
+
+       __plx_dma_stop(plxdev);
+
+       rcu_read_unlock();
+}
+
+static void plx_dma_desc_task(unsigned long data)
+{
+       struct plx_dma_dev *plxdev = (void *)data;
+
+       plx_dma_process_desc(plxdev);
+}
+
+static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
+               dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
+               unsigned long flags)
+       __acquires(plxdev->ring_lock)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
+       struct plx_dma_desc *plxdesc;
+
+       spin_lock_bh(&plxdev->ring_lock);
+       if (!plxdev->ring_active)
+               goto err_unlock;
+
+       if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
+               goto err_unlock;
+
+       if (len > PLX_DESC_SIZE_MASK)
+               goto err_unlock;
+
+       plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
+       plxdev->head++;
+
+       plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
+       plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
+       plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
+       plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));
+
+       plxdesc->orig_size = len;
+
+       if (flags & DMA_PREP_INTERRUPT)
+               len |= PLX_DESC_FLAG_INT_WHEN_DONE;
+
+       plxdesc->hw->flags_and_size = cpu_to_le32(len);
+       plxdesc->txd.flags = flags;
+
+       /* return with the lock held, it will be released in tx_submit */
+
+       return &plxdesc->txd;
+
+err_unlock:
+       /*
+        * Keep sparse happy by restoring an even lock count on
+        * this lock.
+        */
+       __acquire(plxdev->ring_lock);
+
+       spin_unlock_bh(&plxdev->ring_lock);
+       return NULL;
+}
+
+static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
+       __releases(plxdev->ring_lock)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
+       struct plx_dma_desc *plxdesc = to_plx_desc(desc);
+       dma_cookie_t cookie;
+
+       cookie = dma_cookie_assign(desc);
+
+       /*
+        * Ensure the descriptor updates are visible to the dma device
+        * before setting the valid bit.
+        */
+       wmb();
+
+       plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);
+
+       spin_unlock_bh(&plxdev->ring_lock);
+
+       return cookie;
+}
+
+static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
+               dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+       enum dma_status ret;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE)
+               return ret;
+
+       plx_dma_process_desc(plxdev);
+
+       return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void plx_dma_issue_pending(struct dma_chan *chan)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+
+       rcu_read_lock();
+       if (!rcu_dereference(plxdev->pdev)) {
+               rcu_read_unlock();
+               return;
+       }
+
+       /*
+        * Ensure the valid bits are visible before starting the
+        * DMA engine.
+        */
+       wmb();
+
+       writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);
+
+       rcu_read_unlock();
+}
+
+static irqreturn_t plx_dma_isr(int irq, void *devid)
+{
+       struct plx_dma_dev *plxdev = devid;
+       u32 status;
+
+       status = readw(plxdev->bar + PLX_REG_INTR_STATUS);
+
+       if (!status)
+               return IRQ_NONE;
+
+       if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
+               tasklet_schedule(&plxdev->desc_task);
+
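+       /* Status bits are write-one-to-clear */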
+       writew(status, plxdev->bar + PLX_REG_INTR_STATUS);
+
+       return IRQ_HANDLED;
+}
+
+static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
+{
+       struct plx_dma_desc *desc;
+       int i;
+
+       plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
+                                   sizeof(*plxdev->desc_ring), GFP_KERNEL);
+       if (!plxdev->desc_ring)
+               return -ENOMEM;
+
+       for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
+               desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+               if (!desc)
+                       goto free_and_exit;
+
+               dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
+               desc->txd.tx_submit = plx_dma_tx_submit;
+               desc->hw = &plxdev->hw_ring[i];
+
+               plxdev->desc_ring[i] = desc;
+       }
+
+       return 0;
+
+free_and_exit:
+       for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+               kfree(plxdev->desc_ring[i]);
+       kfree(plxdev->desc_ring);
+       return -ENOMEM;
+}
+
+static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+       size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+       int rc;
+
+       plxdev->head = plxdev->tail = 0;
+       plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
+                                            &plxdev->hw_ring_dma, GFP_KERNEL);
+       if (!plxdev->hw_ring)
+               return -ENOMEM;
+
+       rc = plx_dma_alloc_desc(plxdev);
+       if (rc)
+               goto out_free_hw_ring;
+
+       rcu_read_lock();
+       if (!rcu_dereference(plxdev->pdev)) {
+               rcu_read_unlock();
+               rc = -ENODEV;
+               goto out_free_hw_ring;
+       }
+
+       writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
+       writel(lower_32_bits(plxdev->hw_ring_dma),
+              plxdev->bar + PLX_REG_DESC_RING_ADDR);
+       writel(upper_32_bits(plxdev->hw_ring_dma),
+              plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+       writel(lower_32_bits(plxdev->hw_ring_dma),
+              plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+       writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+       writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);
+
+       plxdev->ring_active = true;
+
+       rcu_read_unlock();
+
+       return PLX_DMA_RING_COUNT;
+
+out_free_hw_ring:
+       dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+                         plxdev->hw_ring_dma);
+       return rc;
+}
+
+static void plx_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+       size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+       struct pci_dev *pdev;
+       int irq = -1;
+       int i;
+
+       spin_lock_bh(&plxdev->ring_lock);
+       plxdev->ring_active = false;
+       spin_unlock_bh(&plxdev->ring_lock);
+
+       plx_dma_stop(plxdev);
+
+       rcu_read_lock();
+       pdev = rcu_dereference(plxdev->pdev);
+       if (pdev)
+               irq = pci_irq_vector(pdev, 0);
+       rcu_read_unlock();
+
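+       /* Wait for any running interrupt handler before killing the tasklet */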
+       if (irq > 0)
+               synchronize_irq(irq);
+
+       tasklet_kill(&plxdev->desc_task);
+
+       plx_dma_abort_desc(plxdev);
+
+       for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+               kfree(plxdev->desc_ring[i]);
+
+       kfree(plxdev->desc_ring);
+       dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+                         plxdev->hw_ring_dma);
+
+}
+
+static void plx_dma_release(struct dma_device *dma_dev)
+{
+       struct plx_dma_dev *plxdev =
+               container_of(dma_dev, struct plx_dma_dev, dma_dev);
+
+       put_device(dma_dev->dev);
+       kfree(plxdev);
+}
+
+static int plx_dma_create(struct pci_dev *pdev)
+{
+       struct plx_dma_dev *plxdev;
+       struct dma_device *dma;
+       struct dma_chan *chan;
+       int rc;
+
+       plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
+       if (!plxdev)
+               return -ENOMEM;
+
+       rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+                        KBUILD_MODNAME, plxdev);
+       if (rc) {
+               kfree(plxdev);
+               return rc;
+       }
+
+       spin_lock_init(&plxdev->ring_lock);
+       tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
+                    (unsigned long)plxdev);
+
+       RCU_INIT_POINTER(plxdev->pdev, pdev);
+       plxdev->bar = pcim_iomap_table(pdev)[0];
+
+       dma = &plxdev->dma_dev;
+       dma->chancnt = 1;
+       INIT_LIST_HEAD(&dma->channels);
+       dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+       dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
+       dma->dev = get_device(&pdev->dev);
+
+       dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
+       dma->device_free_chan_resources = plx_dma_free_chan_resources;
+       dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
+       dma->device_issue_pending = plx_dma_issue_pending;
+       dma->device_tx_status = plx_dma_tx_status;
+       dma->device_release = plx_dma_release;
+
+       chan = &plxdev->dma_chan;
+       chan->device = dma;
+       dma_cookie_init(chan);
+       list_add_tail(&chan->device_node, &dma->channels);
+
+       rc = dma_async_device_register(dma);
+       if (rc) {
+               pci_err(pdev, "Failed to register dma device: %d\n", rc);
+               free_irq(pci_irq_vector(pdev, 0), plxdev);
+               kfree(plxdev);
+               return rc;
+       }
+
+       pci_set_drvdata(pdev, plxdev);
+
+       return 0;
+}
+
+static int plx_dma_probe(struct pci_dev *pdev,
+                        const struct pci_device_id *id)
+{
+       int rc;
+
+       rc = pcim_enable_device(pdev);
+       if (rc)
+               return rc;
+
+       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+       if (rc)
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+       if (rc)
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
+       if (rc)
+               return rc;
+
+       rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+       if (rc <= 0)
+               return rc;
+
+       pci_set_master(pdev);
+
+       rc = plx_dma_create(pdev);
+       if (rc)
+               goto err_free_irq_vectors;
+
+       pci_info(pdev, "PLX DMA Channel Registered\n");
+
+       return 0;
+
+err_free_irq_vectors:
+       pci_free_irq_vectors(pdev);
+       return rc;
+}
+
+static void plx_dma_remove(struct pci_dev *pdev)
+{
+       struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);
+
+       free_irq(pci_irq_vector(pdev, 0), plxdev);
+
+       rcu_assign_pointer(plxdev->pdev, NULL);
+       synchronize_rcu();
+
+       spin_lock_bh(&plxdev->ring_lock);
+       plxdev->ring_active = false;
+       spin_unlock_bh(&plxdev->ring_lock);
+
+       __plx_dma_stop(plxdev);
+       plx_dma_abort_desc(plxdev);
+
+       plxdev->bar = NULL;
+       dma_async_device_unregister(&plxdev->dma_dev);
+
+       pci_free_irq_vectors(pdev);
+}
+
+static const struct pci_device_id plx_dma_pci_tbl[] = {
+       {
+               .vendor         = PCI_VENDOR_ID_PLX,
+               .device         = 0x87D0,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .class          = PCI_CLASS_SYSTEM_OTHER << 8,
+               .class_mask     = 0xFFFFFFFF,
+       },
+       {0}
+};
+MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);
+
+static struct pci_driver plx_dma_pci_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = plx_dma_pci_tbl,
+       .probe          = plx_dma_probe,
+       .remove         = plx_dma_remove,
+};
+module_pci_driver(plx_dma_pci_driver);
index 43da8eeb18ef0710b21c43854d2f3a951991d525..8e14c72d03f056989f2eace2796596c73bc000b0 100644 (file)
@@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
        s3c24xx_dma_start_next_sg(s3cchan, txd);
 }
 
-static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
-                               struct s3c24xx_dma_chan *s3cchan)
-{
-       LIST_HEAD(head);
-
-       vchan_get_all_descriptors(&s3cchan->vc, &head);
-       vchan_dma_desc_free_list(&s3cchan->vc, &head);
-}
-
 /*
  * Try to allocate a physical channel.  When successful, assign it to
  * this virtual channel, and initiate the next descriptor.  The
@@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
 {
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+       LIST_HEAD(head);
        unsigned long flags;
-       int ret = 0;
+       int ret;
 
        spin_lock_irqsave(&s3cchan->vc.lock, flags);
 
@@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
        }
 
        /* Dequeue jobs not yet fired as well */
-       s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+
+       vchan_get_all_descriptors(&s3cchan->vc, &head);
+
+       spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+       vchan_dma_desc_free_list(&s3cchan->vc, &head);
+
+       return 0;
+
 unlock:
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
 
@@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
 
        /* Basic sanity check */
        if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
-               dev_err(&pdev->dev, "to many dma channels %d, max %d\n",
+               dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
                        pdata->num_phy_channels, MAX_DMA_CHANNELS);
                return -EINVAL;
        }
index 465256fe8b1fc30209b1c4b29dd1c8261754f7e9..6d0bec9476365515c0ca46ecf08c514833d99f65 100644 (file)
@@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
        kfree(chan->desc);
        chan->desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
-       vchan_dma_desc_free_list(&chan->vchan, &head);
        sf_pdma_disclaim_chan(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
 }
 
 static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
@@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan)
        chan->desc = NULL;
        chan->xfer_err = false;
        vchan_get_all_descriptors(&chan->vchan, &head);
-       vchan_dma_desc_free_list(&chan->vchan, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
 
        return 0;
 }
index e397a50058c8825f2ea3ac188f8f64d160bf08dc..bbc2bda3b902f0a798906ee8922723aa4be030f7 100644 (file)
@@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
        dma_addr_t src, dest;
        u32 endpoints;
        int nr_periods, offset, plength, i;
+       u8 ram_type, io_mode, linear_mode;
 
        if (!is_slave_direction(dir)) {
                dev_err(chan2dev(chan), "Invalid DMA direction\n");
                return NULL;
        }
 
-       if (vchan->is_dedicated) {
-               /*
-                * As we are using this just for audio data, we need to use
-                * normal DMA. There is nothing stopping us from supporting
-                * dedicated DMA here as well, so if a client comes up and
-                * requires it, it will be simple to implement it.
-                */
-               dev_err(chan2dev(chan),
-                       "Cyclic transfers are only supported on Normal DMA\n");
-               return NULL;
-       }
-
        contract = generate_dma_contract();
        if (!contract)
                return NULL;
 
        contract->is_cyclic = 1;
 
-       /* Figure out the endpoints and the address we need */
+       if (vchan->is_dedicated) {
+               io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+               linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+               ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+       } else {
+               io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+               linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+               ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+       }
+
        if (dir == DMA_MEM_TO_DEV) {
                src = buf;
                dest = sconfig->dst_addr;
-               endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
-                           SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
-                           SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+               endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+                           SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+                           SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
        } else {
                src = sconfig->src_addr;
                dest = buf;
-               endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
-                           SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
-                           SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+               endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+                           SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+                           SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
        }
 
        /*
@@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
                        dest = buf + offset;
 
                /* Make the promise */
-               promise = generate_ndma_promise(chan, src, dest,
-                                               plength, sconfig, dir);
+               if (vchan->is_dedicated)
+                       promise = generate_ddma_promise(chan, src, dest,
+                                                       plength, sconfig);
+               else
+                       promise = generate_ndma_promise(chan, src, dest,
+                                                       plength, sconfig, dir);
+
                if (!promise) {
                        /* TODO: should we free everything? */
                        return NULL;
@@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan)
        }
 
        spin_lock_irqsave(&vchan->vc.lock, flags);
-       vchan_dma_desc_free_list(&vchan->vc, &head);
        /* Clear these so the vchan is usable again */
        vchan->processing = NULL;
        vchan->pchan = NULL;
        spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&vchan->vc, &head);
+
        return 0;
 }
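
With the dedicated-channel restriction lifted in the cyclic hunks above, sun4i clients can request cyclic transfers on either channel type through the standard dmaengine API. A hedged client-side sketch (the "tx" dma-name, FIFO address, bus width and burst size are illustrative assumptions, not values from this patch):

/* Illustrative cyclic-transfer setup via the generic dmaengine API. */
static int example_start_cyclic(struct device *dev, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
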
 
index d507c24fbf31e1591b32bc42a811a91878fc3846..f76e06651f804c04e5df2c8aa6214ed93b7c2362 100644 (file)
@@ -34,5 +34,29 @@ config DMA_OMAP
          Enable support for the TI sDMA (System DMA or DMA4) controller. This
          DMA engine is found on OMAP and DRA7xx parts.
 
+config TI_K3_UDMA
+       bool "Texas Instruments UDMA support"
+       depends on ARCH_K3 || COMPILE_TEST
+       depends on TI_SCI_PROTOCOL
+       depends on TI_SCI_INTA_IRQCHIP
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       select TI_K3_RINGACC
+       select TI_K3_PSIL
+       help
+         Enable support for the TI UDMA (Unified DMA) controller. This
+         DMA engine is used in the AM65x and J721E SoCs.
+
+config TI_K3_UDMA_GLUE_LAYER
+       bool "Texas Instruments UDMA Glue layer for non DMAengine users"
+       depends on ARCH_K3 || COMPILE_TEST
+       depends on TI_K3_UDMA
+       help
+         Say y here to support the K3 NAVSS DMA glue interface.
+         If unsure, say N.
+
+config TI_K3_PSIL
+       bool
+
 config TI_DMA_CROSSBAR
        bool
index 113e59ec9c32d5c17bcee6c9faae67375ef14c90..9a29a107e37473afcab587243e2e8a19110febd0 100644 (file)
@@ -2,4 +2,7 @@
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
+obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o
 obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
index 756a3c951dc72a6544087cbb67054d6e99281bd4..03a7f647f7b2c8fdd1a9c42752ece5e81e057bdf 100644 (file)
@@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev)
        if (!info)
                return -ENODEV;
 
-       pm_runtime_enable(dev);
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               dev_err(dev, "pm_runtime_get_sync() failed\n");
-               return ret;
-       }
-
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;
@@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ecc);
 
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               dev_err(dev, "pm_runtime_get_sync() failed\n");
+               pm_runtime_disable(dev);
+               return ret;
+       }
+
        /* Get eDMA3 configuration from IP */
        ret = edma_setup_from_hw(dev, info, ecc);
        if (ret)
-               return ret;
+               goto err_disable_pm;
 
        /* Allocate memory based on the information we got from the IP */
        ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
                                        sizeof(*ecc->slave_chans), GFP_KERNEL);
-       if (!ecc->slave_chans)
-               return -ENOMEM;
 
        ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
                                       sizeof(unsigned long), GFP_KERNEL);
-       if (!ecc->slot_inuse)
-               return -ENOMEM;
 
        ecc->channels_mask = devm_kcalloc(dev,
                                           BITS_TO_LONGS(ecc->num_channels),
                                           sizeof(unsigned long), GFP_KERNEL);
-       if (!ecc->channels_mask)
-               return -ENOMEM;
+       if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
+               ret = -ENOMEM;
+               goto err_disable_pm;
+       }
 
        /* Mark all channels available initially */
        bitmap_fill(ecc->channels_mask, ecc->num_channels);
@@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev)
                                       ecc);
                if (ret) {
                        dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
-                       return ret;
+                       goto err_disable_pm;
                }
                ecc->ccint = irq;
        }
@@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev)
                                       ecc);
                if (ret) {
                        dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
-                       return ret;
+                       goto err_disable_pm;
                }
                ecc->ccerrint = irq;
        }
@@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev)
        ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
        if (ecc->dummy_slot < 0) {
                dev_err(dev, "Can't allocate PaRAM dummy slot\n");
-               return ecc->dummy_slot;
+               ret = ecc->dummy_slot;
+               goto err_disable_pm;
        }
 
        queue_priority_mapping = info->queue_priority_mapping;
@@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev)
 
 err_reg1:
        edma_free_slot(ecc, ecc->dummy_slot);
+err_disable_pm:
+       pm_runtime_put_sync(dev);
+       pm_runtime_disable(dev);
        return ret;
 }
 
@@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev)
        if (ecc->dma_memcpy)
                dma_async_device_unregister(ecc->dma_memcpy);
        edma_free_slot(ecc, ecc->dummy_slot);
+       pm_runtime_put_sync(dev);
+       pm_runtime_disable(dev);
 
        return 0;
 }
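
The edma rework above delays pm_runtime_enable()/pm_runtime_get_sync() until the driver data is in place and funnels every later failure through the single err_disable_pm label, so the PM reference and enable state are unwound exactly once. A sketch of the pattern (example_probe and example_setup_hw are illustrative names, not from the patch):

/* Sketch: single-exit unwinding of runtime PM state in probe(). */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* only the enable needs undoing at this point */
		pm_runtime_disable(dev);
		return ret;
	}

	ret = example_setup_hw(pdev);	/* stand-in for the later setup steps */
	if (ret)
		goto err_disable_pm;

	return 0;

err_disable_pm:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
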
diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c
new file mode 100644 (file)
index 0000000..a896a15
--- /dev/null
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x)                             \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+               },                                      \
+       }
+
+#define PSIL_PDMA_XY_PKT(x)                            \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+                       .pkt_mode = 1,                  \
+               },                                      \
+       }
+
+#define PSIL_ETHERNET(x)                               \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+                       .pkt_mode = 1,                  \
+                       .needs_epib = 1,                \
+                       .psd_size = 16,                 \
+               },                                      \
+       }
+
+#define PSIL_SA2UL(x, tx)                              \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+                       .pkt_mode = 1,                  \
+                       .needs_epib = 1,                \
+                       .psd_size = 64,                 \
+                       .notdpkt = tx,                  \
+               },                                      \
+       }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am654_src_ep_map[] = {
+       /* SA2UL */
+       PSIL_SA2UL(0x4000, 0),
+       PSIL_SA2UL(0x4001, 0),
+       PSIL_SA2UL(0x4002, 0),
+       PSIL_SA2UL(0x4003, 0),
+       /* PRU_ICSSG0 */
+       PSIL_ETHERNET(0x4100),
+       PSIL_ETHERNET(0x4101),
+       PSIL_ETHERNET(0x4102),
+       PSIL_ETHERNET(0x4103),
+       /* PRU_ICSSG1 */
+       PSIL_ETHERNET(0x4200),
+       PSIL_ETHERNET(0x4201),
+       PSIL_ETHERNET(0x4202),
+       PSIL_ETHERNET(0x4203),
+       /* PRU_ICSSG2 */
+       PSIL_ETHERNET(0x4300),
+       PSIL_ETHERNET(0x4301),
+       PSIL_ETHERNET(0x4302),
+       PSIL_ETHERNET(0x4303),
+       /* PDMA0 - McASPs */
+       PSIL_PDMA_XY_TR(0x4400),
+       PSIL_PDMA_XY_TR(0x4401),
+       PSIL_PDMA_XY_TR(0x4402),
+       /* PDMA1 - SPI0-4 */
+       PSIL_PDMA_XY_PKT(0x4500),
+       PSIL_PDMA_XY_PKT(0x4501),
+       PSIL_PDMA_XY_PKT(0x4502),
+       PSIL_PDMA_XY_PKT(0x4503),
+       PSIL_PDMA_XY_PKT(0x4504),
+       PSIL_PDMA_XY_PKT(0x4505),
+       PSIL_PDMA_XY_PKT(0x4506),
+       PSIL_PDMA_XY_PKT(0x4507),
+       PSIL_PDMA_XY_PKT(0x4508),
+       PSIL_PDMA_XY_PKT(0x4509),
+       PSIL_PDMA_XY_PKT(0x450a),
+       PSIL_PDMA_XY_PKT(0x450b),
+       PSIL_PDMA_XY_PKT(0x450c),
+       PSIL_PDMA_XY_PKT(0x450d),
+       PSIL_PDMA_XY_PKT(0x450e),
+       PSIL_PDMA_XY_PKT(0x450f),
+       PSIL_PDMA_XY_PKT(0x4510),
+       PSIL_PDMA_XY_PKT(0x4511),
+       PSIL_PDMA_XY_PKT(0x4512),
+       PSIL_PDMA_XY_PKT(0x4513),
+       /* PDMA1 - USART0-2 */
+       PSIL_PDMA_XY_PKT(0x4514),
+       PSIL_PDMA_XY_PKT(0x4515),
+       PSIL_PDMA_XY_PKT(0x4516),
+       /* CPSW0 */
+       PSIL_ETHERNET(0x7000),
+       /* MCU_PDMA0 - ADCs */
+       PSIL_PDMA_XY_TR(0x7100),
+       PSIL_PDMA_XY_TR(0x7101),
+       PSIL_PDMA_XY_TR(0x7102),
+       PSIL_PDMA_XY_TR(0x7103),
+       /* MCU_PDMA1 - MCU_SPI0-2 */
+       PSIL_PDMA_XY_PKT(0x7200),
+       PSIL_PDMA_XY_PKT(0x7201),
+       PSIL_PDMA_XY_PKT(0x7202),
+       PSIL_PDMA_XY_PKT(0x7203),
+       PSIL_PDMA_XY_PKT(0x7204),
+       PSIL_PDMA_XY_PKT(0x7205),
+       PSIL_PDMA_XY_PKT(0x7206),
+       PSIL_PDMA_XY_PKT(0x7207),
+       PSIL_PDMA_XY_PKT(0x7208),
+       PSIL_PDMA_XY_PKT(0x7209),
+       PSIL_PDMA_XY_PKT(0x720a),
+       PSIL_PDMA_XY_PKT(0x720b),
+       /* MCU_PDMA1 - MCU_USART0 */
+       PSIL_PDMA_XY_PKT(0x7212),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am654_dst_ep_map[] = {
+       /* SA2UL */
+       PSIL_SA2UL(0xc000, 1),
+       PSIL_SA2UL(0xc001, 1),
+       /* PRU_ICSSG0 */
+       PSIL_ETHERNET(0xc100),
+       PSIL_ETHERNET(0xc101),
+       PSIL_ETHERNET(0xc102),
+       PSIL_ETHERNET(0xc103),
+       PSIL_ETHERNET(0xc104),
+       PSIL_ETHERNET(0xc105),
+       PSIL_ETHERNET(0xc106),
+       PSIL_ETHERNET(0xc107),
+       /* PRU_ICSSG1 */
+       PSIL_ETHERNET(0xc200),
+       PSIL_ETHERNET(0xc201),
+       PSIL_ETHERNET(0xc202),
+       PSIL_ETHERNET(0xc203),
+       PSIL_ETHERNET(0xc204),
+       PSIL_ETHERNET(0xc205),
+       PSIL_ETHERNET(0xc206),
+       PSIL_ETHERNET(0xc207),
+       /* PRU_ICSSG2 */
+       PSIL_ETHERNET(0xc300),
+       PSIL_ETHERNET(0xc301),
+       PSIL_ETHERNET(0xc302),
+       PSIL_ETHERNET(0xc303),
+       PSIL_ETHERNET(0xc304),
+       PSIL_ETHERNET(0xc305),
+       PSIL_ETHERNET(0xc306),
+       PSIL_ETHERNET(0xc307),
+       /* CPSW0 */
+       PSIL_ETHERNET(0xf000),
+       PSIL_ETHERNET(0xf001),
+       PSIL_ETHERNET(0xf002),
+       PSIL_ETHERNET(0xf003),
+       PSIL_ETHERNET(0xf004),
+       PSIL_ETHERNET(0xf005),
+       PSIL_ETHERNET(0xf006),
+       PSIL_ETHERNET(0xf007),
+};
+
+struct psil_ep_map am654_ep_map = {
+       .name = "am654",
+       .src = am654_src_ep_map,
+       .src_count = ARRAY_SIZE(am654_src_ep_map),
+       .dst = am654_dst_ep_map,
+       .dst_count = ARRAY_SIZE(am654_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
new file mode 100644 (file)
index 0000000..e3cfd5f
--- /dev/null
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x)                             \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+               },                                      \
+       }
+
+#define PSIL_PDMA_XY_PKT(x)                            \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+                       .pkt_mode = 1,                  \
+               },                                      \
+       }
+
+#define PSIL_PDMA_MCASP(x)                             \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+                       .pdma_acc32 = 1,                \
+                       .pdma_burst = 1,                \
+               },                                      \
+       }
+
+#define PSIL_ETHERNET(x)                               \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+                       .pkt_mode = 1,                  \
+                       .needs_epib = 1,                \
+                       .psd_size = 16,                 \
+               },                                      \
+       }
+
+#define PSIL_SA2UL(x, tx)                              \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+                       .pkt_mode = 1,                  \
+                       .needs_epib = 1,                \
+                       .psd_size = 64,                 \
+                       .notdpkt = tx,                  \
+               },                                      \
+       }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721e_src_ep_map[] = {
+       /* SA2UL */
+       PSIL_SA2UL(0x4000, 0),
+       PSIL_SA2UL(0x4001, 0),
+       PSIL_SA2UL(0x4002, 0),
+       PSIL_SA2UL(0x4003, 0),
+       /* PRU_ICSSG0 */
+       PSIL_ETHERNET(0x4100),
+       PSIL_ETHERNET(0x4101),
+       PSIL_ETHERNET(0x4102),
+       PSIL_ETHERNET(0x4103),
+       /* PRU_ICSSG1 */
+       PSIL_ETHERNET(0x4200),
+       PSIL_ETHERNET(0x4201),
+       PSIL_ETHERNET(0x4202),
+       PSIL_ETHERNET(0x4203),
+       /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+       PSIL_PDMA_MCASP(0x4400),
+       PSIL_PDMA_MCASP(0x4401),
+       PSIL_PDMA_MCASP(0x4402),
+       /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+       PSIL_PDMA_MCASP(0x4500),
+       PSIL_PDMA_MCASP(0x4501),
+       PSIL_PDMA_MCASP(0x4502),
+       PSIL_PDMA_MCASP(0x4503),
+       PSIL_PDMA_MCASP(0x4504),
+       PSIL_PDMA_MCASP(0x4505),
+       PSIL_PDMA_MCASP(0x4506),
+       PSIL_PDMA_MCASP(0x4507),
+       PSIL_PDMA_MCASP(0x4508),
+       /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+       PSIL_PDMA_XY_PKT(0x4600),
+       PSIL_PDMA_XY_PKT(0x4601),
+       PSIL_PDMA_XY_PKT(0x4602),
+       PSIL_PDMA_XY_PKT(0x4603),
+       PSIL_PDMA_XY_PKT(0x4604),
+       PSIL_PDMA_XY_PKT(0x4605),
+       PSIL_PDMA_XY_PKT(0x4606),
+       PSIL_PDMA_XY_PKT(0x4607),
+       /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+       PSIL_PDMA_XY_PKT(0x460c),
+       PSIL_PDMA_XY_PKT(0x460d),
+       PSIL_PDMA_XY_PKT(0x460e),
+       PSIL_PDMA_XY_PKT(0x460f),
+       PSIL_PDMA_XY_PKT(0x4610),
+       PSIL_PDMA_XY_PKT(0x4611),
+       PSIL_PDMA_XY_PKT(0x4612),
+       PSIL_PDMA_XY_PKT(0x4613),
+       /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+       PSIL_PDMA_XY_PKT(0x4618),
+       PSIL_PDMA_XY_PKT(0x4619),
+       PSIL_PDMA_XY_PKT(0x461a),
+       PSIL_PDMA_XY_PKT(0x461b),
+       PSIL_PDMA_XY_PKT(0x461c),
+       PSIL_PDMA_XY_PKT(0x461d),
+       PSIL_PDMA_XY_PKT(0x461e),
+       PSIL_PDMA_XY_PKT(0x461f),
+       /* PDMA11 (PDMA_MISC_G3) */
+       PSIL_PDMA_XY_PKT(0x4624),
+       PSIL_PDMA_XY_PKT(0x4625),
+       PSIL_PDMA_XY_PKT(0x4626),
+       PSIL_PDMA_XY_PKT(0x4627),
+       PSIL_PDMA_XY_PKT(0x4628),
+       PSIL_PDMA_XY_PKT(0x4629),
+       PSIL_PDMA_XY_PKT(0x4630),
+       PSIL_PDMA_XY_PKT(0x463a),
+       /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+       PSIL_PDMA_XY_PKT(0x4700),
+       PSIL_PDMA_XY_PKT(0x4701),
+       /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+       PSIL_PDMA_XY_PKT(0x4702),
+       PSIL_PDMA_XY_PKT(0x4703),
+       /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+       PSIL_PDMA_XY_PKT(0x4704),
+       PSIL_PDMA_XY_PKT(0x4705),
+       PSIL_PDMA_XY_PKT(0x4706),
+       PSIL_PDMA_XY_PKT(0x4707),
+       PSIL_PDMA_XY_PKT(0x4708),
+       PSIL_PDMA_XY_PKT(0x4709),
+       /* CPSW9 */
+       PSIL_ETHERNET(0x4a00),
+       /* CPSW0 */
+       PSIL_ETHERNET(0x7000),
+       /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+       PSIL_PDMA_XY_PKT(0x7100),
+       PSIL_PDMA_XY_PKT(0x7101),
+       PSIL_PDMA_XY_PKT(0x7102),
+       PSIL_PDMA_XY_PKT(0x7103),
+       /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+       PSIL_PDMA_XY_PKT(0x7200),
+       PSIL_PDMA_XY_PKT(0x7201),
+       PSIL_PDMA_XY_PKT(0x7202),
+       PSIL_PDMA_XY_PKT(0x7203),
+       PSIL_PDMA_XY_PKT(0x7204),
+       PSIL_PDMA_XY_PKT(0x7205),
+       PSIL_PDMA_XY_PKT(0x7206),
+       PSIL_PDMA_XY_PKT(0x7207),
+       /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+       PSIL_PDMA_XY_PKT(0x7300),
+       /* MCU_PDMA_ADC - ADC0-1 */
+       PSIL_PDMA_XY_TR(0x7400),
+       PSIL_PDMA_XY_TR(0x7401),
+       PSIL_PDMA_XY_TR(0x7402),
+       PSIL_PDMA_XY_TR(0x7403),
+       /* SA2UL */
+       PSIL_SA2UL(0x7500, 0),
+       PSIL_SA2UL(0x7501, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721e_dst_ep_map[] = {
+       /* SA2UL */
+       PSIL_SA2UL(0xc000, 1),
+       PSIL_SA2UL(0xc001, 1),
+       /* PRU_ICSSG0 */
+       PSIL_ETHERNET(0xc100),
+       PSIL_ETHERNET(0xc101),
+       PSIL_ETHERNET(0xc102),
+       PSIL_ETHERNET(0xc103),
+       PSIL_ETHERNET(0xc104),
+       PSIL_ETHERNET(0xc105),
+       PSIL_ETHERNET(0xc106),
+       PSIL_ETHERNET(0xc107),
+       /* PRU_ICSSG1 */
+       PSIL_ETHERNET(0xc200),
+       PSIL_ETHERNET(0xc201),
+       PSIL_ETHERNET(0xc202),
+       PSIL_ETHERNET(0xc203),
+       PSIL_ETHERNET(0xc204),
+       PSIL_ETHERNET(0xc205),
+       PSIL_ETHERNET(0xc206),
+       PSIL_ETHERNET(0xc207),
+       /* CPSW9 */
+       PSIL_ETHERNET(0xca00),
+       PSIL_ETHERNET(0xca01),
+       PSIL_ETHERNET(0xca02),
+       PSIL_ETHERNET(0xca03),
+       PSIL_ETHERNET(0xca04),
+       PSIL_ETHERNET(0xca05),
+       PSIL_ETHERNET(0xca06),
+       PSIL_ETHERNET(0xca07),
+       /* CPSW0 */
+       PSIL_ETHERNET(0xf000),
+       PSIL_ETHERNET(0xf001),
+       PSIL_ETHERNET(0xf002),
+       PSIL_ETHERNET(0xf003),
+       PSIL_ETHERNET(0xf004),
+       PSIL_ETHERNET(0xf005),
+       PSIL_ETHERNET(0xf006),
+       PSIL_ETHERNET(0xf007),
+       /* SA2UL */
+       PSIL_SA2UL(0xf500, 1),
+};
+
+struct psil_ep_map j721e_ep_map = {
+       .name = "j721e",
+       .src = j721e_src_ep_map,
+       .src_count = ARRAY_SIZE(j721e_src_ep_map),
+       .dst = j721e_dst_ep_map,
+       .dst_count = ARRAY_SIZE(j721e_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
new file mode 100644 (file)
index 0000000..a1f389c
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_PRIV_H_
+#define K3_PSIL_PRIV_H_
+
+#include <linux/dma/k3-psil.h>
+
+struct psil_ep {
+       u32 thread_id;
+       struct psil_endpoint_config ep_config;
+};
+
+/**
+ * struct psil_ep_map - PSI-L thread ID configuration maps
+ * @name:      Name of the map, set it to the name of the SoC
+ * @src:       Array of source PSI-L thread configurations
+ * @src_count: Number of entries in the src array
+ * @dst:       Array of destination PSI-L thread configurations
+ * @dst_count: Number of entries in the dst array
+ *
+ * In case of a symmetric configuration for a matching src/dst thread pair (for
+ * example 0x4400 and 0xc400) only the src configuration needs to be present.
+ * If no dst configuration is found, the code will look up
+ * (dst_thread_id & ~0x8000) to find the symmetric match.
+ */
+struct psil_ep_map {
+       char *name;
+       struct psil_ep  *src;
+       int src_count;
+       struct psil_ep  *dst;
+       int dst_count;
+};
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
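
The symmetric match described in the kernel-doc above is a single mask operation: destination thread IDs carry the 0x8000 bit, so clearing it yields the paired source thread. A worked example, assuming K3_PSIL_DST_THREAD_ID_OFFSET is that 0x8000 destination bit:

/* Worked example of the symmetric dst -> src fallback. */
u32 dst_thread = 0xc400;	/* a TX (destination) PSI-L thread */
u32 src_thread = dst_thread & ~K3_PSIL_DST_THREAD_ID_OFFSET;
/* src_thread == 0x4400: the src entry used when no dst entry exists */
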
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
new file mode 100644 (file)
index 0000000..d7b9650
--- /dev/null
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "k3-psil-priv.h"
+
+static DEFINE_MUTEX(ep_map_mutex);
+static struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+       int i;
+
+       mutex_lock(&ep_map_mutex);
+       if (!soc_ep_map) {
+               if (of_machine_is_compatible("ti,am654")) {
+                       soc_ep_map = &am654_ep_map;
+               } else if (of_machine_is_compatible("ti,j721e")) {
+                       soc_ep_map = &j721e_ep_map;
+               } else {
+                       pr_err("PSIL: No compatible machine found for map\n");
+                       /* drop the lock before bailing out */
+                       mutex_unlock(&ep_map_mutex);
+                       return ERR_PTR(-ENOTSUPP);
+               }
+               pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+       }
+       mutex_unlock(&ep_map_mutex);
+
+       if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+               /* check in destination thread map */
+               for (i = 0; i < soc_ep_map->dst_count; i++) {
+                       if (soc_ep_map->dst[i].thread_id == thread_id)
+                               return &soc_ep_map->dst[i].ep_config;
+               }
+       }
+
+       thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+       if (soc_ep_map->src) {
+               for (i = 0; i < soc_ep_map->src_count; i++) {
+                       if (soc_ep_map->src[i].thread_id == thread_id)
+                               return &soc_ep_map->src[i].ep_config;
+               }
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(psil_get_ep_config);
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+                          struct psil_endpoint_config *ep_config)
+{
+       struct psil_endpoint_config *dst_ep_config;
+       struct of_phandle_args dma_spec;
+       u32 thread_id;
+       int index;
+
+       if (!dev || !dev->of_node)
+               return -EINVAL;
+
+       index = of_property_match_string(dev->of_node, "dma-names", name);
+       if (index < 0)
+               return index;
+
+       if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells",
+                                      index, &dma_spec))
+               return -ENOENT;
+
+       thread_id = dma_spec.args[0];
+
+       dst_ep_config = psil_get_ep_config(thread_id);
+       if (IS_ERR(dst_ep_config)) {
+               pr_err("PSIL: thread ID 0x%04x not defined in map\n",
+                      thread_id);
+               of_node_put(dma_spec.np);
+               return PTR_ERR(dst_ep_config);
+       }
+
+       memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config));
+
+       of_node_put(dma_spec.np);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
new file mode 100644 (file)
index 0000000..c151129
--- /dev/null
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 NAVSS DMA glue interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct k3_udma_glue_common {
+       struct device *dev;
+       struct udma_dev *udmax;
+       const struct udma_tisci_rm *tisci_rm;
+       struct k3_ringacc *ringacc;
+       u32 src_thread;
+       u32 dst_thread;
+
+       u32  hdesc_size;
+       bool epib;
+       u32  psdata_size;
+       u32  swdata_size;
+};
+
+struct k3_udma_glue_tx_channel {
+       struct k3_udma_glue_common common;
+
+       struct udma_tchan *udma_tchanx;
+       int udma_tchan_id;
+
+       struct k3_ring *ringtx;
+       struct k3_ring *ringtxcq;
+
+       bool psil_paired;
+
+       int virq;
+
+       atomic_t free_pkts;
+       bool tx_pause_on_err;
+       bool tx_filt_einfo;
+       bool tx_filt_pswords;
+       bool tx_supr_tdpkt;
+};
+
+struct k3_udma_glue_rx_flow {
+       struct udma_rflow *udma_rflow;
+       int udma_rflow_id;
+       struct k3_ring *ringrx;
+       struct k3_ring *ringrxfdq;
+
+       int virq;
+};
+
+struct k3_udma_glue_rx_channel {
+       struct k3_udma_glue_common common;
+
+       struct udma_rchan *udma_rchanx;
+       int udma_rchan_id;
+       bool remote;
+
+       bool psil_paired;
+
+       u32  swdata_size;
+       int  flow_id_base;
+
+       struct k3_udma_glue_rx_flow *flows;
+       u32 flow_num;
+       u32 flows_ready;
+};
+
+#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
+
+static int of_k3_udma_glue_parse(struct device_node *udmax_np,
+                                struct k3_udma_glue_common *common)
+{
+       common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
+                                                      "ti,ringacc");
+       if (IS_ERR(common->ringacc))
+               return PTR_ERR(common->ringacc);
+
+       common->udmax = of_xudma_dev_get(udmax_np, NULL);
+       if (IS_ERR(common->udmax))
+               return PTR_ERR(common->udmax);
+
+       common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
+
+       return 0;
+}
+
+static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
+               const char *name, struct k3_udma_glue_common *common,
+               bool tx_chn)
+{
+       struct psil_endpoint_config *ep_config;
+       struct of_phandle_args dma_spec;
+       u32 thread_id;
+       int ret = 0;
+       int index;
+
+       if (unlikely(!name))
+               return -EINVAL;
+
+       index = of_property_match_string(chn_np, "dma-names", name);
+       if (index < 0)
+               return index;
+
+       if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
+                                      &dma_spec))
+               return -ENOENT;
+
+       thread_id = dma_spec.args[0];
+
+       if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+               ret = -EINVAL;
+               goto out_put_spec;
+       }
+
+       if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+               ret = -EINVAL;
+               goto out_put_spec;
+       }
+
+       /* get psil endpoint config */
+       ep_config = psil_get_ep_config(thread_id);
+       if (IS_ERR(ep_config)) {
+               dev_err(common->dev,
+                       "No configuration for psi-l thread 0x%04x\n",
+                       thread_id);
+               ret = PTR_ERR(ep_config);
+               goto out_put_spec;
+       }
+
+       common->epib = ep_config->needs_epib;
+       common->psdata_size = ep_config->psd_size;
+
+       if (tx_chn)
+               common->dst_thread = thread_id;
+       else
+               common->src_thread = thread_id;
+
+       ret = of_k3_udma_glue_parse(dma_spec.np, common);
+
+out_put_spec:
+       of_node_put(dma_spec.np);
+       return ret;
+}
+
+static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       struct device *dev = tx_chn->common.dev;
+
+       dev_dbg(dev, "dump_tx_chn:\n"
+               "udma_tchan_id: %d\n"
+               "src_thread: %08x\n"
+               "dst_thread: %08x\n",
+               tx_chn->udma_tchan_id,
+               tx_chn->common.src_thread,
+               tx_chn->common.dst_thread);
+}
+
+static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
+                                       char *mark)
+{
+       struct device *dev = chn->common.dev;
+
+       dev_dbg(dev, "=== dump ===> %s\n", mark);
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG,
+               xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG,
+               xudma_tchanrt_read(chn->udma_tchanx,
+                                  UDMA_TCHAN_RT_PEER_RT_EN_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG,
+               xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG,
+               xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG,
+               xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG));
+}
+
+static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
+       struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
+
+       memset(&req, 0, sizeof(req));
+
+       req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.index = tx_chn->udma_tchan_id;
+       if (tx_chn->tx_pause_on_err)
+               req.tx_pause_on_err = 1;
+       if (tx_chn->tx_filt_einfo)
+               req.tx_filt_einfo = 1;
+       if (tx_chn->tx_filt_pswords)
+               req.tx_filt_pswords = 1;
+       req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+       if (tx_chn->tx_supr_tdpkt)
+               req.tx_supr_tdpkt = 1;
+       req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
+       req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+
+       return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
+}
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+               const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+       struct k3_udma_glue_tx_channel *tx_chn;
+       int ret;
+
+       tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+       if (!tx_chn)
+               return ERR_PTR(-ENOMEM);
+
+       tx_chn->common.dev = dev;
+       tx_chn->common.swdata_size = cfg->swdata_size;
+       tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+       tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+       tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+       tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+       /* parse of udmap channel */
+       ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+                                       &tx_chn->common, true);
+       if (ret)
+               goto err;
+
+       tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
+                                               tx_chn->common.psdata_size,
+                                               tx_chn->common.swdata_size);
+
+       /* request and cfg UDMAP TX channel */
+       tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+       if (IS_ERR(tx_chn->udma_tchanx)) {
+               ret = PTR_ERR(tx_chn->udma_tchanx);
+               dev_err(dev, "UDMAX tchanx get err %d\n", ret);
+               goto err;
+       }
+       tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+
+       atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+
+       /* request and cfg rings */
+       tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
+                                                tx_chn->udma_tchan_id, 0);
+       if (!tx_chn->ringtx) {
+               ret = -ENODEV;
+               dev_err(dev, "Failed to get TX ring %u\n",
+                       tx_chn->udma_tchan_id);
+               goto err;
+       }
+
+       tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
+                                                  -1, 0);
+       if (!tx_chn->ringtxcq) {
+               ret = -ENODEV;
+               dev_err(dev, "Failed to get TXCQ ring\n");
+               goto err;
+       }
+
+       ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
+       if (ret) {
+               dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+               goto err;
+       }
+
+       ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
+       if (ret) {
+               dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+               goto err;
+       }
+
+       /* request and cfg psi-l */
+       tx_chn->common.src_thread =
+                       xudma_dev_get_psil_base(tx_chn->common.udmax) +
+                       tx_chn->udma_tchan_id;
+
+       ret = k3_udma_glue_cfg_tx_chn(tx_chn);
+       if (ret) {
+               dev_err(dev, "Failed to cfg tchan %d\n", ret);
+               goto err;
+       }
+
+       ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+                                   tx_chn->common.src_thread,
+                                   tx_chn->common.dst_thread);
+       if (ret) {
+               dev_err(dev, "PSI-L request err %d\n", ret);
+               goto err;
+       }
+
+       tx_chn->psil_paired = true;
+
+       /* reset TX RT registers */
+       k3_udma_glue_disable_tx_chn(tx_chn);
+
+       k3_udma_glue_dump_tx_chn(tx_chn);
+
+       return tx_chn;
+
+err:
+       k3_udma_glue_release_tx_chn(tx_chn);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       if (tx_chn->psil_paired) {
+               xudma_navss_psil_unpair(tx_chn->common.udmax,
+                                       tx_chn->common.src_thread,
+                                       tx_chn->common.dst_thread);
+               tx_chn->psil_paired = false;
+       }
+
+       if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
+               xudma_tchan_put(tx_chn->common.udmax,
+                               tx_chn->udma_tchanx);
+
+       if (tx_chn->ringtxcq)
+               k3_ringacc_ring_free(tx_chn->ringtxcq);
+
+       if (tx_chn->ringtx)
+               k3_ringacc_ring_free(tx_chn->ringtx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
+
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                            struct cppi5_host_desc_t *desc_tx,
+                            dma_addr_t desc_dma)
+{
+       u32 ringtxcq_id;
+
+       if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
+               return -ENOMEM;
+
+       ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+       cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
+
+       return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);
+
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                           dma_addr_t *desc_dma)
+{
+       int ret;
+
+       ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
+       if (!ret)
+               atomic_inc(&tx_chn->free_pkts);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
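
Together the two helpers above implement a credit scheme: k3_udma_glue_push_tx_chn() consumes one free_pkts credit and queues the descriptor on the TX ring, while k3_udma_glue_pop_tx_chn() reclaims a completed descriptor from the completion ring and returns the credit. A hedged usage sketch for a glue-layer client (descriptor preparation and the mapping from done_dma back to a descriptor are elided assumptions):

/* Illustrative TX submit and completion-drain flow for a glue client. */
static int example_xmit(struct k3_udma_glue_tx_channel *tx_chn,
			struct cppi5_host_desc_t *desc, dma_addr_t desc_dma)
{
	/* -ENOMEM means no TX credits are left: back off and retry later */
	return k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
}

static void example_tx_complete(struct k3_udma_glue_tx_channel *tx_chn)
{
	dma_addr_t done_dma;

	/* each successful pop returns one credit to free_pkts */
	while (!k3_udma_glue_pop_tx_chn(tx_chn, &done_dma)) {
		/* look up the descriptor behind done_dma and free it here */
	}
}
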
+
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       u32 txrt_ctl;
+
+       txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
+       xudma_tchanrt_write(tx_chn->udma_tchanx,
+                           UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                           txrt_ctl);
+
+       txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
+                                     UDMA_TCHAN_RT_CTL_REG);
+       txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+       xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+                           txrt_ctl);
+
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
+       return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
+
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");
+
+       xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0);
+
+       xudma_tchanrt_write(tx_chn->udma_tchanx,
+                           UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
+
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                              bool sync)
+{
+       int i = 0;
+       u32 val;
+
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");
+
+       xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+                           UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);
+
+       val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG);
+
+       while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+               val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+                                        UDMA_TCHAN_RT_CTL_REG);
+               udelay(1);
+               if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+                       dev_err(tx_chn->common.dev, "TX tdown timeout\n");
+                       break;
+               }
+               i++;
+       }
+
+       val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+                                UDMA_TCHAN_RT_PEER_RT_EN_REG);
+       if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+               dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
+       k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
+
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                              void *data,
+                              void (*cleanup)(void *data, dma_addr_t desc_dma))
+{
+       dma_addr_t desc_dma;
+       int occ_tx, i, ret;
+
+       /* reset TXCQ as it is not an input ring for udma - expected to be empty */
+       if (tx_chn->ringtxcq)
+               k3_ringacc_ring_reset(tx_chn->ringtxcq);
+
+       /*
+        * The TXQ reset needs to be done in a special way as it is an input
+        * ring for udma and its state is cached by udma, so:
+        * 1) save TXQ occ
+        * 2) clean up TXQ and call callback .cleanup() for each desc
+        * 3) reset TXQ in a special way
+        */
+       occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
+       dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+
+       for (i = 0; i < occ_tx; i++) {
+               ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
+               if (ret) {
+                       dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+                       break;
+               }
+               cleanup(data, desc_dma);
+       }
+
+       k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
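
k3_udma_glue_reset_tx_chn() hands each stuck descriptor back through the caller-supplied cleanup() hook before resetting the ring, so the client can release its own resources. A minimal sketch of such a hook (passing the struct device as the data cookie is an illustrative choice, not required by the API):

/* Illustrative cleanup() hook for k3_udma_glue_reset_tx_chn(). */
static void example_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct device *dev = data;

	/* a real client would look up and free the descriptor behind desc_dma */
	dev_dbg(dev, "reclaiming stuck TX desc at %pad\n", &desc_dma);
}

/* usage: k3_udma_glue_reset_tx_chn(tx_chn, dev, example_tx_cleanup); */
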
+
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       return tx_chn->common.hdesc_size;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);
+
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
+
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
+{
+       tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+
+       return tx_chn->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
+
+static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+       struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
+       int ret;
+
+       memset(&req, 0, sizeof(req));
+
+       req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+                          TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+                          TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+                          TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+                          TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.index = rx_chn->udma_rchan_id;
+       req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
+       /*
+        * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
+        * and udmax impl, so just configure it to an invalid value.
+        * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
+        */
+       req.rxcq_qnum = 0xFFFF;
+       if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+               /* Default flow + extra ones */
+               req.flowid_start = rx_chn->flow_id_base;
+               req.flowid_cnt = rx_chn->flow_num;
+       }
+       req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+
+       ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
+       if (ret)
+               dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
+                       rx_chn->udma_rchan_id, ret);
+
+       return ret;
+}
+
+static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+                                        u32 flow_num)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+       if (IS_ERR_OR_NULL(flow->udma_rflow))
+               return;
+
+       if (flow->ringrxfdq)
+               k3_ringacc_ring_free(flow->ringrxfdq);
+
+       if (flow->ringrx)
+               k3_ringacc_ring_free(flow->ringrx);
+
+       xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+       flow->udma_rflow = NULL;
+       rx_chn->flows_ready--;
+}
+
+static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+                                   u32 flow_idx,
+                                   struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+       const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+       struct device *dev = rx_chn->common.dev;
+       struct ti_sci_msg_rm_udmap_flow_cfg req;
+       int rx_ring_id;
+       int rx_ringfdq_id;
+       int ret = 0;
+
+       flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
+                                          flow->udma_rflow_id);
+       if (IS_ERR(flow->udma_rflow)) {
+               ret = PTR_ERR(flow->udma_rflow);
+               dev_err(dev, "UDMAX rflow get err %d\n", ret);
+               goto err;
+       }
+
+       if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
+               xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+               return -ENODEV;
+       }
+
+       /* request and cfg rings */
+       flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
+                                              flow_cfg->ring_rxq_id, 0);
+       if (!flow->ringrx) {
+               ret = -ENODEV;
+               dev_err(dev, "Failed to get RX ring\n");
+               goto err;
+       }
+
+       flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
+                                                 flow_cfg->ring_rxfdq0_id, 0);
+       if (!flow->ringrxfdq) {
+               ret = -ENODEV;
+               dev_err(dev, "Failed to get RXFDQ ring\n");
+               goto err;
+       }
+
+       ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
+       if (ret) {
+               dev_err(dev, "Failed to cfg ringrx %d\n", ret);
+               goto err;
+       }
+
+       ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
+       if (ret) {
+               dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
+               goto err;
+       }
+
+       if (rx_chn->remote) {
+               rx_ring_id = TI_SCI_RESOURCE_NULL;
+               rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
+       } else {
+               rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+               rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+       }
+
+       memset(&req, 0, sizeof(req));
+
+       req.valid_params =
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.flow_index = flow->udma_rflow_id;
+       if (rx_chn->common.epib)
+               req.rx_einfo_present = 1;
+       if (rx_chn->common.psdata_size)
+               req.rx_psinfo_present = 1;
+       if (flow_cfg->rx_error_handling)
+               req.rx_error_handling = 1;
+       req.rx_desc_type = 0;
+       req.rx_dest_qnum = rx_ring_id;
+       req.rx_src_tag_hi_sel = 0;
+       req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
+       req.rx_dest_tag_hi_sel = 0;
+       req.rx_dest_tag_lo_sel = 0;
+       req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+       req.rx_fdq1_qnum = rx_ringfdq_id;
+       req.rx_fdq2_qnum = rx_ringfdq_id;
+       req.rx_fdq3_qnum = rx_ringfdq_id;
+
+       ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+       if (ret) {
+               dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
+                       ret);
+               goto err;
+       }
+
+       rx_chn->flows_ready++;
+       dev_dbg(dev, "flow%d config done. ready:%d\n",
+               flow->udma_rflow_id, rx_chn->flows_ready);
+
+       return 0;
+err:
+       k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+       return ret;
+}
+
+static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
+{
+       struct device *dev = chn->common.dev;
+
+       dev_dbg(dev, "dump_rx_chn:\n"
+               "udma_rchan_id: %d\n"
+               "src_thread: %08x\n"
+               "dst_thread: %08x\n"
+               "epib: %d\n"
+               "hdesc_size: %u\n"
+               "psdata_size: %u\n"
+               "swdata_size: %u\n"
+               "flow_id_base: %d\n"
+               "flow_num: %d\n",
+               chn->udma_rchan_id,
+               chn->common.src_thread,
+               chn->common.dst_thread,
+               chn->common.epib,
+               chn->common.hdesc_size,
+               chn->common.psdata_size,
+               chn->common.swdata_size,
+               chn->flow_id_base,
+               chn->flow_num);
+}
+
+static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
+                                       char *mark)
+{
+       struct device *dev = chn->common.dev;
+
+       dev_dbg(dev, "=== dump ===> %s\n", mark);
+
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG,
+               xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG,
+               xudma_rchanrt_read(chn->udma_rchanx,
+                                  UDMA_RCHAN_RT_PEER_RT_EN_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG,
+               xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG,
+               xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG));
+       dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG,
+               xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG));
+}
+
+static int
+k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
+                              struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+       int ret;
+
+       /* default rflow */
+       if (cfg->flow_id_use_rxchan_id)
+               return 0;
+
+       /* not GP rflows */
+       if (rx_chn->flow_id_base != -1 &&
+           !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+               return 0;
+
+       /* Allocate range of GP rflows */
+       ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
+                                        rx_chn->flow_id_base,
+                                        rx_chn->flow_num);
+       if (ret < 0) {
+               dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
+                       rx_chn->flow_id_base, rx_chn->flow_num, ret);
+               return ret;
+       }
+       rx_chn->flow_id_base = ret;
+
+       return 0;
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
+                                struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+       struct k3_udma_glue_rx_channel *rx_chn;
+       int ret, i;
+
+       if (cfg->flow_id_num <= 0)
+               return ERR_PTR(-EINVAL);
+
+       if (cfg->flow_id_num != 1 &&
+           (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
+               return ERR_PTR(-EINVAL);
+
+       rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+       if (!rx_chn)
+               return ERR_PTR(-ENOMEM);
+
+       rx_chn->common.dev = dev;
+       rx_chn->common.swdata_size = cfg->swdata_size;
+       rx_chn->remote = false;
+
+       /* parse of udmap channel */
+       ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+                                       &rx_chn->common, false);
+       if (ret)
+               goto err;
+
+       rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+                                               rx_chn->common.psdata_size,
+                                               rx_chn->common.swdata_size);
+
+       /* request and cfg UDMAP RX channel */
+       rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+       if (IS_ERR(rx_chn->udma_rchanx)) {
+               ret = PTR_ERR(rx_chn->udma_rchanx);
+               dev_err(dev, "UDMAX rchanx get err %d\n", ret);
+               goto err;
+       }
+       rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
+
+       rx_chn->flow_num = cfg->flow_id_num;
+       rx_chn->flow_id_base = cfg->flow_id_base;
+
+       /* Use RX channel id as flow id: target dev can't generate flow_id */
+       if (cfg->flow_id_use_rxchan_id)
+               rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+
+       rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+                                    sizeof(*rx_chn->flows), GFP_KERNEL);
+       if (!rx_chn->flows) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+       if (ret)
+               goto err;
+
+       for (i = 0; i < rx_chn->flow_num; i++)
+               rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+       /* request and cfg psi-l */
+       rx_chn->common.dst_thread =
+                       xudma_dev_get_psil_base(rx_chn->common.udmax) +
+                       rx_chn->udma_rchan_id;
+
+       ret = k3_udma_glue_cfg_rx_chn(rx_chn);
+       if (ret) {
+               dev_err(dev, "Failed to cfg rchan %d\n", ret);
+               goto err;
+       }
+
+       /* init the default RX flow (only valid when flow_id_num == 1) */
+       if (cfg->def_flow_cfg) {
+               ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
+               if (ret)
+                       goto err;
+       }
+
+       ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+                                   rx_chn->common.src_thread,
+                                   rx_chn->common.dst_thread);
+       if (ret) {
+               dev_err(dev, "PSI-L request err %d\n", ret);
+               goto err;
+       }
+
+       rx_chn->psil_paired = true;
+
+       /* reset RX RT registers */
+       k3_udma_glue_disable_rx_chn(rx_chn);
+
+       k3_udma_glue_dump_rx_chn(rx_chn);
+
+       return rx_chn;
+
+err:
+       k3_udma_glue_release_rx_chn(rx_chn);
+       return ERR_PTR(ret);
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
+                                  struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+       struct k3_udma_glue_rx_channel *rx_chn;
+       int ret, i;
+
+       if (cfg->flow_id_num <= 0 ||
+           cfg->flow_id_use_rxchan_id ||
+           cfg->def_flow_cfg ||
+           cfg->flow_id_base < 0)
+               return ERR_PTR(-EINVAL);
+
+       /*
+        * The remote RX channel is under control of a remote CPU core, so
+        * Linux can only request it and manipulate it via its dedicated
+        * RX flows.
+        */
+
+       rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+       if (!rx_chn)
+               return ERR_PTR(-ENOMEM);
+
+       rx_chn->common.dev = dev;
+       rx_chn->common.swdata_size = cfg->swdata_size;
+       rx_chn->remote = true;
+       rx_chn->udma_rchan_id = -1;
+       rx_chn->flow_num = cfg->flow_id_num;
+       rx_chn->flow_id_base = cfg->flow_id_base;
+       rx_chn->psil_paired = false;
+
+       /* parse the udmap channel */
+       ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+                                       &rx_chn->common, false);
+       if (ret)
+               goto err;
+
+       rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+                                               rx_chn->common.psdata_size,
+                                               rx_chn->common.swdata_size);
+
+       rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+                                    sizeof(*rx_chn->flows), GFP_KERNEL);
+       if (!rx_chn->flows) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+       if (ret)
+               goto err;
+
+       for (i = 0; i < rx_chn->flow_num; i++)
+               rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+       k3_udma_glue_dump_rx_chn(rx_chn);
+
+       return rx_chn;
+
+err:
+       k3_udma_glue_release_rx_chn(rx_chn);
+       return ERR_PTR(ret);
+}
+
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
+                           struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+       if (cfg->remote)
+               return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
+       else
+               return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
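+
+/*
+ * Usage sketch (illustrative only; the "rx" channel name, swdata size and
+ * flow count below are assumptions, not requirements of this API): a client
+ * driver fills a k3_udma_glue_rx_channel_cfg and requests the channel by
+ * its DT name. With flow_id_base = -1 the glue layer allocates a free GP
+ * rflow range; a default flow may be configured via def_flow_cfg only
+ * when flow_id_num == 1.
+ *
+ *     struct k3_udma_glue_rx_channel_cfg cfg = { };
+ *     struct k3_udma_glue_rx_channel *rx_chn;
+ *
+ *     cfg.swdata_size = 16;
+ *     cfg.flow_id_num = 1;
+ *     cfg.flow_id_base = -1;
+ *     cfg.remote = false;
+ *
+ *     rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
+ *     if (IS_ERR(rx_chn))
+ *             return PTR_ERR(rx_chn);
+ */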
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       int i;
+
+       if (IS_ERR_OR_NULL(rx_chn->common.udmax))
+               return;
+
+       if (rx_chn->psil_paired) {
+               xudma_navss_psil_unpair(rx_chn->common.udmax,
+                                       rx_chn->common.src_thread,
+                                       rx_chn->common.dst_thread);
+               rx_chn->psil_paired = false;
+       }
+
+       for (i = 0; i < rx_chn->flow_num; i++)
+               k3_udma_glue_release_rx_flow(rx_chn, i);
+
+       if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+               xudma_free_gp_rflow_range(rx_chn->common.udmax,
+                                         rx_chn->flow_id_base,
+                                         rx_chn->flow_num);
+
+       if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
+               xudma_rchan_put(rx_chn->common.udmax,
+                               rx_chn->udma_rchanx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
+
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+                             u32 flow_idx,
+                             struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+       if (flow_idx >= rx_chn->flow_num)
+               return -EINVAL;
+
+       return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
+
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+                                   u32 flow_idx)
+{
+       struct k3_udma_glue_rx_flow *flow;
+
+       if (flow_idx >= rx_chn->flow_num)
+               return -EINVAL;
+
+       flow = &rx_chn->flows[flow_idx];
+
+       return k3_ringacc_get_ring_id(flow->ringrxfdq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
+
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       return rx_chn->flow_id_base;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
+
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+                               u32 flow_idx)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+       const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+       struct device *dev = rx_chn->common.dev;
+       struct ti_sci_msg_rm_udmap_flow_cfg req;
+       int rx_ring_id;
+       int rx_ringfdq_id;
+       int ret = 0;
+
+       if (!rx_chn->remote)
+               return -EINVAL;
+
+       rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+       rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+
+       memset(&req, 0, sizeof(req));
+
+       req.valid_params =
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.flow_index = flow->udma_rflow_id;
+       req.rx_dest_qnum = rx_ring_id;
+       req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+       req.rx_fdq1_qnum = rx_ringfdq_id;
+       req.rx_fdq2_qnum = rx_ringfdq_id;
+       req.rx_fdq3_qnum = rx_ringfdq_id;
+
+       ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+       if (ret) {
+               dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
+                       ret);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
+
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+                                u32 flow_idx)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+       const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+       struct device *dev = rx_chn->common.dev;
+       struct ti_sci_msg_rm_udmap_flow_cfg req;
+       int ret = 0;
+
+       if (!rx_chn->remote)
+               return -EINVAL;
+
+       memset(&req, 0, sizeof(req));
+       req.valid_params =
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+                       TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+       req.nav_id = tisci_rm->tisci_dev_id;
+       req.flow_index = flow->udma_rflow_id;
+       req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
+       req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
+       req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
+       req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
+       req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
+
+       ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+       if (ret) {
+               dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
+                       ret);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
+
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       u32 rxrt_ctl;
+
+       if (rx_chn->remote)
+               return -EINVAL;
+
+       if (rx_chn->flows_ready < rx_chn->flow_num)
+               return -EINVAL;
+
+       rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
+                                     UDMA_RCHAN_RT_CTL_REG);
+       rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+       xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG,
+                           rxrt_ctl);
+
+       xudma_rchanrt_write(rx_chn->udma_rchanx,
+                           UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                           UDMA_PEER_RT_EN_ENABLE);
+
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
+       return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
+
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
+
+       xudma_rchanrt_write(rx_chn->udma_rchanx,
+                           UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                           0);
+       xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0);
+
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
+
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+                              bool sync)
+{
+       int i = 0;
+       u32 val;
+
+       if (rx_chn->remote)
+               return;
+
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
+
+       xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                           UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
+
+       val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG);
+
+       while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+               val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+                                        UDMA_RCHAN_RT_CTL_REG);
+               udelay(1);
+               if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+                       dev_err(rx_chn->common.dev, "RX tdown timeout\n");
+                       break;
+               }
+               i++;
+       }
+
+       val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+                                UDMA_RCHAN_RT_PEER_RT_EN_REG);
+       if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+               dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n");
+       k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
+
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_num, void *data,
+               void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+       struct device *dev = rx_chn->common.dev;
+       dma_addr_t desc_dma;
+       int occ_rx, i, ret;
+
+       /* reset RXCQ as it is not an input to udma - expected to be empty */
+       occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
+       dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
+       if (flow->ringrx)
+               k3_ringacc_ring_reset(flow->ringrx);
+
+       /* Skip the RX FDQ if one FDQ is used for the whole set of flows */
+       if (skip_fdq)
+               return;
+
+       /*
+        * The RX FDQ must be reset in a special way, as it is an input to
+        * udma and its state is cached by udma, so:
+        * 1) save the RX FDQ occupancy
+        * 2) clean up the RX FDQ and call the .cleanup() callback for each desc
+        * 3) reset the RX FDQ in a special way
+        */
+       occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
+       dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
+
+       for (i = 0; i < occ_rx; i++) {
+               ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
+               if (ret) {
+                       dev_err(dev, "RX reset pop %d\n", ret);
+                       break;
+               }
+               cleanup(data, desc_dma);
+       }
+
+       k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
+
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+                            u32 flow_num, struct cppi5_host_desc_t *desc_rx,
+                            dma_addr_t desc_dma)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+       return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
+
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+                           u32 flow_num, dma_addr_t *desc_dma)
+{
+       struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+       return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
+
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+                           u32 flow_num)
+{
+       struct k3_udma_glue_rx_flow *flow;
+
+       flow = &rx_chn->flows[flow_num];
+
+       flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+
+       return flow->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
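+
+/*
+ * RX data path sketch (illustrative; desc_rx/desc_dma stand for a CPPI5
+ * host descriptor prepared and mapped by the caller): free descriptors are
+ * pushed to the flow's FDQ, the channel is enabled once all flows are
+ * configured, and completed descriptors are popped from the receive ring,
+ * typically from the ring interrupt returned by k3_udma_glue_rx_get_irq().
+ *
+ *     ret = k3_udma_glue_push_rx_chn(rx_chn, 0, desc_rx, desc_dma);
+ *     ret = k3_udma_glue_enable_rx_chn(rx_chn);
+ *     irq = k3_udma_glue_rx_get_irq(rx_chn, 0);
+ *
+ *     Then, in the flow's interrupt handler:
+ *     ret = k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma);
+ */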
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
new file mode 100644 (file)
index 0000000..0b8f3dd
--- /dev/null
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+       return navss_psil_pair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_pair);
+
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+       return navss_psil_unpair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_unpair);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+{
+       struct device_node *udma_node = np;
+       struct platform_device *pdev;
+       struct udma_dev *ud;
+
+       if (property) {
+               udma_node = of_parse_phandle(np, property, 0);
+               if (!udma_node) {
+                       pr_err("UDMA node is not found\n");
+                       return ERR_PTR(-ENODEV);
+               }
+       }
+
+       pdev = of_find_device_by_node(udma_node);
+       if (!pdev) {
+               pr_debug("UDMA device not found\n");
+               return ERR_PTR(-EPROBE_DEFER);
+       }
+
+       if (np != udma_node)
+               of_node_put(udma_node);
+
+       ud = platform_get_drvdata(pdev);
+       if (!ud) {
+               pr_debug("UDMA has not been probed\n");
+               return ERR_PTR(-EPROBE_DEFER);
+       }
+
+       return ud;
+}
+EXPORT_SYMBOL(of_xudma_dev_get);
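+
+/*
+ * Usage sketch (the "ti,udma" phandle property name below is only an
+ * example assumption): a consumer resolves the UDMA device behind a
+ * phandle in its own node and defers probing until the UDMA driver has
+ * bound.
+ *
+ *     struct udma_dev *ud;
+ *
+ *     ud = of_xudma_dev_get(dev->of_node, "ti,udma");
+ *     if (IS_ERR(ud))
+ *             return PTR_ERR(ud);
+ */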
+
+u32 xudma_dev_get_psil_base(struct udma_dev *ud)
+{
+       return ud->psil_base;
+}
+EXPORT_SYMBOL(xudma_dev_get_psil_base);
+
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
+{
+       return &ud->tisci_rm;
+}
+EXPORT_SYMBOL(xudma_dev_get_tisci_rm);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+       return __udma_alloc_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);
+
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+       return __udma_free_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_free_gp_rflow_range);
+
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
+{
+       return !test_bit(id, ud->rflow_gp_map);
+}
+EXPORT_SYMBOL(xudma_rflow_is_gp);
+
+#define XUDMA_GET_PUT_RESOURCE(res)                                    \
+struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id)      \
+{                                                                      \
+       return __udma_reserve_##res(ud, false, id);                     \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##_get);                                      \
+                                                                       \
+void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p)      \
+{                                                                      \
+       clear_bit(p->id, ud->res##_map);                                \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##_put)
+XUDMA_GET_PUT_RESOURCE(tchan);
+XUDMA_GET_PUT_RESOURCE(rchan);
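+
+/*
+ * For reference, XUDMA_GET_PUT_RESOURCE(tchan) above expands to (sketch of
+ * the preprocessor output):
+ *
+ *     struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id)
+ *     {
+ *             return __udma_reserve_tchan(ud, false, id);
+ *     }
+ *     EXPORT_SYMBOL(xudma_tchan_get);
+ *
+ *     void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p)
+ *     {
+ *             clear_bit(p->id, ud->tchan_map);
+ *     }
+ *     EXPORT_SYMBOL(xudma_tchan_put);
+ */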
+
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
+{
+       return __udma_get_rflow(ud, id);
+}
+EXPORT_SYMBOL(xudma_rflow_get);
+
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
+{
+       __udma_put_rflow(ud, p);
+}
+EXPORT_SYMBOL(xudma_rflow_put);
+
+#define XUDMA_GET_RESOURCE_ID(res)                                     \
+int xudma_##res##_get_id(struct udma_##res *p)                         \
+{                                                                      \
+       return p->id;                                                   \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##_get_id)
+XUDMA_GET_RESOURCE_ID(tchan);
+XUDMA_GET_RESOURCE_ID(rchan);
+XUDMA_GET_RESOURCE_ID(rflow);
+
+/* Exported register access functions */
+#define XUDMA_RT_IO_FUNCTIONS(res)                                     \
+u32 xudma_##res##rt_read(struct udma_##res *p, int reg)                        \
+{                                                                      \
+       return udma_##res##rt_read(p, reg);                             \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##rt_read);                                   \
+                                                                       \
+void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val)     \
+{                                                                      \
+       udma_##res##rt_write(p, reg, val);                              \
+}                                                                      \
+EXPORT_SYMBOL(xudma_##res##rt_write)
+XUDMA_RT_IO_FUNCTIONS(tchan);
+XUDMA_RT_IO_FUNCTIONS(rchan);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
new file mode 100644 (file)
index 0000000..ea79c2d
--- /dev/null
@@ -0,0 +1,3432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/ti-cppi5.h>
+
+#include "../virt-dma.h"
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct udma_static_tr {
+       u8 elsize; /* RPSTR0 */
+       u16 elcnt; /* RPSTR0 */
+       u16 bstcnt; /* RPSTR1 */
+};
+
+#define K3_UDMA_MAX_RFLOWS             1024
+#define K3_UDMA_DEFAULT_RING_SIZE      16
+
+/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
+#define UDMA_RFLOW_SRCTAG_NONE         0
+#define UDMA_RFLOW_SRCTAG_CFG_TAG      1
+#define UDMA_RFLOW_SRCTAG_FLOW_ID      2
+#define UDMA_RFLOW_SRCTAG_SRC_TAG      4
+
+#define UDMA_RFLOW_DSTTAG_NONE         0
+#define UDMA_RFLOW_DSTTAG_CFG_TAG      1
+#define UDMA_RFLOW_DSTTAG_FLOW_ID      2
+#define UDMA_RFLOW_DSTTAG_DST_TAG_LO   4
+#define UDMA_RFLOW_DSTTAG_DST_TAG_HI   5
+
+struct udma_chan;
+
+enum udma_mmr {
+       MMR_GCFG = 0,
+       MMR_RCHANRT,
+       MMR_TCHANRT,
+       MMR_LAST,
+};
+
+static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+
+struct udma_tchan {
+       void __iomem *reg_rt;
+
+       int id;
+       struct k3_ring *t_ring; /* Transmit ring */
+       struct k3_ring *tc_ring; /* Transmit Completion ring */
+};
+
+struct udma_rflow {
+       int id;
+       struct k3_ring *fd_ring; /* Free Descriptor ring */
+       struct k3_ring *r_ring; /* Receive ring */
+};
+
+struct udma_rchan {
+       void __iomem *reg_rt;
+
+       int id;
+};
+
+#define UDMA_FLAG_PDMA_ACC32           BIT(0)
+#define UDMA_FLAG_PDMA_BURST           BIT(1)
+
+struct udma_match_data {
+       u32 psil_base;
+       bool enable_memcpy_support;
+       u32 flags;
+       u32 statictr_z_mask;
+       u32 rchan_oes_offset;
+
+       u8 tpl_levels;
+       u32 level_start_idx[];
+};
+
+struct udma_dev {
+       struct dma_device ddev;
+       struct device *dev;
+       void __iomem *mmrs[MMR_LAST];
+       const struct udma_match_data *match_data;
+
+       size_t desc_align; /* alignment to use for descriptors */
+
+       struct udma_tisci_rm tisci_rm;
+
+       struct k3_ringacc *ringacc;
+
+       struct work_struct purge_work;
+       struct list_head desc_to_purge;
+       spinlock_t lock;
+
+       int tchan_cnt;
+       int echan_cnt;
+       int rchan_cnt;
+       int rflow_cnt;
+       unsigned long *tchan_map;
+       unsigned long *rchan_map;
+       unsigned long *rflow_gp_map;
+       unsigned long *rflow_gp_map_allocated;
+       unsigned long *rflow_in_use;
+
+       struct udma_tchan *tchans;
+       struct udma_rchan *rchans;
+       struct udma_rflow *rflows;
+
+       struct udma_chan *channels;
+       u32 psil_base;
+};
+
+struct udma_hwdesc {
+       size_t cppi5_desc_size;
+       void *cppi5_desc_vaddr;
+       dma_addr_t cppi5_desc_paddr;
+
+       /* TR descriptor internal pointers */
+       void *tr_req_base;
+       struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_desc {
+       struct virt_dma_desc vd;
+
+       bool terminated;
+
+       enum dma_transfer_direction dir;
+
+       struct udma_static_tr static_tr;
+       u32 residue;
+
+       unsigned int sglen;
+       unsigned int desc_idx; /* Only used for cyclic in packet mode */
+       unsigned int tr_idx;
+
+       u32 metadata_size;
+       void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
+
+       unsigned int hwdesc_count;
+       struct udma_hwdesc hwdesc[0];
+};
+
+enum udma_chan_state {
+       UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
+       UDMA_CHAN_IS_ACTIVE, /* Normal operation */
+       UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
+};
+
+struct udma_tx_drain {
+       struct delayed_work work;
+       unsigned long jiffie;
+       u32 residue;
+};
+
+struct udma_chan_config {
+       bool pkt_mode; /* TR or packet */
+       bool needs_epib; /* whether EPIB is needed for the communication */
+       u32 psd_size; /* size of Protocol Specific Data */
+       u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
+       u32 hdesc_size; /* Size of a packet descriptor in packet mode */
+       bool notdpkt; /* Suppress sending TDC packet */
+       int remote_thread_id;
+       u32 src_thread;
+       u32 dst_thread;
+       enum psil_endpoint_type ep_type;
+       bool enable_acc32;
+       bool enable_burst;
+       enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+
+       enum dma_transfer_direction dir;
+};
+
+struct udma_chan {
+       struct virt_dma_chan vc;
+       struct dma_slave_config cfg;
+       struct udma_dev *ud;
+       struct udma_desc *desc;
+       struct udma_desc *terminated_desc;
+       struct udma_static_tr static_tr;
+       char *name;
+
+       struct udma_tchan *tchan;
+       struct udma_rchan *rchan;
+       struct udma_rflow *rflow;
+
+       bool psil_paired;
+
+       int irq_num_ring;
+       int irq_num_udma;
+
+       bool cyclic;
+       bool paused;
+
+       enum udma_chan_state state;
+       struct completion teardown_completed;
+
+       struct udma_tx_drain tx_drain;
+
+       u32 bcnt; /* number of bytes completed since the start of the channel */
+       u32 in_ring_cnt; /* number of descriptors in flight */
+
+       /* Channel configuration parameters */
+       struct udma_chan_config config;
+
+       /* dmapool for packet mode descriptors */
+       bool use_dma_pool;
+       struct dma_pool *hdesc_pool;
+
+       u32 id;
+};
+
+static inline struct udma_dev *to_udma_dev(struct dma_device *d)
+{
+       return container_of(d, struct udma_dev, ddev);
+}
+
+static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct udma_chan, vc.chan);
+}
+
+static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct udma_desc, vd.tx);
+}
+
+/* Generic register access functions */
+static inline u32 udma_read(void __iomem *base, int reg)
+{
+       return readl(base + reg);
+}
+
+static inline void udma_write(void __iomem *base, int reg, u32 val)
+{
+       writel(val, base + reg);
+}
+
+static inline void udma_update_bits(void __iomem *base, int reg,
+                                   u32 mask, u32 val)
+{
+       u32 tmp, orig;
+
+       orig = readl(base + reg);
+       tmp = orig & ~mask;
+       tmp |= (val & mask);
+
+       if (tmp != orig)
+               writel(tmp, base + reg);
+}
+
+/* TCHANRT */
+static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
+{
+       if (!tchan)
+               return 0;
+       return udma_read(tchan->reg_rt, reg);
+}
+
+static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
+                                     u32 val)
+{
+       if (!tchan)
+               return;
+       udma_write(tchan->reg_rt, reg, val);
+}
+
+static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
+                                           u32 mask, u32 val)
+{
+       if (!tchan)
+               return;
+       udma_update_bits(tchan->reg_rt, reg, mask, val);
+}
+
+/* RCHANRT */
+static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
+{
+       if (!rchan)
+               return 0;
+       return udma_read(rchan->reg_rt, reg);
+}
+
+static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
+                                     u32 val)
+{
+       if (!rchan)
+               return;
+       udma_write(rchan->reg_rt, reg, val);
+}
+
+static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
+                                           u32 mask, u32 val)
+{
+       if (!rchan)
+               return;
+       udma_update_bits(rchan->reg_rt, reg, mask, val);
+}
+
+static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+       dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+       return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
+                                             tisci_rm->tisci_navss_dev_id,
+                                             src_thread, dst_thread);
+}
+
+static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+                            u32 dst_thread)
+{
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+       dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+       return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
+                                               tisci_rm->tisci_navss_dev_id,
+                                               src_thread, dst_thread);
+}
+
+static void udma_reset_uchan(struct udma_chan *uc)
+{
+       memset(&uc->config, 0, sizeof(uc->config));
+       uc->config.remote_thread_id = -1;
+       uc->state = UDMA_CHAN_IS_IDLE;
+}
+
+static void udma_dump_chan_stdata(struct udma_chan *uc)
+{
+       struct device *dev = uc->ud->dev;
+       u32 offset;
+       int i;
+
+       if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
+               dev_dbg(dev, "TCHAN State data:\n");
+               for (i = 0; i < 32; i++) {
+                       offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
+                       dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
+                               udma_tchanrt_read(uc->tchan, offset));
+               }
+       }
+
+       if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
+               dev_dbg(dev, "RCHAN State data:\n");
+               for (i = 0; i < 32; i++) {
+                       offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
+                       dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
+                               udma_rchanrt_read(uc->rchan, offset));
+               }
+       }
+}
+
+static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
+                                                   int idx)
+{
+       return d->hwdesc[idx].cppi5_desc_paddr;
+}
+
+static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
+{
+       return d->hwdesc[idx].cppi5_desc_vaddr;
+}
+
+static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
+                                                  dma_addr_t paddr)
+{
+       struct udma_desc *d = uc->terminated_desc;
+
+       if (d) {
+               dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+                                                                  d->desc_idx);
+
+               if (desc_paddr != paddr)
+                       d = NULL;
+       }
+
+       if (!d) {
+               d = uc->desc;
+               if (d) {
+                       dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+                                                               d->desc_idx);
+
+                       if (desc_paddr != paddr)
+                               d = NULL;
+               }
+       }
+
+       return d;
+}
+
+static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
+{
+       if (uc->use_dma_pool) {
+               int i;
+
+               for (i = 0; i < d->hwdesc_count; i++) {
+                       if (!d->hwdesc[i].cppi5_desc_vaddr)
+                               continue;
+
+                       dma_pool_free(uc->hdesc_pool,
+                                     d->hwdesc[i].cppi5_desc_vaddr,
+                                     d->hwdesc[i].cppi5_desc_paddr);
+
+                       d->hwdesc[i].cppi5_desc_vaddr = NULL;
+               }
+       } else if (d->hwdesc[0].cppi5_desc_vaddr) {
+               struct udma_dev *ud = uc->ud;
+
+               dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+                                 d->hwdesc[0].cppi5_desc_vaddr,
+                                 d->hwdesc[0].cppi5_desc_paddr);
+
+               d->hwdesc[0].cppi5_desc_vaddr = NULL;
+       }
+}
+
+static void udma_purge_desc_work(struct work_struct *work)
+{
+       struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
+       struct virt_dma_desc *vd, *_vd;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&ud->lock, flags);
+       list_splice_tail_init(&ud->desc_to_purge, &head);
+       spin_unlock_irqrestore(&ud->lock, flags);
+
+       list_for_each_entry_safe(vd, _vd, &head, node) {
+               struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+               struct udma_desc *d = to_udma_desc(&vd->tx);
+
+               udma_free_hwdesc(uc, d);
+               list_del(&vd->node);
+               kfree(d);
+       }
+
+       /* If more to purge, schedule the work again */
+       if (!list_empty(&ud->desc_to_purge))
+               schedule_work(&ud->purge_work);
+}
+
+static void udma_desc_free(struct virt_dma_desc *vd)
+{
+       struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
+       struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+       struct udma_desc *d = to_udma_desc(&vd->tx);
+       unsigned long flags;
+
+       if (uc->terminated_desc == d)
+               uc->terminated_desc = NULL;
+
+       if (uc->use_dma_pool) {
+               udma_free_hwdesc(uc, d);
+               kfree(d);
+               return;
+       }
+
+       spin_lock_irqsave(&ud->lock, flags);
+       list_add_tail(&vd->node, &ud->desc_to_purge);
+       spin_unlock_irqrestore(&ud->lock, flags);
+
+       schedule_work(&ud->purge_work);
+}
+
+static bool udma_is_chan_running(struct udma_chan *uc)
+{
+       u32 trt_ctl = 0;
+       u32 rrt_ctl = 0;
+
+       if (uc->tchan)
+               trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+       if (uc->rchan)
+               rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
+
+       if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
+               return true;
+
+       return false;
+}
+
+static bool udma_is_chan_paused(struct udma_chan *uc)
+{
+       u32 val, pause_mask;
+
+       switch (uc->desc->dir) {
+       case DMA_DEV_TO_MEM:
+               val = udma_rchanrt_read(uc->rchan,
+                                       UDMA_RCHAN_RT_PEER_RT_EN_REG);
+               pause_mask = UDMA_PEER_RT_EN_PAUSE;
+               break;
+       case DMA_MEM_TO_DEV:
+               val = udma_tchanrt_read(uc->tchan,
+                                       UDMA_TCHAN_RT_PEER_RT_EN_REG);
+               pause_mask = UDMA_PEER_RT_EN_PAUSE;
+               break;
+       case DMA_MEM_TO_MEM:
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+               pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
+               break;
+       default:
+               return false;
+       }
+
+       if (val & pause_mask)
+               return true;
+
+       return false;
+}
+
+static void udma_sync_for_device(struct udma_chan *uc, int idx)
+{
+       struct udma_desc *d = uc->desc;
+
+       if (uc->cyclic && uc->config.pkt_mode) {
+               dma_sync_single_for_device(uc->ud->dev,
+                                          d->hwdesc[idx].cppi5_desc_paddr,
+                                          d->hwdesc[idx].cppi5_desc_size,
+                                          DMA_TO_DEVICE);
+       } else {
+               int i;
+
+               for (i = 0; i < d->hwdesc_count; i++) {
+                       if (!d->hwdesc[i].cppi5_desc_vaddr)
+                               continue;
+
+                       dma_sync_single_for_device(uc->ud->dev,
+                                               d->hwdesc[i].cppi5_desc_paddr,
+                                               d->hwdesc[i].cppi5_desc_size,
+                                               DMA_TO_DEVICE);
+               }
+       }
+}
+
+static int udma_push_to_ring(struct udma_chan *uc, int idx)
+{
+       struct udma_desc *d = uc->desc;
+
+       struct k3_ring *ring = NULL;
+       int ret = -EINVAL;
+
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               ring = uc->rflow->fd_ring;
+               break;
+       case DMA_MEM_TO_DEV:
+       case DMA_MEM_TO_MEM:
+               ring = uc->tchan->t_ring;
+               break;
+       default:
+               break;
+       }
+
+       if (ring) {
+               dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+
+               wmb(); /* Ensure that writes are not moved past this point */
+               udma_sync_for_device(uc, idx);
+               ret = k3_ringacc_ring_push(ring, &desc_addr);
+               uc->in_ring_cnt++;
+       }
+
+       return ret;
+}
+
+static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
+{
+       struct k3_ring *ring = NULL;
+       int ret = -ENOENT;
+
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               ring = uc->rflow->r_ring;
+               break;
+       case DMA_MEM_TO_DEV:
+       case DMA_MEM_TO_MEM:
+               ring = uc->tchan->tc_ring;
+               break;
+       default:
+               break;
+       }
+
+       if (ring && k3_ringacc_ring_get_occ(ring)) {
+               struct udma_desc *d = NULL;
+
+               ret = k3_ringacc_ring_pop(ring, addr);
+               if (ret)
+                       return ret;
+
+               /* Teardown completion */
+               if (cppi5_desc_is_tdcm(*addr))
+                       return ret;
+
+               d = udma_udma_desc_from_paddr(uc, *addr);
+
+               if (d)
+                       dma_sync_single_for_cpu(uc->ud->dev, *addr,
+                                               d->hwdesc[0].cppi5_desc_size,
+                                               DMA_FROM_DEVICE);
+               rmb(); /* Ensure that reads are not moved before this point */
+
+               if (!ret)
+                       uc->in_ring_cnt--;
+       }
+
+       return ret;
+}
+
+static void udma_reset_rings(struct udma_chan *uc)
+{
+       struct k3_ring *ring1 = NULL;
+       struct k3_ring *ring2 = NULL;
+
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               if (uc->rchan) {
+                       ring1 = uc->rflow->fd_ring;
+                       ring2 = uc->rflow->r_ring;
+               }
+               break;
+       case DMA_MEM_TO_DEV:
+       case DMA_MEM_TO_MEM:
+               if (uc->tchan) {
+                       ring1 = uc->tchan->t_ring;
+                       ring2 = uc->tchan->tc_ring;
+               }
+               break;
+       default:
+               break;
+       }
+
+       if (ring1)
+               k3_ringacc_ring_reset_dma(ring1,
+                                         k3_ringacc_ring_get_occ(ring1));
+       if (ring2)
+               k3_ringacc_ring_reset(ring2);
+
+       /* make sure we are not leaking memory via a stalled descriptor */
+       if (uc->terminated_desc) {
+               udma_desc_free(&uc->terminated_desc->vd);
+               uc->terminated_desc = NULL;
+       }
+
+       uc->in_ring_cnt = 0;
+}
+
+static void udma_reset_counters(struct udma_chan *uc)
+{
+       u32 val;
+
+       if (uc->tchan) {
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
+
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
+
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
+
+               val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
+       }
+
+       if (uc->rchan) {
+               val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
+
+               val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
+
+               val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
+
+               val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
+       }
+
+       uc->bcnt = 0;
+}
+
+static int udma_reset_chan(struct udma_chan *uc, bool hard)
+{
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+               break;
+       case DMA_MEM_TO_DEV:
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Reset all counters */
+       udma_reset_counters(uc);
+
+       /* Hard reset: re-initialize the channel to fully reset it */
+       if (hard) {
+               struct udma_chan_config ucc_backup;
+               int ret;
+
+               memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
+               uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
+
+               /* restore the channel configuration */
+               memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
+               ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
+               if (ret)
+                       return ret;
+
+               /*
+                * Setting forced teardown after a forced reset helps to
+                * recover the rchan.
+                */
+               if (uc->config.dir == DMA_DEV_TO_MEM)
+                       udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+                                          UDMA_CHAN_RT_CTL_EN |
+                                          UDMA_CHAN_RT_CTL_TDOWN |
+                                          UDMA_CHAN_RT_CTL_FTDOWN);
+       }
+       uc->state = UDMA_CHAN_IS_IDLE;
+
+       return 0;
+}
+
+static void udma_start_desc(struct udma_chan *uc)
+{
+       struct udma_chan_config *ucc = &uc->config;
+
+       if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+               int i;
+
+               /* Push all descriptors to ring for packet mode cyclic or RX */
+               for (i = 0; i < uc->desc->sglen; i++)
+                       udma_push_to_ring(uc, i);
+       } else {
+               udma_push_to_ring(uc, 0);
+       }
+}
+
+static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
+{
+       /* Only PDMAs have staticTR */
+       if (uc->config.ep_type == PSIL_EP_NATIVE)
+               return false;
+
+       /* Check if the staticTR configuration has changed for TX */
+       if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
+               return true;
+
+       return false;
+}
+
+static int udma_start(struct udma_chan *uc)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
+
+       if (!vd) {
+               uc->desc = NULL;
+               return -ENOENT;
+       }
+
+       list_del(&vd->node);
+
+       uc->desc = to_udma_desc(&vd->tx);
+
+       /* Channel is already running and does not need reconfiguration */
+       if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
+               udma_start_desc(uc);
+               goto out;
+       }
+
+       /* Make sure that we clear the teardown bit, if it is set */
+       udma_reset_chan(uc, false);
+
+       /* Push descriptors before we start the channel */
+       udma_start_desc(uc);
+
+       switch (uc->desc->dir) {
+       case DMA_DEV_TO_MEM:
+               /* Config remote TR */
+               if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+                       u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+                                 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+                       const struct udma_match_data *match_data =
+                                                       uc->ud->match_data;
+
+                       if (uc->config.enable_acc32)
+                               val |= PDMA_STATIC_TR_XY_ACC32;
+                       if (uc->config.enable_burst)
+                               val |= PDMA_STATIC_TR_XY_BURST;
+
+                       udma_rchanrt_write(uc->rchan,
+                               UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+                       udma_rchanrt_write(uc->rchan,
+                               UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
+                               PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
+                                                match_data->statictr_z_mask));
+
+                       /* save the current staticTR configuration */
+                       memcpy(&uc->static_tr, &uc->desc->static_tr,
+                              sizeof(uc->static_tr));
+               }
+
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN);
+
+               /* Enable remote */
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                                  UDMA_PEER_RT_EN_ENABLE);
+
+               break;
+       case DMA_MEM_TO_DEV:
+               /* Config remote TR */
+               if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+                       u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+                                 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+
+                       if (uc->config.enable_acc32)
+                               val |= PDMA_STATIC_TR_XY_ACC32;
+                       if (uc->config.enable_burst)
+                               val |= PDMA_STATIC_TR_XY_BURST;
+
+                       udma_tchanrt_write(uc->tchan,
+                               UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+                       /* save the current staticTR configuration */
+                       memcpy(&uc->static_tr, &uc->desc->static_tr,
+                              sizeof(uc->static_tr));
+               }
+
+               /* Enable remote */
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                                  UDMA_PEER_RT_EN_ENABLE);
+
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN);
+
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN);
+
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       uc->state = UDMA_CHAN_IS_ACTIVE;
+out:
+
+       return 0;
+}
+
+static int udma_stop(struct udma_chan *uc)
+{
+       enum udma_chan_state old_state = uc->state;
+
+       uc->state = UDMA_CHAN_IS_TERMINATING;
+       reinit_completion(&uc->teardown_completed);
+
+       switch (uc->config.dir) {
+       case DMA_DEV_TO_MEM:
+               udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                                  UDMA_PEER_RT_EN_ENABLE |
+                                  UDMA_PEER_RT_EN_TEARDOWN);
+               break;
+       case DMA_MEM_TO_DEV:
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                                  UDMA_PEER_RT_EN_ENABLE |
+                                  UDMA_PEER_RT_EN_FLUSH);
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN |
+                                  UDMA_CHAN_RT_CTL_TDOWN);
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                  UDMA_CHAN_RT_CTL_EN |
+                                  UDMA_CHAN_RT_CTL_TDOWN);
+               break;
+       default:
+               uc->state = old_state;
+               complete_all(&uc->teardown_completed);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
+{
+       struct udma_desc *d = uc->desc;
+       struct cppi5_host_desc_t *h_desc;
+
+       h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
+       cppi5_hdesc_reset_to_original(h_desc);
+       udma_push_to_ring(uc, d->desc_idx);
+       d->desc_idx = (d->desc_idx + 1) % d->sglen;
+}
+
+static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
+{
+       struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+       memcpy(d->metadata, h_desc->epib, d->metadata_size);
+}
+
+static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
+{
+       u32 peer_bcnt, bcnt;
+
+       /* Only TX towards PDMA is affected */
+       if (uc->config.ep_type == PSIL_EP_NATIVE ||
+           uc->config.dir != DMA_MEM_TO_DEV)
+               return true;
+
+       peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+       bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+
+       if (peer_bcnt < bcnt) {
+               uc->tx_drain.residue = bcnt - peer_bcnt;
+               uc->tx_drain.jiffie = jiffies;
+               return false;
+       }
+
+       return true;
+}
+
+static void udma_check_tx_completion(struct work_struct *work)
+{
+       struct udma_chan *uc = container_of(work, typeof(*uc),
+                                           tx_drain.work.work);
+       bool desc_done = true;
+       u32 residue_diff;
+       unsigned long jiffie_diff, delay;
+
+       if (uc->desc) {
+               residue_diff = uc->tx_drain.residue;
+               jiffie_diff = uc->tx_drain.jiffie;
+               desc_done = udma_is_desc_really_done(uc, uc->desc);
+       }
+
+       if (!desc_done) {
+               jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
+               residue_diff -= uc->tx_drain.residue;
+               if (jiffie_diff)
+                       residue_diff /= jiffie_diff;
+               if (residue_diff) {
+                       /* Try to guess when we should check next time */
+                       delay = uc->tx_drain.residue / residue_diff / 3;
+                       if (jiffies_to_msecs(delay) < 5)
+                               delay = 0;
+               } else {
+                       /* No progress, check again in 1 second */
+                       delay = HZ;
+               }
+
+               schedule_delayed_work(&uc->tx_drain.work, delay);
+       } else if (uc->desc) {
+               struct udma_desc *d = uc->desc;
+
+               uc->bcnt += d->residue;
+               udma_start(uc);
+               vchan_cookie_complete(&d->vd);
+       }
+}
+
+static irqreturn_t udma_ring_irq_handler(int irq, void *data)
+{
+       struct udma_chan *uc = data;
+       struct udma_desc *d;
+       unsigned long flags;
+       dma_addr_t paddr = 0;
+
+       if (udma_pop_from_ring(uc, &paddr) || !paddr)
+               return IRQ_HANDLED;
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+
+       /* Teardown completion message */
+       if (cppi5_desc_is_tdcm(paddr)) {
+               /* Compensate our internal pop/push counter */
+               uc->in_ring_cnt++;
+
+               complete_all(&uc->teardown_completed);
+
+               if (uc->terminated_desc) {
+                       udma_desc_free(&uc->terminated_desc->vd);
+                       uc->terminated_desc = NULL;
+               }
+
+               if (!uc->desc)
+                       udma_start(uc);
+
+               goto out;
+       }
+
+       d = udma_udma_desc_from_paddr(uc, paddr);
+
+       if (d) {
+               dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+                                                                  d->desc_idx);
+               if (desc_paddr != paddr) {
+                       dev_err(uc->ud->dev, "not matching descriptors!\n");
+                       goto out;
+               }
+
+               if (uc->cyclic) {
+                       /* push the descriptor back to the ring */
+                       if (d == uc->desc) {
+                               udma_cyclic_packet_elapsed(uc);
+                               vchan_cyclic_callback(&d->vd);
+                       }
+               } else {
+                       bool desc_done = false;
+
+                       if (d == uc->desc) {
+                               desc_done = udma_is_desc_really_done(uc, d);
+
+                               if (desc_done) {
+                                       uc->bcnt += d->residue;
+                                       udma_start(uc);
+                               } else {
+                                       schedule_delayed_work(&uc->tx_drain.work,
+                                                             0);
+                               }
+                       }
+
+                       if (desc_done)
+                               vchan_cookie_complete(&d->vd);
+               }
+       }
+out:
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+       struct udma_chan *uc = data;
+       struct udma_desc *d;
+       unsigned long flags;
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+       d = uc->desc;
+       if (d) {
+               d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+               if (uc->cyclic) {
+                       vchan_cyclic_callback(&d->vd);
+               } else {
+                       /* TODO: figure out the real amount of data */
+                       uc->bcnt += d->residue;
+                       udma_start(uc);
+                       vchan_cookie_complete(&d->vd);
+               }
+       }
+
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use. These flows can be
+ * requested only by explicit flow id number. If @from is set to -1 it will
+ * try to find the first free range. If @from is a non-negative value it
+ * will force allocation only of the specified range of flows.
+ *
+ * Returns -ENOMEM if a free range can't be found.
+ * -EEXIST if the requested range is busy.
+ * -EINVAL if wrong input values are passed.
+ * Returns the flow id on success.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+       int start, tmp_from;
+       DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+       tmp_from = from;
+       if (tmp_from < 0)
+               tmp_from = ud->rchan_cnt;
+       /* default flows can't be allocated; they are accessible only by id */
+       if (tmp_from < ud->rchan_cnt)
+               return -EINVAL;
+
+       if (tmp_from + cnt > ud->rflow_cnt)
+               return -EINVAL;
+
+       bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+                 ud->rflow_cnt);
+
+       start = bitmap_find_next_zero_area(tmp,
+                                          ud->rflow_cnt,
+                                          tmp_from, cnt, 0);
+       if (start >= ud->rflow_cnt)
+               return -ENOMEM;
+
+       if (from >= 0 && start != from)
+               return -EEXIST;
+
+       bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+       return start;
+}
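+
+/*
+ * Worked example (illustrative numbers): with rchan_cnt = 32 and
+ * rflow_cnt = 128, __udma_alloc_gp_rflow_range(ud, -1, 8) starts searching
+ * at flow id 32 (default flows below rchan_cnt can't be allocated) and
+ * returns the first free id, e.g. 32, marking ids 32..39 as allocated.
+ * __udma_alloc_gp_rflow_range(ud, 40, 8) succeeds only if ids 40..47 are
+ * all free (returning -EEXIST otherwise), and
+ * __udma_free_gp_rflow_range(ud, 32, 8) releases the first range again.
+ */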
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+       if (from < ud->rchan_cnt)
+               return -EINVAL;
+       if (from + cnt > ud->rflow_cnt)
+               return -EINVAL;
+
+       bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+       return 0;
+}
+
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+       /*
+        * An attempt to request an rflow by ID can be made for any rflow that
+        * is not in use, on the assumption that the caller knows what it is
+        * doing. The TI-SCI FW will perform an additional permission check
+        * anyway, so it's safe.
+        */
+
+       if (id < 0 || id >= ud->rflow_cnt)
+               return ERR_PTR(-ENOENT);
+
+       if (test_bit(id, ud->rflow_in_use))
+               return ERR_PTR(-ENOENT);
+
+       /* GP rflow has to be allocated first */
+       if (!test_bit(id, ud->rflow_gp_map) &&
+           !test_bit(id, ud->rflow_gp_map_allocated))
+               return ERR_PTR(-EINVAL);
+
+       dev_dbg(ud->dev, "get rflow%d\n", id);
+       set_bit(id, ud->rflow_in_use);
+       return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+       if (!test_bit(rflow->id, ud->rflow_in_use)) {
+               dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+               return;
+       }
+
+       dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+       clear_bit(rflow->id, ud->rflow_in_use);
+}
+
+#define UDMA_RESERVE_RESOURCE(res)                                     \
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,    \
+                                              enum udma_tp_level tpl,  \
+                                              int id)                  \
+{                                                                      \
+       if (id >= 0) {                                                  \
+               if (test_bit(id, ud->res##_map)) {                      \
+                       dev_err(ud->dev, "%s%d is in use\n", #res, id); \
+                       return ERR_PTR(-ENOENT);                        \
+               }                                                       \
+       } else {                                                        \
+               int start;                                              \
+                                                                       \
+               if (tpl >= ud->match_data->tpl_levels)                  \
+                       tpl = ud->match_data->tpl_levels - 1;           \
+                                                                       \
+               start = ud->match_data->level_start_idx[tpl];           \
+                                                                       \
+               id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,   \
+                                       start);                         \
+               if (id == ud->res##_cnt) {                              \
+                       return ERR_PTR(-ENOENT);                        \
+               }                                                       \
+       }                                                               \
+                                                                       \
+       set_bit(id, ud->res##_map);                                     \
+       return &ud->res##s[id];                                         \
+}
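+
+/*
+ * UDMA_RESERVE_RESOURCE(tchan) below expands to __udma_reserve_tchan() and
+ * UDMA_RESERVE_RESOURCE(rchan) to __udma_reserve_rchan(). With id >= 0 they
+ * reserve that exact channel; with id < 0 they take the first free channel
+ * at or above the start index of the requested throughput level (tpl).
+ */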
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->tchan) {
+               dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+                       uc->id, uc->tchan->id);
+               return 0;
+       }
+
+       uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+       if (IS_ERR(uc->tchan))
+               return PTR_ERR(uc->tchan);
+
+       return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->rchan) {
+               dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+                       uc->id, uc->rchan->id);
+               return 0;
+       }
+
+       uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+       if (IS_ERR(uc->rchan))
+               return PTR_ERR(uc->rchan);
+
+       return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       const struct udma_match_data *match_data = ud->match_data;
+       int chan_id, end;
+
+       if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+               dev_info(ud->dev, "chan%d: already have channel pair %d allocated\n",
+                        uc->id, uc->tchan->id);
+               return 0;
+       }
+
+       if (uc->tchan) {
+               dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+                       uc->id, uc->tchan->id);
+               return -EBUSY;
+       } else if (uc->rchan) {
+               dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+                       uc->id, uc->rchan->id);
+               return -EBUSY;
+       }
+
+       /* Can be optimized, but let's have it like this for now */
+       end = min(ud->tchan_cnt, ud->rchan_cnt);
+       /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+       chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+       for (; chan_id < end; chan_id++) {
+               if (!test_bit(chan_id, ud->tchan_map) &&
+                   !test_bit(chan_id, ud->rchan_map))
+                       break;
+       }
+
+       if (chan_id == end)
+               return -ENOENT;
+
+       set_bit(chan_id, ud->tchan_map);
+       set_bit(chan_id, ud->rchan_map);
+       uc->tchan = &ud->tchans[chan_id];
+       uc->rchan = &ud->rchans[chan_id];
+
+       return 0;
+}
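+
+/*
+ * Note: for MEM_TO_MEM the driver pairs a tchan and an rchan with the same
+ * index and wires them together over PSI-L (see the src/dst thread setup in
+ * udma_alloc_chan_resources()), starting the search at the highest
+ * throughput level.
+ */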
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (!uc->rchan) {
+               dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
+               return -EINVAL;
+       }
+
+       if (uc->rflow) {
+               dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
+                       uc->id, uc->rflow->id);
+               return 0;
+       }
+
+       uc->rflow = __udma_get_rflow(ud, flow_id);
+       if (IS_ERR(uc->rflow))
+               return PTR_ERR(uc->rflow);
+
+       return 0;
+}
+
+static void udma_put_rchan(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->rchan) {
+               dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
+                       uc->rchan->id);
+               clear_bit(uc->rchan->id, ud->rchan_map);
+               uc->rchan = NULL;
+       }
+}
+
+static void udma_put_tchan(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->tchan) {
+               dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
+                       uc->tchan->id);
+               clear_bit(uc->tchan->id, ud->tchan_map);
+               uc->tchan = NULL;
+       }
+}
+
+static void udma_put_rflow(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+
+       if (uc->rflow) {
+               dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
+                       uc->rflow->id);
+               __udma_put_rflow(ud, uc->rflow);
+               uc->rflow = NULL;
+       }
+}
+
+static void udma_free_tx_resources(struct udma_chan *uc)
+{
+       if (!uc->tchan)
+               return;
+
+       k3_ringacc_ring_free(uc->tchan->t_ring);
+       k3_ringacc_ring_free(uc->tchan->tc_ring);
+       uc->tchan->t_ring = NULL;
+       uc->tchan->tc_ring = NULL;
+
+       udma_put_tchan(uc);
+}
+
+static int udma_alloc_tx_resources(struct udma_chan *uc)
+{
+       struct k3_ring_cfg ring_cfg;
+       struct udma_dev *ud = uc->ud;
+       int ret;
+
+       ret = udma_get_tchan(uc);
+       if (ret)
+               return ret;
+
+       uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
+                                                   uc->tchan->id, 0);
+       if (!uc->tchan->t_ring) {
+               ret = -EBUSY;
+               goto err_tx_ring;
+       }
+
+       uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+       if (!uc->tchan->tc_ring) {
+               ret = -EBUSY;
+               goto err_txc_ring;
+       }
+
+       memset(&ring_cfg, 0, sizeof(ring_cfg));
+       ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+       ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+       ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+       ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
+       ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+
+       if (ret)
+               goto err_ringcfg;
+
+       return 0;
+
+err_ringcfg:
+       k3_ringacc_ring_free(uc->tchan->tc_ring);
+       uc->tchan->tc_ring = NULL;
+err_txc_ring:
+       k3_ringacc_ring_free(uc->tchan->t_ring);
+       uc->tchan->t_ring = NULL;
+err_tx_ring:
+       udma_put_tchan(uc);
+
+       return ret;
+}
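+
+/*
+ * Note: t_ring is the TX submission ring, fixed to the tchan's index, while
+ * tc_ring is the TX completion ring, taken from the general pool (requested
+ * with index -1); its ring id is later programmed as txcq_qnum in the
+ * TI-SCI channel configuration.
+ */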
+
+static void udma_free_rx_resources(struct udma_chan *uc)
+{
+       if (!uc->rchan)
+               return;
+
+       if (uc->rflow) {
+               struct udma_rflow *rflow = uc->rflow;
+
+               k3_ringacc_ring_free(rflow->fd_ring);
+               k3_ringacc_ring_free(rflow->r_ring);
+               rflow->fd_ring = NULL;
+               rflow->r_ring = NULL;
+
+               udma_put_rflow(uc);
+       }
+
+       udma_put_rchan(uc);
+}
+
+static int udma_alloc_rx_resources(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       struct k3_ring_cfg ring_cfg;
+       struct udma_rflow *rflow;
+       int fd_ring_id;
+       int ret;
+
+       ret = udma_get_rchan(uc);
+       if (ret)
+               return ret;
+
+       /* For MEM_TO_MEM we don't need rflow or rings */
+       if (uc->config.dir == DMA_MEM_TO_MEM)
+               return 0;
+
+       ret = udma_get_rflow(uc, uc->rchan->id);
+       if (ret) {
+               ret = -EBUSY;
+               goto err_rflow;
+       }
+
+       rflow = uc->rflow;
+       fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+       rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
+       if (!rflow->fd_ring) {
+               ret = -EBUSY;
+               goto err_rx_ring;
+       }
+
+       rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+       if (!rflow->r_ring) {
+               ret = -EBUSY;
+               goto err_rxc_ring;
+       }
+
+       memset(&ring_cfg, 0, sizeof(ring_cfg));
+
+       if (uc->config.pkt_mode)
+               ring_cfg.size = SG_MAX_SEGMENTS;
+       else
+               ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+       ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+       ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+       ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+       ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+       ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
+
+       if (ret)
+               goto err_ringcfg;
+
+       return 0;
+
+err_ringcfg:
+       k3_ringacc_ring_free(rflow->r_ring);
+       rflow->r_ring = NULL;
+err_rxc_ring:
+       k3_ringacc_ring_free(rflow->fd_ring);
+       rflow->fd_ring = NULL;
+err_rx_ring:
+       udma_put_rflow(uc);
+err_rflow:
+       udma_put_rchan(uc);
+
+       return ret;
+}
+
+#define TISCI_TCHAN_VALID_PARAMS (                             \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |      \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |    \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |      \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+
+#define TISCI_RCHAN_VALID_PARAMS (                             \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |    \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |     \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |    \
+       TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
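+
+/*
+ * The valid_params bitmask in a TI-SCI channel configuration request tells
+ * the firmware which fields of the message carry meaningful values; fields
+ * without their VALID bit set are left at the firmware defaults.
+ */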
+
+static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+       const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+       struct udma_tchan *tchan = uc->tchan;
+       struct udma_rchan *rchan = uc->rchan;
+       int ret = 0;
+
+       /* Non synchronized - mem to mem type of transfer */
+       int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+       struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+       struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+
+       req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+       req_tx.nav_id = tisci_rm->tisci_dev_id;
+       req_tx.index = tchan->id;
+       req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+       req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+       req_tx.txcq_qnum = tc_ring;
+
+       ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+       if (ret) {
+               dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+               return ret;
+       }
+
+       req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+       req_rx.nav_id = tisci_rm->tisci_dev_id;
+       req_rx.index = rchan->id;
+       req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+       req_rx.rxcq_qnum = tc_ring;
+       req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+
+       ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+       if (ret)
+               dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
+
+       return ret;
+}
+
+static int udma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+       const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+       struct udma_tchan *tchan = uc->tchan;
+       int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+       struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+       u32 mode, fetch_size;
+       int ret = 0;
+
+       if (uc->config.pkt_mode) {
+               mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+               fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+                                                  uc->config.psd_size, 0);
+       } else {
+               mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+               fetch_size = sizeof(struct cppi5_desc_hdr_t);
+       }
+
+       req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+       req_tx.nav_id = tisci_rm->tisci_dev_id;
+       req_tx.index = tchan->id;
+       req_tx.tx_chan_type = mode;
+       req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+       req_tx.tx_fetch_size = fetch_size >> 2;
+       req_tx.txcq_qnum = tc_ring;
+
+       ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+       if (ret)
+               dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+       return ret;
+}
+
+static int udma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+       struct udma_dev *ud = uc->ud;
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+       const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+       struct udma_rchan *rchan = uc->rchan;
+       int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
+       int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+       struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+       struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+       u32 mode, fetch_size;
+       int ret = 0;
+
+       if (uc->config.pkt_mode) {
+               mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+               fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+                                                  uc->config.psd_size, 0);
+       } else {
+               mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+               fetch_size = sizeof(struct cppi5_desc_hdr_t);
+       }
+
+       req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+       req_rx.nav_id = tisci_rm->tisci_dev_id;
+       req_rx.index = rchan->id;
+       req_rx.rx_fetch_size = fetch_size >> 2;
+       req_rx.rxcq_qnum = rx_ring;
+       req_rx.rx_chan_type = mode;
+
+       ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+       if (ret) {
+               dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+               return ret;
+       }
+
+       flow_req.valid_params =
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+               TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+
+       flow_req.nav_id = tisci_rm->tisci_dev_id;
+       flow_req.flow_index = rchan->id;
+
+       flow_req.rx_einfo_present = uc->config.needs_epib ? 1 : 0;
+       flow_req.rx_psinfo_present = uc->config.psd_size ? 1 : 0;
+       flow_req.rx_error_handling = 1;
+       flow_req.rx_dest_qnum = rx_ring;
+       flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
+       flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
+       flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
+       flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
+       flow_req.rx_fdq0_sz0_qnum = fd_ring;
+       flow_req.rx_fdq1_qnum = fd_ring;
+       flow_req.rx_fdq2_qnum = fd_ring;
+       flow_req.rx_fdq3_qnum = fd_ring;
+
+       ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+       if (ret)
+               dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
+
+       return ret;
+}
+
+static int udma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       struct udma_dev *ud = to_udma_dev(chan->device);
+       const struct udma_match_data *match_data = ud->match_data;
+       struct k3_ring *irq_ring;
+       u32 irq_udma_idx;
+       int ret;
+
+       if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+               uc->use_dma_pool = true;
+               /* in case of MEM_TO_MEM we have a maximum of two TRs */
+               if (uc->config.dir == DMA_MEM_TO_MEM) {
+                       uc->config.hdesc_size = cppi5_trdesc_calc_size(
+                                       sizeof(struct cppi5_tr_type15_t), 2);
+                       uc->config.pkt_mode = false;
+               }
+       }
+
+       if (uc->use_dma_pool) {
+               uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+                                                uc->config.hdesc_size,
+                                                ud->desc_align,
+                                                0);
+               if (!uc->hdesc_pool) {
+                       dev_err(ud->ddev.dev,
+                               "Descriptor pool allocation failed\n");
+                       uc->use_dma_pool = false;
+                       return -ENOMEM;
+               }
+       }
+
+       /*
+        * Make sure that the completion is in a known state:
+        * No teardown, the channel is idle
+        */
+       reinit_completion(&uc->teardown_completed);
+       complete_all(&uc->teardown_completed);
+       uc->state = UDMA_CHAN_IS_IDLE;
+
+       switch (uc->config.dir) {
+       case DMA_MEM_TO_MEM:
+               /* Non synchronized - mem to mem type of transfer */
+               dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+                       uc->id);
+
+               ret = udma_get_chan_pair(uc);
+               if (ret)
+                       return ret;
+
+               ret = udma_alloc_tx_resources(uc);
+               if (ret)
+                       return ret;
+
+               ret = udma_alloc_rx_resources(uc);
+               if (ret) {
+                       udma_free_tx_resources(uc);
+                       return ret;
+               }
+
+               uc->config.src_thread = ud->psil_base + uc->tchan->id;
+               uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+                                       K3_PSIL_DST_THREAD_ID_OFFSET;
+
+               irq_ring = uc->tchan->tc_ring;
+               irq_udma_idx = uc->tchan->id;
+
+               ret = udma_tisci_m2m_channel_config(uc);
+               break;
+       case DMA_MEM_TO_DEV:
+               /* Slave transfer synchronized - mem to dev (TX) transfer */
+               dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+                       uc->id);
+
+               ret = udma_alloc_tx_resources(uc);
+               if (ret) {
+                       uc->config.remote_thread_id = -1;
+                       return ret;
+               }
+
+               uc->config.src_thread = ud->psil_base + uc->tchan->id;
+               uc->config.dst_thread = uc->config.remote_thread_id;
+               uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+               irq_ring = uc->tchan->tc_ring;
+               irq_udma_idx = uc->tchan->id;
+
+               ret = udma_tisci_tx_channel_config(uc);
+               break;
+       case DMA_DEV_TO_MEM:
+               /* Slave transfer synchronized - dev to mem (RX) transfer */
+               dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+                       uc->id);
+
+               ret = udma_alloc_rx_resources(uc);
+               if (ret) {
+                       uc->config.remote_thread_id = -1;
+                       return ret;
+               }
+
+               uc->config.src_thread = uc->config.remote_thread_id;
+               uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+                                       K3_PSIL_DST_THREAD_ID_OFFSET;
+
+               irq_ring = uc->rflow->r_ring;
+               irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+               ret = udma_tisci_rx_channel_config(uc);
+               break;
+       default:
+               /* Can not happen */
+               dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+                       __func__, uc->id, uc->config.dir);
+               return -EINVAL;
+       }
+
+       /* check if the channel configuration was successful */
+       if (ret)
+               goto err_res_free;
+
+       if (udma_is_chan_running(uc)) {
+               dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+               udma_stop(uc);
+               if (udma_is_chan_running(uc)) {
+                       dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+                       goto err_res_free;
+               }
+       }
+
+       /* PSI-L pairing */
+       ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+       if (ret) {
+               dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+                       uc->config.src_thread, uc->config.dst_thread);
+               goto err_res_free;
+       }
+
+       uc->psil_paired = true;
+
+       uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
+       if (uc->irq_num_ring <= 0) {
+               dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+                       k3_ringacc_get_ring_id(irq_ring));
+               ret = -EINVAL;
+               goto err_psi_free;
+       }
+
+       ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+                         IRQF_TRIGGER_HIGH, uc->name, uc);
+       if (ret) {
+               dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+               goto err_irq_free;
+       }
+
+       /* Event from UDMA (TR events) only needed for slave TR mode channels */
+       if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
+               uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+                                                           irq_udma_idx);
+               if (uc->irq_num_udma <= 0) {
+                       dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
+                               irq_udma_idx);
+                       free_irq(uc->irq_num_ring, uc);
+                       ret = -EINVAL;
+                       goto err_irq_free;
+               }
+
+               ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+                                 uc->name, uc);
+               if (ret) {
+                       dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
+                               uc->id);
+                       free_irq(uc->irq_num_ring, uc);
+                       goto err_irq_free;
+               }
+       } else {
+               uc->irq_num_udma = 0;
+       }
+
+       udma_reset_rings(uc);
+
+       INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
+       return 0;
+
+err_irq_free:
+       uc->irq_num_ring = 0;
+       uc->irq_num_udma = 0;
+err_psi_free:
+       navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+       uc->psil_paired = false;
+err_res_free:
+       udma_free_tx_resources(uc);
+       udma_free_rx_resources(uc);
+
+       udma_reset_uchan(uc);
+
+       if (uc->use_dma_pool) {
+               dma_pool_destroy(uc->hdesc_pool);
+               uc->use_dma_pool = false;
+       }
+
+       return ret;
+}
+
+static int udma_slave_config(struct dma_chan *chan,
+                            struct dma_slave_config *cfg)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+
+       memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
+
+       return 0;
+}
+
+static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
+                                           size_t tr_size, int tr_count,
+                                           enum dma_transfer_direction dir)
+{
+       struct udma_hwdesc *hwdesc;
+       struct cppi5_desc_hdr_t *tr_desc;
+       struct udma_desc *d;
+       u32 reload_count = 0;
+       u32 ring_id;
+
+       switch (tr_size) {
+       case 16:
+       case 32:
+       case 64:
+       case 128:
+               break;
+       default:
+               dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
+               return NULL;
+       }
+
+       /* We have only one descriptor containing multiple TRs */
+       d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->sglen = tr_count;
+
+       d->hwdesc_count = 1;
+       hwdesc = &d->hwdesc[0];
+
+       /* Allocate memory for DMA ring descriptor */
+       if (uc->use_dma_pool) {
+               hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+               hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+                                               GFP_NOWAIT,
+                                               &hwdesc->cppi5_desc_paddr);
+       } else {
+               hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
+                                                                tr_count);
+               hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+                                               uc->ud->desc_align);
+               hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
+                                               hwdesc->cppi5_desc_size,
+                                               &hwdesc->cppi5_desc_paddr,
+                                               GFP_NOWAIT);
+       }
+
+       if (!hwdesc->cppi5_desc_vaddr) {
+               kfree(d);
+               return NULL;
+       }
+
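+       /*
+        * Layout of the allocated area, in tr_size aligned slots:
+        * [CPPI5 descriptor header | TR request records | TR response records]
+        */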
+       /* Start of the TR req records */
+       hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+       /* Start address of the TR response array */
+       hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
+
+       tr_desc = hwdesc->cppi5_desc_vaddr;
+
+       if (uc->cyclic)
+               reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
+
+       if (dir == DMA_DEV_TO_MEM)
+               ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+       else
+               ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+       cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
+       cppi5_desc_set_pktids(tr_desc, uc->id,
+                             CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+       cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+       return d;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+                     unsigned int sglen, enum dma_transfer_direction dir,
+                     unsigned long tx_flags, void *context)
+{
+       enum dma_slave_buswidth dev_width;
+       struct scatterlist *sgent;
+       struct udma_desc *d;
+       size_t tr_size;
+       struct cppi5_tr_type1_t *tr_req = NULL;
+       unsigned int i;
+       u32 burst;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_width = uc->cfg.src_addr_width;
+               burst = uc->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_width = uc->cfg.dst_addr_width;
+               burst = uc->cfg.dst_maxburst;
+       } else {
+               dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (!burst)
+               burst = 1;
+
+       /* Now allocate and setup the descriptor. */
+       tr_size = sizeof(struct cppi5_tr_type1_t);
+       d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+       if (!d)
+               return NULL;
+
+       d->sglen = sglen;
+
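+       /*
+        * Each SG entry becomes one Type 1 TR: icnt0 is the number of bytes
+        * moved per burst (burst * bus width), icnt1 the number of such
+        * bursts and dim1 the byte stride between them. E.g. a 4 KiB segment
+        * with 4 byte bus width and maxburst 8 gives icnt0 = 32, icnt1 = 128.
+        */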
+       tr_req = d->hwdesc[0].tr_req_base;
+       for_each_sg(sgl, sgent, sglen, i) {
+               d->residue += sg_dma_len(sgent);
+
+               cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+                             CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+               cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+               tr_req[i].addr = sg_dma_address(sgent);
+               tr_req[i].icnt0 = burst * dev_width;
+               tr_req[i].dim1 = burst * dev_width;
+               tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+       }
+
+       cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+       return d;
+}
+
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+                                  enum dma_slave_buswidth dev_width,
+                                  u16 elcnt)
+{
+       if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+               return 0;
+
+       /* Bus width translates to the element size (ES) */
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               d->static_tr.elsize = 0;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               d->static_tr.elsize = 1;
+               break;
+       case DMA_SLAVE_BUSWIDTH_3_BYTES:
+               d->static_tr.elsize = 2;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               d->static_tr.elsize = 3;
+               break;
+       case DMA_SLAVE_BUSWIDTH_8_BYTES:
+               d->static_tr.elsize = 4;
+               break;
+       default: /* not reached */
+               return -EINVAL;
+       }
+
+       d->static_tr.elcnt = elcnt;
+
+       /*
+        * PDMA must close the packet when the channel is in packet mode.
+        * For TR mode, when the channel is not cyclic, we also need PDMA to
+        * close the packet, otherwise the transfer will stall because PDMA
+        * holds on to the data it has received from the peripheral.
+        */
+       if (uc->config.pkt_mode || !uc->cyclic) {
+               unsigned int div = dev_width * elcnt;
+
+               if (uc->cyclic)
+                       d->static_tr.bstcnt = d->residue / d->sglen / div;
+               else
+                       d->static_tr.bstcnt = d->residue / div;
+
+               if (uc->config.dir == DMA_DEV_TO_MEM &&
+                   d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+                       return -EINVAL;
+       } else {
+               d->static_tr.bstcnt = 0;
+       }
+
+       return 0;
+}
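+
+/*
+ * Note: the elsize values programmed above index the element sizes
+ * {1, 2, 3, 4, 8} bytes, and bstcnt (the static TR Z parameter) is the
+ * transfer length in bursts of elcnt elements, which lets PDMA know when to
+ * close the packet. E.g. a non-cyclic 1024 byte transfer with 4 byte bus
+ * width and elcnt = 8 gives bstcnt = 1024 / 32 = 32.
+ */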
+
+static struct udma_desc *
+udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
+                      unsigned int sglen, enum dma_transfer_direction dir,
+                      unsigned long tx_flags, void *context)
+{
+       struct scatterlist *sgent;
+       struct cppi5_host_desc_t *h_desc = NULL;
+       struct udma_desc *d;
+       u32 ring_id;
+       unsigned int i;
+
+       d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->sglen = sglen;
+       d->hwdesc_count = sglen;
+
+       if (dir == DMA_DEV_TO_MEM)
+               ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+       else
+               ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+       for_each_sg(sgl, sgent, sglen, i) {
+               struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+               dma_addr_t sg_addr = sg_dma_address(sgent);
+               struct cppi5_host_desc_t *desc;
+               size_t sg_len = sg_dma_len(sgent);
+
+               hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+                                               GFP_NOWAIT,
+                                               &hwdesc->cppi5_desc_paddr);
+               if (!hwdesc->cppi5_desc_vaddr) {
+                       dev_err(uc->ud->dev,
+                               "descriptor%d allocation failed\n", i);
+
+                       udma_free_hwdesc(uc, d);
+                       kfree(d);
+                       return NULL;
+               }
+
+               d->residue += sg_len;
+               hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+               desc = hwdesc->cppi5_desc_vaddr;
+
+               if (i == 0) {
+                       cppi5_hdesc_init(desc, 0, 0);
+                       /* Flow and Packet ID */
+                       cppi5_desc_set_pktids(&desc->hdr, uc->id,
+                                             CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+                       cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
+               } else {
+                       cppi5_hdesc_reset_hbdesc(desc);
+                       cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
+               }
+
+               /* attach the sg buffer to the descriptor */
+               cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
+
+               /* Attach link as host buffer descriptor */
+               if (h_desc)
+                       cppi5_hdesc_link_hbdesc(h_desc,
+                                               hwdesc->cppi5_desc_paddr);
+
+               if (dir == DMA_MEM_TO_DEV)
+                       h_desc = desc;
+       }
+
+       if (d->residue >= SZ_4M) {
+               dev_err(uc->ud->dev,
+                       "%s: Transfer size %u is over the supported 4M range\n",
+                       __func__, d->residue);
+               udma_free_hwdesc(uc, d);
+               kfree(d);
+               return NULL;
+       }
+
+       h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+       cppi5_hdesc_set_pktlen(h_desc, d->residue);
+
+       return d;
+}
+
+static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
+                               void *data, size_t len)
+{
+       struct udma_desc *d = to_udma_desc(desc);
+       struct udma_chan *uc = to_udma_chan(desc->chan);
+       struct cppi5_host_desc_t *h_desc;
+       u32 psd_size = len;
+       u32 flags = 0;
+
+       if (!uc->config.pkt_mode || !uc->config.metadata_size)
+               return -ENOTSUPP;
+
+       if (!data || len > uc->config.metadata_size)
+               return -EINVAL;
+
+       if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+               return -EINVAL;
+
+       h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+       if (d->dir == DMA_MEM_TO_DEV)
+               memcpy(h_desc->epib, data, len);
+
+       if (uc->config.needs_epib)
+               psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+       d->metadata = data;
+       d->metadata_size = len;
+       if (uc->config.needs_epib)
+               flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+
+       cppi5_hdesc_update_flags(h_desc, flags);
+       cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+       return 0;
+}
+
+static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+                                  size_t *payload_len, size_t *max_len)
+{
+       struct udma_desc *d = to_udma_desc(desc);
+       struct udma_chan *uc = to_udma_chan(desc->chan);
+       struct cppi5_host_desc_t *h_desc;
+
+       if (!uc->config.pkt_mode || !uc->config.metadata_size)
+               return ERR_PTR(-ENOTSUPP);
+
+       h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+       *max_len = uc->config.metadata_size;
+
+       *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
+                      CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
+       *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
+
+       return h_desc->epib;
+}
+
+static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
+                                size_t payload_len)
+{
+       struct udma_desc *d = to_udma_desc(desc);
+       struct udma_chan *uc = to_udma_chan(desc->chan);
+       struct cppi5_host_desc_t *h_desc;
+       u32 psd_size = payload_len;
+       u32 flags = 0;
+
+       if (!uc->config.pkt_mode || !uc->config.metadata_size)
+               return -ENOTSUPP;
+
+       if (payload_len > uc->config.metadata_size)
+               return -EINVAL;
+
+       if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+               return -EINVAL;
+
+       h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+       if (uc->config.needs_epib) {
+               psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+               flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+       }
+
+       cppi5_hdesc_update_flags(h_desc, flags);
+       cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+       return 0;
+}
+
+static struct dma_descriptor_metadata_ops metadata_ops = {
+       .attach = udma_attach_metadata,
+       .get_ptr = udma_get_metadata_ptr,
+       .set_len = udma_set_metadata_len,
+};
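+
+/*
+ * Client side sketch, assuming the generic metadata helpers declared in
+ * <linux/dmaengine.h>: after preparing a descriptor, a client may call
+ * dmaengine_desc_attach_metadata(desc, buf, len) for DESC_METADATA_CLIENT
+ * mode, or dmaengine_desc_get_metadata_ptr() followed by
+ * dmaengine_desc_set_metadata_len() for DESC_METADATA_ENGINE mode.
+ */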
+
+static struct dma_async_tx_descriptor *
+udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                  unsigned int sglen, enum dma_transfer_direction dir,
+                  unsigned long tx_flags, void *context)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct udma_desc *d;
+       u32 burst;
+
+       if (dir != uc->config.dir) {
+               dev_err(chan->device->dev,
+                       "%s: chan%d is for %s, not supporting %s\n",
+                       __func__, uc->id,
+                       dmaengine_get_direction_text(uc->config.dir),
+                       dmaengine_get_direction_text(dir));
+               return NULL;
+       }
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_width = uc->cfg.src_addr_width;
+               burst = uc->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_width = uc->cfg.dst_addr_width;
+               burst = uc->cfg.dst_maxburst;
+       } else {
+               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (!burst)
+               burst = 1;
+
+       if (uc->config.pkt_mode)
+               d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
+                                          context);
+       else
+               d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
+                                         context);
+
+       if (!d)
+               return NULL;
+
+       d->dir = dir;
+       d->desc_idx = 0;
+       d->tr_idx = 0;
+
+       /* static TR for remote PDMA */
+       if (udma_configure_statictr(uc, d, dev_width, burst)) {
+               dev_err(uc->ud->dev,
+                       "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+                       __func__, d->static_tr.bstcnt);
+
+               udma_free_hwdesc(uc, d);
+               kfree(d);
+               return NULL;
+       }
+
+       if (uc->config.metadata_size)
+               d->vd.tx.metadata_ops = &metadata_ops;
+
+       return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
+                       size_t buf_len, size_t period_len,
+                       enum dma_transfer_direction dir, unsigned long flags)
+{
+       enum dma_slave_buswidth dev_width;
+       struct udma_desc *d;
+       size_t tr_size;
+       struct cppi5_tr_type1_t *tr_req;
+       unsigned int i;
+       unsigned int periods = buf_len / period_len;
+       u32 burst;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_width = uc->cfg.src_addr_width;
+               burst = uc->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_width = uc->cfg.dst_addr_width;
+               burst = uc->cfg.dst_maxburst;
+       } else {
+               dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (!burst)
+               burst = 1;
+
+       /* Now allocate and setup the descriptor. */
+       tr_size = sizeof(struct cppi5_tr_type1_t);
+       d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+       if (!d)
+               return NULL;
+
+       tr_req = d->hwdesc[0].tr_req_base;
+       for (i = 0; i < periods; i++) {
+               cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+                             CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+               tr_req[i].addr = buf_addr + period_len * i;
+               tr_req[i].icnt0 = dev_width;
+               tr_req[i].icnt1 = period_len / dev_width;
+               tr_req[i].dim1 = dev_width;
+
+               if (!(flags & DMA_PREP_INTERRUPT))
+                       cppi5_tr_csf_set(&tr_req[i].flags,
+                                        CPPI5_TR_CSF_SUPR_EVT);
+       }
+
+       return d;
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
+                        size_t buf_len, size_t period_len,
+                        enum dma_transfer_direction dir, unsigned long flags)
+{
+       struct udma_desc *d;
+       u32 ring_id;
+       int i;
+       int periods = buf_len / period_len;
+
+       if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
+               return NULL;
+
+       if (period_len >= SZ_4M)
+               return NULL;
+
+       d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->hwdesc_count = periods;
+
+       /* TODO: re-check this... */
+       if (dir == DMA_DEV_TO_MEM)
+               ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+       else
+               ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+       for (i = 0; i < periods; i++) {
+               struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+               dma_addr_t period_addr = buf_addr + (period_len * i);
+               struct cppi5_host_desc_t *h_desc;
+
+               hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+                                               GFP_NOWAIT,
+                                               &hwdesc->cppi5_desc_paddr);
+               if (!hwdesc->cppi5_desc_vaddr) {
+                       dev_err(uc->ud->dev,
+                               "descriptor%d allocation failed\n", i);
+
+                       udma_free_hwdesc(uc, d);
+                       kfree(d);
+                       return NULL;
+               }
+
+               hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+               h_desc = hwdesc->cppi5_desc_vaddr;
+
+               cppi5_hdesc_init(h_desc, 0, 0);
+               cppi5_hdesc_set_pktlen(h_desc, period_len);
+
+               /* Flow and Packet ID */
+               cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
+                                     CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+               cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
+
+               /* attach each period to a new descriptor */
+               cppi5_hdesc_attach_buf(h_desc,
+                                      period_addr, period_len,
+                                      period_addr, period_len);
+       }
+
+       return d;
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+                    size_t period_len, enum dma_transfer_direction dir,
+                    unsigned long flags)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct udma_desc *d;
+       u32 burst;
+
+       if (dir != uc->config.dir) {
+               dev_err(chan->device->dev,
+                       "%s: chan%d is for %s, not supporting %s\n",
+                       __func__, uc->id,
+                       dmaengine_get_direction_text(uc->config.dir),
+                       dmaengine_get_direction_text(dir));
+               return NULL;
+       }
+
+       uc->cyclic = true;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_width = uc->cfg.src_addr_width;
+               burst = uc->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_width = uc->cfg.dst_addr_width;
+               burst = uc->cfg.dst_maxburst;
+       } else {
+               dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (!burst)
+               burst = 1;
+
+       if (uc->config.pkt_mode)
+               d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
+                                            dir, flags);
+       else
+               d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
+                                           dir, flags);
+
+       if (!d)
+               return NULL;
+
+       d->sglen = buf_len / period_len;
+
+       d->dir = dir;
+       d->residue = buf_len;
+
+       /* static TR for remote PDMA */
+       if (udma_configure_statictr(uc, d, dev_width, burst)) {
+               dev_err(uc->ud->dev,
+                       "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+                       __func__, d->static_tr.bstcnt);
+
+               udma_free_hwdesc(uc, d);
+               kfree(d);
+               return NULL;
+       }
+
+       if (uc->config.metadata_size)
+               d->vd.tx.metadata_ops = &metadata_ops;
+
+       return vchan_tx_prep(&uc->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+                    size_t len, unsigned long tx_flags)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       struct udma_desc *d;
+       struct cppi5_tr_type15_t *tr_req;
+       int num_tr;
+       size_t tr_size = sizeof(struct cppi5_tr_type15_t);
+       u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+
+       if (uc->config.dir != DMA_MEM_TO_MEM) {
+               dev_err(chan->device->dev,
+                       "%s: chan%d is for %s, not supporting %s\n",
+                       __func__, uc->id,
+                       dmaengine_get_direction_text(uc->config.dir),
+                       dmaengine_get_direction_text(DMA_MEM_TO_MEM));
+               return NULL;
+       }
+
+       if (len < SZ_64K) {
+               num_tr = 1;
+               tr0_cnt0 = len;
+               tr0_cnt1 = 1;
+       } else {
+               unsigned long align_to = __ffs(src | dest);
+
+               if (align_to > 3)
+                       align_to = 3;
+               /*
+                * Keep it simple: tr0 moves (SZ_64K - alignment)-sized
+                * blocks, tr1 moves the remainder.
+                */
+               num_tr = 2;
+               tr0_cnt0 = (SZ_64K - BIT(align_to));
+               if (len / tr0_cnt0 >= SZ_64K) {
+                       dev_err(uc->ud->dev, "size %zu is not supported\n",
+                               len);
+                       return NULL;
+               }
+
+               tr0_cnt1 = len / tr0_cnt0;
+               tr1_cnt0 = len % tr0_cnt0;
+       }
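+
+       /*
+        * Worked example (hypothetical sizes): len = 200000 with 8 byte
+        * aligned src/dest gives tr0_cnt0 = SZ_64K - 8 = 65528,
+        * tr0_cnt1 = 3 and tr1_cnt0 = 200000 - 3 * 65528 = 3416.
+        */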
+
+       d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
+       if (!d)
+               return NULL;
+
+       d->dir = DMA_MEM_TO_MEM;
+       d->desc_idx = 0;
+       d->tr_idx = 0;
+       d->residue = len;
+
+       tr_req = d->hwdesc[0].tr_req_base;
+
+       cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
+                     CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+       cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+       tr_req[0].addr = src;
+       tr_req[0].icnt0 = tr0_cnt0;
+       tr_req[0].icnt1 = tr0_cnt1;
+       tr_req[0].icnt2 = 1;
+       tr_req[0].icnt3 = 1;
+       tr_req[0].dim1 = tr0_cnt0;
+
+       tr_req[0].daddr = dest;
+       tr_req[0].dicnt0 = tr0_cnt0;
+       tr_req[0].dicnt1 = tr0_cnt1;
+       tr_req[0].dicnt2 = 1;
+       tr_req[0].dicnt3 = 1;
+       tr_req[0].ddim1 = tr0_cnt0;
+
+       if (num_tr == 2) {
+               cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
+                             CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+               cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+               tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
+               tr_req[1].icnt0 = tr1_cnt0;
+               tr_req[1].icnt1 = 1;
+               tr_req[1].icnt2 = 1;
+               tr_req[1].icnt3 = 1;
+
+               tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
+               tr_req[1].dicnt0 = tr1_cnt0;
+               tr_req[1].dicnt1 = 1;
+               tr_req[1].dicnt2 = 1;
+               tr_req[1].dicnt3 = 1;
+       }
+
+       cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+
+       if (uc->config.metadata_size)
+               d->vd.tx.metadata_ops = &metadata_ops;
+
+       return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static void udma_issue_pending(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+
+       /* If we have something pending and no active descriptor, then */
+       if (vchan_issue_pending(&uc->vc) && !uc->desc) {
+               /*
+                * start a descriptor if the channel is NOT [marked as
+                * terminating _and_ it is still running (teardown has not
+                * completed yet)].
+                */
+               if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
+                     udma_is_chan_running(uc)))
+                       udma_start(uc);
+       }
+
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+}
+
+static enum dma_status udma_tx_status(struct dma_chan *chan,
+                                     dma_cookie_t cookie,
+                                     struct dma_tx_state *txstate)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       enum dma_status ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
+               ret = DMA_PAUSED;
+
+       if (ret == DMA_COMPLETE || !txstate)
+               goto out;
+
+       if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
+               u32 peer_bcnt = 0;
+               u32 bcnt = 0;
+               u32 residue = uc->desc->residue;
+               u32 delay = 0;
+
+               if (uc->desc->dir == DMA_MEM_TO_DEV) {
+                       bcnt = udma_tchanrt_read(uc->tchan,
+                                                UDMA_TCHAN_RT_SBCNT_REG);
+
+                       if (uc->config.ep_type != PSIL_EP_NATIVE) {
+                               peer_bcnt = udma_tchanrt_read(uc->tchan,
+                                               UDMA_TCHAN_RT_PEER_BCNT_REG);
+
+                               if (bcnt > peer_bcnt)
+                                       delay = bcnt - peer_bcnt;
+                       }
+               } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
+                       bcnt = udma_rchanrt_read(uc->rchan,
+                                                UDMA_RCHAN_RT_BCNT_REG);
+
+                       if (uc->config.ep_type != PSIL_EP_NATIVE) {
+                               peer_bcnt = udma_rchanrt_read(uc->rchan,
+                                               UDMA_RCHAN_RT_PEER_BCNT_REG);
+
+                               if (peer_bcnt > bcnt)
+                                       delay = peer_bcnt - bcnt;
+                       }
+               } else {
+                       bcnt = udma_tchanrt_read(uc->tchan,
+                                                UDMA_TCHAN_RT_BCNT_REG);
+               }
+
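+               /*
+                * The hardware byte counters are free running; subtract the
+                * bytes already accounted to completed descriptors (uc->bcnt)
+                * and fold the remainder into this descriptor's residue.
+                */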
+               bcnt -= uc->bcnt;
+               if (bcnt && !(bcnt % uc->desc->residue))
+                       residue = 0;
+               else
+                       residue -= bcnt % uc->desc->residue;
+
+               if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
+                       ret = DMA_COMPLETE;
+                       delay = 0;
+               }
+
+               dma_set_residue(txstate, residue);
+               dma_set_in_flight_bytes(txstate, delay);
+
+       } else {
+               ret = DMA_COMPLETE;
+       }
+
+out:
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+       return ret;
+}
+
+static int udma_pause(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+
+       if (!uc->desc)
+               return -EINVAL;
+
+       /* pause the channel */
+       switch (uc->desc->dir) {
+       case DMA_DEV_TO_MEM:
+               udma_rchanrt_update_bits(uc->rchan,
+                                        UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                                        UDMA_PEER_RT_EN_PAUSE,
+                                        UDMA_PEER_RT_EN_PAUSE);
+               break;
+       case DMA_MEM_TO_DEV:
+               udma_tchanrt_update_bits(uc->tchan,
+                                        UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                                        UDMA_PEER_RT_EN_PAUSE,
+                                        UDMA_PEER_RT_EN_PAUSE);
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                        UDMA_CHAN_RT_CTL_PAUSE,
+                                        UDMA_CHAN_RT_CTL_PAUSE);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int udma_resume(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+
+       if (!uc->desc)
+               return -EINVAL;
+
+       /* resume the channel */
+       switch (uc->desc->dir) {
+       case DMA_DEV_TO_MEM:
+               udma_rchanrt_update_bits(uc->rchan,
+                                        UDMA_RCHAN_RT_PEER_RT_EN_REG,
+                                        UDMA_PEER_RT_EN_PAUSE, 0);
+
+               break;
+       case DMA_MEM_TO_DEV:
+               udma_tchanrt_update_bits(uc->tchan,
+                                        UDMA_TCHAN_RT_PEER_RT_EN_REG,
+                                        UDMA_PEER_RT_EN_PAUSE, 0);
+               break;
+       case DMA_MEM_TO_MEM:
+               udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+                                        UDMA_CHAN_RT_CTL_PAUSE, 0);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int udma_terminate_all(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&uc->vc.lock, flags);
+
+       if (udma_is_chan_running(uc))
+               udma_stop(uc);
+
+       if (uc->desc) {
+               uc->terminated_desc = uc->desc;
+               uc->desc = NULL;
+               uc->terminated_desc->terminated = true;
+               cancel_delayed_work(&uc->tx_drain.work);
+       }
+
+       uc->paused = false;
+
+       vchan_get_all_descriptors(&uc->vc, &head);
+       spin_unlock_irqrestore(&uc->vc.lock, flags);
+       vchan_dma_desc_free_list(&uc->vc, &head);
+
+       return 0;
+}
+
+static void udma_synchronize(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       unsigned long timeout = msecs_to_jiffies(1000);
+
+       vchan_synchronize(&uc->vc);
+
+       if (uc->state == UDMA_CHAN_IS_TERMINATING) {
+               timeout = wait_for_completion_timeout(&uc->teardown_completed,
+                                                     timeout);
+               if (!timeout) {
+                       dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
+                                uc->id);
+                       udma_dump_chan_stdata(uc);
+                       udma_reset_chan(uc, true);
+               }
+       }
+
+       udma_reset_chan(uc, false);
+       if (udma_is_chan_running(uc))
+               dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
+
+       cancel_delayed_work_sync(&uc->tx_drain.work);
+       udma_reset_rings(uc);
+}
+
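+/*
+ * Prepare a completed descriptor for the client callback: fetch the EPIB
+ * metadata if the descriptor carries any, and derive residue and transfer
+ * result from the CPPI5 host descriptor.
+ */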
+static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+                                  struct virt_dma_desc *vd,
+                                  struct dmaengine_result *result)
+{
+       struct udma_chan *uc = to_udma_chan(&vc->chan);
+       struct udma_desc *d;
+
+       if (!vd)
+               return;
+
+       d = to_udma_desc(&vd->tx);
+
+       if (d->metadata_size)
+               udma_fetch_epib(uc, d);
+
+       /* Provide residue information for the client */
+       if (result) {
+               void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+
+               if (cppi5_desc_get_type(desc_vaddr) ==
+                   CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+                       result->residue = d->residue -
+                                         cppi5_hdesc_get_pktlen(desc_vaddr);
+                       if (result->residue)
+                               result->result = DMA_TRANS_ABORTED;
+                       else
+                               result->result = DMA_TRANS_NOERROR;
+               } else {
+                       result->residue = 0;
+                       result->result = DMA_TRANS_NOERROR;
+               }
+       }
+}
+
+/*
+ * This tasklet handles completed DMA descriptors: the cyclic callback (if
+ * any) is invoked first, then each completed descriptor gets its callback
+ * and result before being freed.
+ */
+static void udma_vchan_complete(unsigned long arg)
+{
+       struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+       struct virt_dma_desc *vd, *_vd;
+       struct dmaengine_desc_callback cb;
+       LIST_HEAD(head);
+
+       spin_lock_irq(&vc->lock);
+       list_splice_tail_init(&vc->desc_completed, &head);
+       vd = vc->cyclic;
+       if (vd) {
+               vc->cyclic = NULL;
+               dmaengine_desc_get_callback(&vd->tx, &cb);
+       } else {
+               memset(&cb, 0, sizeof(cb));
+       }
+       spin_unlock_irq(&vc->lock);
+
+       udma_desc_pre_callback(vc, vd, NULL);
+       dmaengine_desc_callback_invoke(&cb, NULL);
+
+       list_for_each_entry_safe(vd, _vd, &head, node) {
+               struct dmaengine_result result;
+
+               dmaengine_desc_get_callback(&vd->tx, &cb);
+
+               list_del(&vd->node);
+
+               udma_desc_pre_callback(vc, vd, &result);
+               dmaengine_desc_callback_invoke(&cb, &result);
+
+               vchan_vdesc_fini(vd);
+       }
+}
+
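+/*
+ * dmaengine device_free_chan_resources callback: terminate outstanding
+ * transfers, release the ring and UDMA interrupts, undo the PSI-L pairing
+ * and free the channel's rings, descriptors and (packet mode) hdesc pool.
+ */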
+static void udma_free_chan_resources(struct dma_chan *chan)
+{
+       struct udma_chan *uc = to_udma_chan(chan);
+       struct udma_dev *ud = to_udma_dev(chan->device);
+
+       udma_terminate_all(chan);
+       if (uc->terminated_desc) {
+               udma_reset_chan(uc, false);
+               udma_reset_rings(uc);
+       }
+
+       cancel_delayed_work_sync(&uc->tx_drain.work);
+       destroy_delayed_work_on_stack(&uc->tx_drain.work);
+
+       if (uc->irq_num_ring > 0) {
+               free_irq(uc->irq_num_ring, uc);
+
+               uc->irq_num_ring = 0;
+       }
+       if (uc->irq_num_udma > 0) {
+               free_irq(uc->irq_num_udma, uc);
+
+               uc->irq_num_udma = 0;
+       }
+
+       /* Release PSI-L pairing */
+       if (uc->psil_paired) {
+               navss_psil_unpair(ud, uc->config.src_thread,
+                                 uc->config.dst_thread);
+               uc->psil_paired = false;
+       }
+
+       vchan_free_chan_resources(&uc->vc);
+       tasklet_kill(&uc->vc.task);
+
+       udma_free_tx_resources(uc);
+       udma_free_rx_resources(uc);
+       udma_reset_uchan(uc);
+
+       if (uc->use_dma_pool) {
+               dma_pool_destroy(uc->hdesc_pool);
+               uc->use_dma_pool = false;
+       }
+}
+
+static struct platform_driver udma_driver;
+
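+/*
+ * dma_filter_fn used by udma_of_xlate(): derive the channel configuration
+ * (direction, packet mode, descriptor sizes) from the remote PSI-L thread
+ * ID by looking up the PSI-L endpoint database.
+ */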
+static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+       struct udma_chan_config *ucc;
+       struct psil_endpoint_config *ep_config;
+       struct udma_chan *uc;
+       struct udma_dev *ud;
+       u32 *args;
+
+       if (chan->device->dev->driver != &udma_driver.driver)
+               return false;
+
+       uc = to_udma_chan(chan);
+       ucc = &uc->config;
+       ud = uc->ud;
+       args = param;
+
+       ucc->remote_thread_id = args[0];
+
+       if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+               ucc->dir = DMA_MEM_TO_DEV;
+       else
+               ucc->dir = DMA_DEV_TO_MEM;
+
+       ep_config = psil_get_ep_config(ucc->remote_thread_id);
+       if (IS_ERR(ep_config)) {
+               dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
+                       ucc->remote_thread_id);
+               ucc->dir = DMA_MEM_TO_MEM;
+               ucc->remote_thread_id = -1;
+               return false;
+       }
+
+       ucc->pkt_mode = ep_config->pkt_mode;
+       ucc->channel_tpl = ep_config->channel_tpl;
+       ucc->notdpkt = ep_config->notdpkt;
+       ucc->ep_type = ep_config->ep_type;
+
+       if (ucc->ep_type != PSIL_EP_NATIVE) {
+               const struct udma_match_data *match_data = ud->match_data;
+
+               if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
+                       ucc->enable_acc32 = ep_config->pdma_acc32;
+               if (match_data->flags & UDMA_FLAG_PDMA_BURST)
+                       ucc->enable_burst = ep_config->pdma_burst;
+       }
+
+       ucc->needs_epib = ep_config->needs_epib;
+       ucc->psd_size = ep_config->psd_size;
+       ucc->metadata_size =
+                       (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
+                       ucc->psd_size;
+
+       if (ucc->pkt_mode)
+               ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+                                ucc->metadata_size, ud->desc_align);
+
+       dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
+               ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
+
+       return true;
+}
+
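+/*
+ * OF DMA translation: the single dma-spec cell carries the remote PSI-L
+ * thread ID, which udma_dma_filter_fn() uses to configure the channel.
+ */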
+static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
+                                     struct of_dma *ofdma)
+{
+       struct udma_dev *ud = ofdma->of_dma_data;
+       dma_cap_mask_t mask = ud->ddev.cap_mask;
+       struct dma_chan *chan;
+
+       if (dma_spec->args_count != 1)
+               return NULL;
+
+       chan = __dma_request_channel(&mask, udma_dma_filter_fn,
+                                    &dma_spec->args[0], ofdma->of_node);
+       if (!chan) {
+               dev_err(ud->dev, "get channel fail in %s.\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return chan;
+}
+
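+/* Per-SoC UDMA instance parameters, selected through udma_of_match[] */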
+static struct udma_match_data am654_main_data = {
+       .psil_base = 0x1000,
+       .enable_memcpy_support = true,
+       .statictr_z_mask = GENMASK(11, 0),
+       .rchan_oes_offset = 0x2000,
+       .tpl_levels = 2,
+       .level_start_idx = {
+               [0] = 8, /* Normal channels */
+               [1] = 0, /* High Throughput channels */
+       },
+};
+
+static struct udma_match_data am654_mcu_data = {
+       .psil_base = 0x6000,
+       .enable_memcpy_support = true, /* TEST: DMA domains */
+       .statictr_z_mask = GENMASK(11, 0),
+       .rchan_oes_offset = 0x2000,
+       .tpl_levels = 2,
+       .level_start_idx = {
+               [0] = 2, /* Normal channels */
+               [1] = 0, /* High Throughput channels */
+       },
+};
+
+static struct udma_match_data j721e_main_data = {
+       .psil_base = 0x1000,
+       .enable_memcpy_support = true,
+       .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+       .statictr_z_mask = GENMASK(23, 0),
+       .rchan_oes_offset = 0x400,
+       .tpl_levels = 3,
+       .level_start_idx = {
+               [0] = 16, /* Normal channels */
+               [1] = 4, /* High Throughput channels */
+               [2] = 0, /* Ultra High Throughput channels */
+       },
+};
+
+static struct udma_match_data j721e_mcu_data = {
+       .psil_base = 0x6000,
+       .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
+       .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+       .statictr_z_mask = GENMASK(23, 0),
+       .rchan_oes_offset = 0x400,
+       .tpl_levels = 2,
+       .level_start_idx = {
+               [0] = 2, /* Normal channels */
+               [1] = 0, /* High Throughput channels */
+       },
+};
+
+static const struct of_device_id udma_of_match[] = {
+       {
+               .compatible = "ti,am654-navss-main-udmap",
+               .data = &am654_main_data,
+       }, {
+               .compatible = "ti,am654-navss-mcu-udmap",
+               .data = &am654_mcu_data,
+       }, {
+               .compatible = "ti,j721e-navss-main-udmap",
+               .data = &j721e_main_data,
+       }, {
+               .compatible = "ti,j721e-navss-mcu-udmap",
+               .data = &j721e_mcu_data,
+       },
+       { /* Sentinel */ },
+};
+
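+/* ioremap the MMIO register regions listed in mmr_names[] */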
+static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+{
+       struct resource *res;
+       int i;
+
+       for (i = 0; i < MMR_LAST; i++) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                  mmr_names[i]);
+               ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(ud->mmrs[i]))
+                       return PTR_ERR(ud->mmrs[i]);
+       }
+
+       return 0;
+}
+
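+/*
+ * Read the channel and flow counts from the capability registers and
+ * reserve the tchan/rchan/rflow ranges assigned to this host by the TISCI
+ * resource manager. Returns the number of usable channels or a negative
+ * error code.
+ */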
+static int udma_setup_resources(struct udma_dev *ud)
+{
+       struct device *dev = ud->dev;
+       int ch_count, ret, i, j;
+       u32 cap2, cap3;
+       struct ti_sci_resource_desc *rm_desc;
+       struct ti_sci_resource *rm_res, irq_res;
+       struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+       static const char * const range_names[] = { "ti,sci-rm-range-tchan",
+                                                   "ti,sci-rm-range-rchan",
+                                                   "ti,sci-rm-range-rflow" };
+
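+       /* UDMA_CAP_REG(2) and UDMA_CAP_REG(3) hold the channel/flow counts */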
+       cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+       cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+       ud->rflow_cnt = cap3 & 0x3fff;
+       ud->tchan_cnt = cap2 & 0x1ff;
+       ud->echan_cnt = (cap2 >> 9) & 0x1ff;
+       ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+       ch_count = ud->tchan_cnt + ud->rchan_cnt;
+
+       ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+                                          sizeof(unsigned long), GFP_KERNEL);
+       ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+                                 GFP_KERNEL);
+       ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+                                          sizeof(unsigned long), GFP_KERNEL);
+       ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+                                 GFP_KERNEL);
+       ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
+                                             sizeof(unsigned long),
+                                             GFP_KERNEL);
+       ud->rflow_gp_map_allocated = devm_kcalloc(dev,
+                                                 BITS_TO_LONGS(ud->rflow_cnt),
+                                                 sizeof(unsigned long),
+                                                 GFP_KERNEL);
+       ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+                                       sizeof(unsigned long),
+                                       GFP_KERNEL);
+       ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+                                 GFP_KERNEL);
+
+       if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
+           !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
+           !ud->rflows || !ud->rflow_in_use)
+               return -ENOMEM;
+
+       /*
+        * RX flows with the same Ids as RX channels are reserved to be used
+        * as default flows if remote HW can't generate flow_ids. Those
+        * RX flows can be requested only explicitly by id.
+        */
+       bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
+
+       /* by default no GP rflows are assigned to Linux */
+       bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
+
+       /* Get resource ranges from tisci */
+       for (i = 0; i < RM_RANGE_LAST; i++)
+               tisci_rm->rm_ranges[i] =
+                       devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+                                                   tisci_rm->tisci_dev_id,
+                                                   (char *)range_names[i]);
+
+       /* tchan ranges */
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+       if (IS_ERR(rm_res)) {
+               bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+       } else {
+               bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+               for (i = 0; i < rm_res->sets; i++) {
+                       rm_desc = &rm_res->desc[i];
+                       bitmap_clear(ud->tchan_map, rm_desc->start,
+                                    rm_desc->num);
+                       dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+                               rm_desc->start, rm_desc->num);
+               }
+       }
+       irq_res.sets = IS_ERR(rm_res) ? 0 : rm_res->sets;
+
+       /* rchan and matching default flow ranges */
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+       if (IS_ERR(rm_res)) {
+               bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+       } else {
+               bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+               for (i = 0; i < rm_res->sets; i++) {
+                       rm_desc = &rm_res->desc[i];
+                       bitmap_clear(ud->rchan_map, rm_desc->start,
+                                    rm_desc->num);
+                       dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+                               rm_desc->start, rm_desc->num);
+               }
+       }
+
+       irq_res.sets += IS_ERR(rm_res) ? 0 : rm_res->sets;
+       irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+       if (!irq_res.desc)
+               return -ENOMEM;
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+       for (i = 0; !IS_ERR(rm_res) && i < rm_res->sets; i++) {
+               irq_res.desc[i].start = rm_res->desc[i].start;
+               irq_res.desc[i].num = rm_res->desc[i].num;
+       }
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+       for (j = 0; !IS_ERR(rm_res) && j < rm_res->sets; j++, i++) {
+               irq_res.desc[i].start = rm_res->desc[j].start +
+                                       ud->match_data->rchan_oes_offset;
+               irq_res.desc[i].num = rm_res->desc[j].num;
+       }
+       ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+       kfree(irq_res.desc);
+       if (ret) {
+               dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+               return ret;
+       }
+
+       /* GP rflow ranges */
+       rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+       if (IS_ERR(rm_res)) {
+               /* all gp flows are assigned exclusively to Linux */
+               bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
+                            ud->rflow_cnt - ud->rchan_cnt);
+       } else {
+               for (i = 0; i < rm_res->sets; i++) {
+                       rm_desc = &rm_res->desc[i];
+                       bitmap_clear(ud->rflow_gp_map, rm_desc->start,
+                                    rm_desc->num);
+                       dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
+                               rm_desc->start, rm_desc->num);
+               }
+       }
+
+       ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
+       ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
+       if (!ch_count)
+               return -ENODEV;
+
+       ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
+                                   GFP_KERNEL);
+       if (!ud->channels)
+               return -ENOMEM;
+
+       dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+                ch_count,
+                ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
+                ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
+                ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+                                              ud->rflow_cnt));
+
+       return ch_count;
+}
+
+#define TI_UDMAC_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
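+/*
+ * Probe the UDMA instance: map its registers, acquire the TISCI and ring
+ * accelerator handles, set up the channel resources and register the
+ * dmaengine device together with its OF DMA translation.
+ */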
+static int udma_probe(struct platform_device *pdev)
+{
+       struct device_node *navss_node = pdev->dev.parent->of_node;
+       struct device *dev = &pdev->dev;
+       struct udma_dev *ud;
+       const struct of_device_id *match;
+       int i, ret;
+       int ch_count;
+
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+       if (ret)
+               dev_err(dev, "failed to set dma mask stuff\n");
+
+       ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
+       if (!ud)
+               return -ENOMEM;
+
+       ret = udma_get_mmrs(pdev, ud);
+       if (ret)
+               return ret;
+
+       ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
+       if (IS_ERR(ud->tisci_rm.tisci))
+               return PTR_ERR(ud->tisci_rm.tisci);
+
+       ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
+                                  &ud->tisci_rm.tisci_dev_id);
+       if (ret) {
+               dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
+               return ret;
+       }
+       pdev->id = ud->tisci_rm.tisci_dev_id;
+
+       ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
+                                  &ud->tisci_rm.tisci_navss_dev_id);
+       if (ret) {
+               dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
+               return ret;
+       }
+
+       ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
+       ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
+
+       ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+       if (IS_ERR(ud->ringacc))
+               return PTR_ERR(ud->ringacc);
+
+       dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+                                           DOMAIN_BUS_TI_SCI_INTA_MSI);
+       if (!dev->msi_domain) {
+               dev_err(dev, "Failed to get MSI domain\n");
+               return -EPROBE_DEFER;
+       }
+
+       match = of_match_node(udma_of_match, dev->of_node);
+       if (!match) {
+               dev_err(dev, "No compatible match found\n");
+               return -ENODEV;
+       }
+       ud->match_data = match->data;
+
+       dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+
+       ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
+       ud->ddev.device_config = udma_slave_config;
+       ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
+       ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+       ud->ddev.device_issue_pending = udma_issue_pending;
+       ud->ddev.device_tx_status = udma_tx_status;
+       ud->ddev.device_pause = udma_pause;
+       ud->ddev.device_resume = udma_resume;
+       ud->ddev.device_terminate_all = udma_terminate_all;
+       ud->ddev.device_synchronize = udma_synchronize;
+
+       ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+       ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
+       ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
+       ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
+       ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
+                                      DESC_METADATA_ENGINE;
+       if (ud->match_data->enable_memcpy_support) {
+               dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
+               ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
+               ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
+       }
+
+       ud->ddev.dev = dev;
+       ud->dev = dev;
+       ud->psil_base = ud->match_data->psil_base;
+
+       INIT_LIST_HEAD(&ud->ddev.channels);
+       INIT_LIST_HEAD(&ud->desc_to_purge);
+
+       ch_count = udma_setup_resources(ud);
+       if (ch_count <= 0)
+               return ch_count;
+
+       spin_lock_init(&ud->lock);
+       INIT_WORK(&ud->purge_work, udma_purge_desc_work);
+
+       ud->desc_align = 64;
+       if (ud->desc_align < dma_get_cache_alignment())
+               ud->desc_align = dma_get_cache_alignment();
+
+       for (i = 0; i < ud->tchan_cnt; i++) {
+               struct udma_tchan *tchan = &ud->tchans[i];
+
+               tchan->id = i;
+               tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
+       }
+
+       for (i = 0; i < ud->rchan_cnt; i++) {
+               struct udma_rchan *rchan = &ud->rchans[i];
+
+               rchan->id = i;
+               rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
+       }
+
+       for (i = 0; i < ud->rflow_cnt; i++) {
+               struct udma_rflow *rflow = &ud->rflows[i];
+
+               rflow->id = i;
+       }
+
+       for (i = 0; i < ch_count; i++) {
+               struct udma_chan *uc = &ud->channels[i];
+
+               uc->ud = ud;
+               uc->vc.desc_free = udma_desc_free;
+               uc->id = i;
+               uc->tchan = NULL;
+               uc->rchan = NULL;
+               uc->config.remote_thread_id = -1;
+               uc->config.dir = DMA_MEM_TO_MEM;
+               uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
+                                         dev_name(dev), i);
+
+               vchan_init(&uc->vc, &ud->ddev);
+               /* Use custom vchan completion handling */
+               tasklet_init(&uc->vc.task, udma_vchan_complete,
+                            (unsigned long)&uc->vc);
+               init_completion(&uc->teardown_completed);
+       }
+
+       ret = dma_async_device_register(&ud->ddev);
+       if (ret) {
+               dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, ud);
+
+       ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
+       if (ret) {
+               dev_err(dev, "failed to register of_dma controller\n");
+               dma_async_device_unregister(&ud->ddev);
+       }
+
+       return ret;
+}
+
+static struct platform_driver udma_driver = {
+       .driver = {
+               .name   = "ti-udma",
+               .of_match_table = udma_of_match,
+               .suppress_bind_attrs = true,
+       },
+       .probe          = udma_probe,
+};
+builtin_platform_driver(udma_driver);
+
+/* Private interfaces to UDMA */
+#include "k3-udma-private.c"
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
new file mode 100644 (file)
index 0000000..128d874
--- /dev/null
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_H_
+#define K3_UDMA_H_
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/* Global registers */
+#define UDMA_REV_REG                   0x0
+#define UDMA_PERF_CTL_REG              0x4
+#define UDMA_EMU_CTL_REG               0x8
+#define UDMA_PSIL_TO_REG               0x10
+#define UDMA_UTC_CTL_REG               0x1c
+#define UDMA_CAP_REG(i)                        (0x20 + ((i) * 4))
+#define UDMA_RX_FLOW_ID_FW_OES_REG     0x80
+#define UDMA_RX_FLOW_ID_FW_STATUS_REG  0x88
+
+/* TX chan RT regs */
+#define UDMA_TCHAN_RT_CTL_REG          0x0
+#define UDMA_TCHAN_RT_SWTRIG_REG       0x8
+#define UDMA_TCHAN_RT_STDATA_REG       0x80
+
+#define UDMA_TCHAN_RT_PEER_REG(i)      (0x200 + ((i) * 0x4))
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG    \
+       UDMA_TCHAN_RT_PEER_REG(0)       /* PSI-L: 0x400 */
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG     \
+       UDMA_TCHAN_RT_PEER_REG(1)       /* PSI-L: 0x401 */
+#define UDMA_TCHAN_RT_PEER_BCNT_REG            \
+       UDMA_TCHAN_RT_PEER_REG(4)       /* PSI-L: 0x404 */
+#define UDMA_TCHAN_RT_PEER_RT_EN_REG           \
+       UDMA_TCHAN_RT_PEER_REG(8)       /* PSI-L: 0x408 */
+
+#define UDMA_TCHAN_RT_PCNT_REG         0x400
+#define UDMA_TCHAN_RT_BCNT_REG         0x408
+#define UDMA_TCHAN_RT_SBCNT_REG                0x410
+
+/* RX chan RT regs */
+#define UDMA_RCHAN_RT_CTL_REG          0x0
+#define UDMA_RCHAN_RT_SWTRIG_REG       0x8
+#define UDMA_RCHAN_RT_STDATA_REG       0x80
+
+#define UDMA_RCHAN_RT_PEER_REG(i)      (0x200 + ((i) * 0x4))
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG    \
+       UDMA_RCHAN_RT_PEER_REG(0)       /* PSI-L: 0x400 */
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG     \
+       UDMA_RCHAN_RT_PEER_REG(1)       /* PSI-L: 0x401 */
+#define UDMA_RCHAN_RT_PEER_BCNT_REG            \
+       UDMA_RCHAN_RT_PEER_REG(4)       /* PSI-L: 0x404 */
+#define UDMA_RCHAN_RT_PEER_RT_EN_REG           \
+       UDMA_RCHAN_RT_PEER_REG(8)       /* PSI-L: 0x408 */
+
+#define UDMA_RCHAN_RT_PCNT_REG         0x400
+#define UDMA_RCHAN_RT_BCNT_REG         0x408
+#define UDMA_RCHAN_RT_SBCNT_REG                0x410
+
+/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */
+#define UDMA_CHAN_RT_CTL_EN            BIT(31)
+#define UDMA_CHAN_RT_CTL_TDOWN         BIT(30)
+#define UDMA_CHAN_RT_CTL_PAUSE         BIT(29)
+#define UDMA_CHAN_RT_CTL_FTDOWN                BIT(28)
+#define UDMA_CHAN_RT_CTL_ERROR         BIT(0)
+
+/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */
+#define UDMA_PEER_RT_EN_ENABLE         BIT(31)
+#define UDMA_PEER_RT_EN_TEARDOWN       BIT(30)
+#define UDMA_PEER_RT_EN_PAUSE          BIT(29)
+#define UDMA_PEER_RT_EN_FLUSH          BIT(28)
+#define UDMA_PEER_RT_EN_IDLE           BIT(1)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG
+ */
+#define PDMA_STATIC_TR_X_MASK          GENMASK(26, 24)
+#define PDMA_STATIC_TR_X_SHIFT         (24)
+#define PDMA_STATIC_TR_Y_MASK          GENMASK(11, 0)
+#define PDMA_STATIC_TR_Y_SHIFT         (0)
+
+#define PDMA_STATIC_TR_Y(x)    \
+       (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK)
+#define PDMA_STATIC_TR_X(x)    \
+       (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK)
+
+#define PDMA_STATIC_TR_XY_ACC32                BIT(30)
+#define PDMA_STATIC_TR_XY_BURST                BIT(31)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG
+ */
+#define PDMA_STATIC_TR_Z(x, mask)      ((x) & (mask))
+
+struct udma_dev;
+struct udma_tchan;
+struct udma_rchan;
+struct udma_rflow;
+
+enum udma_rm_range {
+       RM_RANGE_TCHAN = 0,
+       RM_RANGE_RCHAN,
+       RM_RANGE_RFLOW,
+       RM_RANGE_LAST,
+};
+
+struct udma_tisci_rm {
+       const struct ti_sci_handle *tisci;
+       const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
+       u32  tisci_dev_id;
+
+       /* tisci information for PSI-L thread pairing/unpairing */
+       const struct ti_sci_rm_psil_ops *tisci_psil_ops;
+       u32  tisci_navss_dev_id;
+
+       struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
+};
+
+/* Direct access to UDMA low-level resources for the glue layer */
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread);
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+                           u32 dst_thread);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+void xudma_dev_put(struct udma_dev *ud);
+u32 xudma_dev_get_psil_base(struct udma_dev *ud);
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+
+struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id);
+struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id);
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id);
+
+void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p);
+void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p);
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p);
+
+int xudma_tchan_get_id(struct udma_tchan *p);
+int xudma_rchan_get_id(struct udma_rchan *p);
+int xudma_rflow_get_id(struct udma_rflow *p);
+
+u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg);
+void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
+u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
+void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+
+#endif /* K3_UDMA_H_ */
index ec4adf4260a098488fff4471e82c2f1a89b90c54..23e33a85f03322131e17e045cfbdd925456037ab 100644 (file)
@@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg)
                dmaengine_desc_get_callback(&vd->tx, &cb);
 
                list_del(&vd->node);
-               vchan_vdesc_fini(vd);
-
                dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+               vchan_vdesc_fini(vd);
        }
 }
 
@@ -115,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
        struct virt_dma_desc *vd, *_vd;
 
        list_for_each_entry_safe(vd, _vd, head, node) {
-               if (dmaengine_desc_test_reuse(&vd->tx)) {
-                       list_move_tail(&vd->node, &vc->desc_allocated);
-               } else {
-                       dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-                       list_del(&vd->node);
-                       vc->desc_free(vd);
-               }
+               list_del(&vd->node);
+               vchan_vdesc_fini(vd);
        }
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -135,6 +129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
        INIT_LIST_HEAD(&vc->desc_submitted);
        INIT_LIST_HEAD(&vc->desc_issued);
        INIT_LIST_HEAD(&vc->desc_completed);
+       INIT_LIST_HEAD(&vc->desc_terminated);
 
        tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
 
index ab158bac03a7c81ae24cf8aaca5a599b3b223177..e9f5250fbe4dbc9e4f1f9d6c01fdbae8b2243cf2 100644 (file)
@@ -31,9 +31,9 @@ struct virt_dma_chan {
        struct list_head desc_submitted;
        struct list_head desc_issued;
        struct list_head desc_completed;
+       struct list_head desc_terminated;
 
        struct virt_dma_desc *cyclic;
-       struct virt_dma_desc *vd_terminated;
 };
 
 static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
 {
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
 
-       if (dmaengine_desc_test_reuse(&vd->tx))
+       if (dmaengine_desc_test_reuse(&vd->tx)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&vc->lock, flags);
                list_add(&vd->node, &vc->desc_allocated);
-       else
+               spin_unlock_irqrestore(&vc->lock, flags);
+       } else {
                vc->desc_free(vd);
+       }
 }
 
 /**
@@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
 {
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
 
-       /* free up stuck descriptor */
-       if (vc->vd_terminated)
-               vchan_vdesc_fini(vc->vd_terminated);
+       list_add_tail(&vd->node, &vc->desc_terminated);
 
-       vc->vd_terminated = vd;
        if (vc->cyclic == vd)
                vc->cyclic = NULL;
 }
@@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
        list_splice_tail_init(&vc->desc_submitted, head);
        list_splice_tail_init(&vc->desc_issued, head);
        list_splice_tail_init(&vc->desc_completed, head);
+       list_splice_tail_init(&vc->desc_terminated, head);
 }
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
@@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
  */
 static inline void vchan_synchronize(struct virt_dma_chan *vc)
 {
+       LIST_HEAD(head);
        unsigned long flags;
 
        tasklet_kill(&vc->task);
 
        spin_lock_irqsave(&vc->lock, flags);
-       if (vc->vd_terminated) {
-               vchan_vdesc_fini(vc->vd_terminated);
-               vc->vd_terminated = NULL;
-       }
+
+       list_splice_tail_init(&vc->desc_terminated, &head);
+
        spin_unlock_irqrestore(&vc->lock, flags);
+
+       vchan_dma_desc_free_list(vc, &head);
 }
 
 #endif
index 9c845c07b107c363e71aa810ef60d4511e6deac3..d47749a35863fa5f81b5ee42b1f096306d9a5297 100644 (file)
 /* Max transfer size per descriptor */
 #define ZYNQMP_DMA_MAX_TRANS_LEN       0x40000000
 
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN    32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN    32768U
+
 /* Reset values for data attributes */
 #define ZYNQMP_DMA_AXCACHE_VAL         0xF
-#define ZYNQMP_DMA_ARLEN_RST_VAL       0xF
-#define ZYNQMP_DMA_AWLEN_RST_VAL       0xF
 
 #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL   0x1F
 
@@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
 
 static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
 {
-       u32 val;
+       u32 val, burst_val;
 
        val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
        val |= ZYNQMP_DMA_POINT_TYPE_SG;
        writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
 
        val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+       burst_val = __ilog2_u32(chan->src_burst_len);
        val = (val & ~ZYNQMP_DMA_ARLEN) |
-               (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+               ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+       burst_val = __ilog2_u32(chan->dst_burst_len);
        val = (val & ~ZYNQMP_DMA_AWLEN) |
-               (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+               ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
        writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
 }
 
@@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
 {
        struct zynqmp_dma_chan *chan = to_chan(dchan);
 
-       chan->src_burst_len = config->src_maxburst;
-       chan->dst_burst_len = config->dst_maxburst;
+       chan->src_burst_len = clamp(config->src_maxburst, 1U,
+               ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+       chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+               ZYNQMP_DMA_MAX_DST_BURST_LEN);
 
        return 0;
 }
@@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
                return PTR_ERR(chan->regs);
 
        chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
-       chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
-       chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+       chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+       chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
        err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
        if (err < 0) {
                dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
index 417dad6355268934383074c1759154535d114a33..b3c99bb5fe77b241fa3e86911cf828e49427cc7f 100644 (file)
@@ -462,7 +462,7 @@ config EDAC_ALTERA_SDMMC
 
 config EDAC_SIFIVE
        bool "Sifive platform EDAC driver"
-       depends on EDAC=y && RISCV
+       depends on EDAC=y && SIFIVE_L2
        help
          Support for error detection and correction on the SiFive SoCs.
 
@@ -491,8 +491,7 @@ config EDAC_TI
        tristate "Texas Instruments DDR3 ECC Controller"
        depends on ARCH_KEYSTONE || SOC_DRA7XX
        help
-         Support for error detection and correction on the
-          TI SoCs.
+         Support for error detection and correction on the TI SoCs.
 
 config EDAC_QCOM
        tristate "QCOM EDAC Controller"
index 428ce98f6776ccca4050093f448fd8fcc3b17815..9fbad908a854810ab716bc98a00da83ba6e041bb 100644 (file)
@@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
 
        scrubval = scrubrates[i].scrubval;
 
-       if (pvt->fam == 0x17 || pvt->fam == 0x18) {
+       if (pvt->umc) {
                __f17h_set_scrubval(pvt, scrubval);
        } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
                f15h_select_dct(pvt, 0);
@@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
        int i, retval = -EINVAL;
        u32 scrubval = 0;
 
-       switch (pvt->fam) {
-       case 0x15:
-               /* Erratum #505 */
-               if (pvt->model < 0x10)
-                       f15h_select_dct(pvt, 0);
-
-               if (pvt->model == 0x60)
-                       amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
-               break;
-
-       case 0x17:
-       case 0x18:
+       if (pvt->umc) {
                amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
                if (scrubval & BIT(0)) {
                        amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
@@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
                } else {
                        scrubval = 0;
                }
-               break;
+       } else if (pvt->fam == 0x15) {
+               /* Erratum #505 */
+               if (pvt->model < 0x10)
+                       f15h_select_dct(pvt, 0);
 
-       default:
+               if (pvt->model == 0x60)
+                       amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+       } else {
                amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
-               break;
        }
 
        scrubval = scrubval & 0x001F;
@@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt)
 {
        u32 dram_ctrl, dcsm;
 
+       if (pvt->umc) {
+               if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+                       pvt->dram_type = MEM_LRDDR4;
+               else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+                       pvt->dram_type = MEM_RDDR4;
+               else
+                       pvt->dram_type = MEM_DDR4;
+               return;
+       }
+
        switch (pvt->fam) {
        case 0xf:
                if (pvt->ext_model >= K8_REV_F)
@@ -1100,16 +1103,6 @@ static void determine_memory_type(struct amd64_pvt *pvt)
        case 0x16:
                goto ddr3;
 
-       case 0x17:
-       case 0x18:
-               if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
-                       pvt->dram_type = MEM_LRDDR4;
-               else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
-                       pvt->dram_type = MEM_RDDR4;
-               else
-                       pvt->dram_type = MEM_DDR4;
-               return;
-
        default:
                WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
                pvt->dram_type = MEM_EMPTY;
@@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = {
                        .dbam_to_cs             = f17_addr_mask_to_cs_size,
                }
        },
+       [F19_CPUS] = {
+               .ctl_name = "F19h",
+               .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
+               .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
+               .max_mcs = 8,
+               .ops = {
+                       .early_channel_count    = f17_early_channel_count,
+                       .dbam_to_cs             = f17_addr_mask_to_cs_size,
+               }
+       },
 };
 
 /*
@@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
                        family_types[F17_CPUS].ctl_name = "F18h";
                break;
 
+       case 0x19:
+               fam_type        = &family_types[F19_CPUS];
+               pvt->ops        = &family_types[F19_CPUS].ops;
+               family_types[F19_CPUS].ctl_name = "F19h";
+               break;
+
        default:
                amd64_err("Unsupported family!\n");
                return NULL;
@@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid)
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;
 
-       mci = find_mci_by_dev(&F3->dev);
-       WARN_ON(!mci);
-
        /* Remove from EDAC CORE tracking list */
        mci = edac_mc_del_mc(&F3->dev);
        if (!mci)
@@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
        { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,  X86_FEATURE_ANY, 0 },
        { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY,  X86_FEATURE_ANY, 0 },
        { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+       { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY,  X86_FEATURE_ANY, 0 },
        { }
 };
 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
index 9be31688110bd2043b208a055a5bd76bc2919aac..abbf3c274d7498667359e4bdfeca5a6627487b54 100644 (file)
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
+#define PCI_DEVICE_ID_AMD_19H_DF_F0    0x1650
+#define PCI_DEVICE_ID_AMD_19H_DF_F6    0x1656
 
 /*
  * Function 1 - Address Map
@@ -292,6 +294,7 @@ enum amd_families {
        F17_M10H_CPUS,
        F17_M30H_CPUS,
        F17_M70H_CPUS,
+       F19_CPUS,
        NUM_FAMILIES,
 };
 
index 09a9e3de95951e55bea7ad8446ecc76208927768..b194658b8b5c9fd4d235c1ec65f13980ac4f511f 100644 (file)
@@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci)
        if (!np) {
                dev_err(mci->pdev, "dt: missing /memory node\n");
                return -ENODEV;
-       };
+       }
 
        rc = of_address_to_resource(np, 0, &r);
 
@@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci)
        if (rc) {
                dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
                return rc;
-       };
+       }
 
        dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
                r.start, resource_size(&r), PAGE_SHIFT);
index f564a4a8a4aec88cf1cfa1ebe9b4648c16d2a523..5c1eea96230c3fc5252576da7f6a5fb378f5bdea 100644 (file)
@@ -324,7 +324,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
 
        pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
        mchbar &= I3000_MCHBAR_MASK;
-       window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
+       window = ioremap(mchbar, I3000_MMR_WINDOW_SIZE);
        if (!window) {
                printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
                        mchbar);
index 432b375a407540e499e00b7abf4abd4ca995a0d0..a8988db6d4235f65fa33793b4bd2d55d8e63dfab 100644 (file)
@@ -280,7 +280,7 @@ static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
                return NULL;
        }
 
-       window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
+       window = ioremap(u.mchbar, I3200_MMR_WINDOW_SIZE);
        if (!window)
                printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
                        (unsigned long long)u.mchbar);
index 0ddc41e47a96f515f41c90d6a398caacecff000c..191aa7c19ded7005f56ee0824162754066d88ff4 100644 (file)
@@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a)
        return a & ((1 << 16) - 1);
 }
 
-static inline u32 i5100_redmemb_ecc_locator(u32 a)
-{
-       return a & ((1 << 18) - 1);
-}
-
 static inline u32 i5100_recmema_merr(u32 a)
 {
        return i5100_nrecmema_merr(a);
@@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
        u32 dw;
        u32 dw2;
        unsigned syndrome = 0;
-       unsigned ecc_loc = 0;
        unsigned merr;
        unsigned bank;
        unsigned rank;
@@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan,
                pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
                syndrome = dw2;
                pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
-               ecc_loc = i5100_redmemb_ecc_locator(dw2);
        }
 
        if (i5100_validlog_recmemvalid(dw)) {
index 7c6a2d4d23606de53e9edcbdab9f334a21b04499..6be99e0d850dc12b68b96d907d6d94f7f5fb1f53 100644 (file)
@@ -485,7 +485,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
                goto fail0;
        }
        mchbar &= 0xffffc000;   /* bits 31:14 used for 16K window */
-       mch_window = ioremap_nocache(mchbar, 0x1000);
+       mch_window = ioremap(mchbar, 0x1000);
        if (!mch_window) {
                edac_dbg(3, "error ioremapping MCHBAR!\n");
                goto fail0;
index 4f65073f230b2b70db37f9bbbe456f7da2f59246..d68346a8e141a80270e9dd9f255b0a5adeef3de0 100644 (file)
@@ -357,7 +357,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
                return NULL;
        }
 
-       window = ioremap_nocache(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+       window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
        if (!window)
                ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
                               (unsigned long long)u.mchbar);
index ea622c6f3a393daea4f263a60893ce5f344c8a86..ea980c556f2e3de18bafe70f1d540d7b0b775f4b 100644 (file)
@@ -6,7 +6,7 @@
 
 #include "mce_amd.h"
 
-static struct amd_decoder_ops *fam_ops;
+static struct amd_decoder_ops fam_ops;
 
 static u8 xec_mask      = 0xf;
 
@@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = {
        "L2 Fill Data error",
 };
 
+static const char * const smca_ls2_mce_desc[] = {
+       "An ECC error was detected on a data cache read by a probe or victimization",
+       "An ECC error or L2 poison was detected on a data cache read by a load",
+       "An ECC error was detected on a data cache read-modify-write by a store",
+       "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
+       "An ECC error or poison bit mismatch was detected on a tag read by a load",
+       "An ECC error or poison bit mismatch was detected on a tag read by a store",
+       "An ECC error was detected on an EMEM read by a load",
+       "An ECC error was detected on an EMEM read-modify-write by a store",
+       "A parity error was detected in an L1 TLB entry by any access",
+       "A parity error was detected in an L2 TLB entry by any access",
+       "A parity error was detected in a PWC entry by any access",
+       "A parity error was detected in an STQ entry by any access",
+       "A parity error was detected in an LDQ entry by any access",
+       "A parity error was detected in a MAB entry by any access",
+       "A parity error was detected in an SCB entry state field by any access",
+       "A parity error was detected in an SCB entry address field by any access",
+       "A parity error was detected in an SCB entry data field by any access",
+       "A parity error was detected in a WCB entry by any access",
+       "A poisoned line was detected in an SCB entry by any access",
+       "A SystemReadDataError error was reported on read data returned from L2 for a load",
+       "A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
+       "A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
+       "A hardware assertion error was reported",
+       "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
+};
+
 static const char * const smca_if_mce_desc[] = {
        "Op Cache Microtag Probe Port Parity Error",
        "IC Microtag or Full Tag Multi-hit Error",
@@ -378,6 +405,7 @@ struct smca_mce_desc {
 
 static struct smca_mce_desc smca_mce_descs[] = {
        [SMCA_LS]       = { smca_ls_mce_desc,   ARRAY_SIZE(smca_ls_mce_desc)    },
+       [SMCA_LS_V2]    = { smca_ls2_mce_desc,  ARRAY_SIZE(smca_ls2_mce_desc)   },
        [SMCA_IF]       = { smca_if_mce_desc,   ARRAY_SIZE(smca_if_mce_desc)    },
        [SMCA_L2_CACHE] = { smca_l2_mce_desc,   ARRAY_SIZE(smca_l2_mce_desc)    },
        [SMCA_DE]       = { smca_de_mce_desc,   ARRAY_SIZE(smca_de_mce_desc)    },
@@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m)
                                            : (xec ? "multimatch" : "parity")));
                        return;
                }
-       } else if (fam_ops->mc0_mce(ec, xec))
+       } else if (fam_ops.mc0_mce(ec, xec))
                ;
        else
                pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n");
@@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m)
                        pr_cont("Hardware Assert.\n");
                else
                        goto wrong_mc1_mce;
-       } else if (fam_ops->mc1_mce(ec, xec))
+       } else if (fam_ops.mc1_mce(ec, xec))
                ;
        else
                goto wrong_mc1_mce;
@@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m)
 
        pr_emerg(HW_ERR "MC2 Error: ");
 
-       if (!fam_ops->mc2_mce(ec, xec))
+       if (!fam_ops.mc2_mce(ec, xec))
                pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
 }
 
@@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
        if (m->tsc)
                pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
 
-       if (!fam_ops)
+       /* Doesn't matter which member to test. */
+       if (!fam_ops.mc0_mce)
                goto err_code;
 
        switch (m->bank) {
@@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void)
            c->x86_vendor != X86_VENDOR_HYGON)
                return -ENODEV;
 
-       fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
-       if (!fam_ops)
-               return -ENOMEM;
+       if (boot_cpu_has(X86_FEATURE_SMCA)) {
+               xec_mask = 0x3f;
+               goto out;
+       }
 
        switch (c->x86) {
        case 0xf:
-               fam_ops->mc0_mce = k8_mc0_mce;
-               fam_ops->mc1_mce = k8_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = k8_mc0_mce;
+               fam_ops.mc1_mce = k8_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x10:
-               fam_ops->mc0_mce = f10h_mc0_mce;
-               fam_ops->mc1_mce = k8_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = f10h_mc0_mce;
+               fam_ops.mc1_mce = k8_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x11:
-               fam_ops->mc0_mce = k8_mc0_mce;
-               fam_ops->mc1_mce = k8_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = k8_mc0_mce;
+               fam_ops.mc1_mce = k8_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x12:
-               fam_ops->mc0_mce = f12h_mc0_mce;
-               fam_ops->mc1_mce = k8_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = f12h_mc0_mce;
+               fam_ops.mc1_mce = k8_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x14:
-               fam_ops->mc0_mce = cat_mc0_mce;
-               fam_ops->mc1_mce = cat_mc1_mce;
-               fam_ops->mc2_mce = k8_mc2_mce;
+               fam_ops.mc0_mce = cat_mc0_mce;
+               fam_ops.mc1_mce = cat_mc1_mce;
+               fam_ops.mc2_mce = k8_mc2_mce;
                break;
 
        case 0x15:
                xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
 
-               fam_ops->mc0_mce = f15h_mc0_mce;
-               fam_ops->mc1_mce = f15h_mc1_mce;
-               fam_ops->mc2_mce = f15h_mc2_mce;
+               fam_ops.mc0_mce = f15h_mc0_mce;
+               fam_ops.mc1_mce = f15h_mc1_mce;
+               fam_ops.mc2_mce = f15h_mc2_mce;
                break;
 
        case 0x16:
                xec_mask = 0x1f;
-               fam_ops->mc0_mce = cat_mc0_mce;
-               fam_ops->mc1_mce = cat_mc1_mce;
-               fam_ops->mc2_mce = f16h_mc2_mce;
+               fam_ops.mc0_mce = cat_mc0_mce;
+               fam_ops.mc1_mce = cat_mc1_mce;
+               fam_ops.mc2_mce = f16h_mc2_mce;
                break;
 
        case 0x17:
        case 0x18:
-               xec_mask = 0x3f;
-               if (!boot_cpu_has(X86_FEATURE_SMCA)) {
-                       printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
-                       goto err_out;
-               }
-               break;
+               pr_warn("Decoding supported only on Scalable MCA processors.\n");
+               return -EINVAL;
 
        default:
                printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
-               goto err_out;
+               return -EINVAL;
        }
 
+out:
        pr_info("MCE: In-kernel MCE decoding enabled.\n");
 
        mce_register_decode_chain(&amd_mce_dec_nb);
 
        return 0;
-
-err_out:
-       kfree(fam_ops);
-       fam_ops = NULL;
-       return -EINVAL;
 }
 early_initcall(mce_amd_init);
 
@@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init);
 static void __exit mce_amd_exit(void)
 {
        mce_unregister_decode_chain(&amd_mce_dec_nb);
-       kfree(fam_ops);
 }
 
 MODULE_DESCRIPTION("AMD MCE decoder");
index 413cdb4a591db1758d799277646bcce6601c31f1..3a3dcb14ed99d8c0aa22d868f3310b34b62b92e3 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/edac.h>
 #include <linux/platform_device.h>
 #include "edac_module.h"
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
 
 #define DRVNAME "sifive_edac"
 
@@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev)
        p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
                                            1, 1, NULL, 0,
                                            edac_device_alloc_index());
-       if (IS_ERR(p->dci))
-               return PTR_ERR(p->dci);
+       if (!p->dci)
+               return -ENOMEM;
 
        p->dci->dev = &pdev->dev;
        p->dci->mod_name = "Sifive ECC Manager";
index 95662a4ff4c4fac90c73a866e0c552ee926e2d47..99bbaf629b8d90ee18a321b68bac0edd55f62049 100644 (file)
@@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
 
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
        if (!pdev) {
-               skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
+               edac_dbg(2, "Can't get tolm/tohm\n");
                return -ENODEV;
        }
 
index cc779f3f9e2dc92592de30b7eb3d9cf31e807e67..a65e2f78a402237500181e2368219d0d1c90da96 100644 (file)
@@ -266,7 +266,7 @@ static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
                return NULL;
        }
 
-       window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
+       window = ioremap(u.mchbar, X38_MMR_WINDOW_SIZE);
        if (!window)
                printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
                        (unsigned long long)u.mchbar);
index 0cc7466736773e8e0a1fb3f52732714820c730f5..6ca2f5ab6c57e8849dda47ad66b7d635341e50bf 100644 (file)
@@ -551,7 +551,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
        INIT_LIST_HEAD(&lynx->client_list);
        kref_init(&lynx->kref);
 
-       lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
+       lynx->registers = ioremap(pci_resource_start(dev, 0),
                                          PCILYNX_MAX_REGISTER);
        if (lynx->registers == NULL) {
                dev_err(&dev->dev, "Failed to map registers\n");
index da04fdae62a15da32a25399a0926f503685b70d7..835ece9c00f106c74b7b5d6ed6246d124e442f21 100644 (file)
@@ -120,7 +120,7 @@ int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
        void __iomem *iobase;
        int err;
 
-       iobase = ioremap_nocache(base, lim);
+       iobase = ioremap(base, lim);
        if (!iobase)
                return -ENOMEM;
 
index 5b7ef89eb70143875ad4dd6a1d07da0bfc67c973..ed10da5313e8652b3ff72bc16283a9315eb9370e 100644 (file)
@@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev)
        fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
                                    TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
        if (IS_ERR(fw_shm_pool)) {
-               tee_client_close_context(pvt_data.ctx);
                dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
                err = PTR_ERR(fw_shm_pool);
                goto out_sess;
index bcc378c19ebecf9f75b4c8cf0854a68348f5c8e1..ecc83e2f032c35018ca6c6b6db0be3f36eb6ef67 100644 (file)
@@ -215,6 +215,28 @@ config EFI_RCI2_TABLE
 
          Say Y here for Dell EMC PowerEdge systems.
 
+config EFI_DISABLE_PCI_DMA
+       bool "Clear Busmaster bit on PCI bridges during ExitBootServices()"
+       help
+         Disable the busmaster bit in the command register on all PCI bridges
+         while calling ExitBootServices() and passing control to the runtime
+         kernel. System firmware may configure the IOMMU to prevent malicious
+         PCI devices from being able to attack the OS via DMA. However, since
+         firmware can't guarantee that the OS is IOMMU-aware, it will tear
+         down IOMMU configuration when ExitBootServices() is called. This
+         leaves a window between where a hostile device could still cause
+         damage before Linux configures the IOMMU again.
+
+         If you say Y here, the EFI stub will clear the busmaster bit on all
+         PCI bridges before ExitBootServices() is called. This will prevent
+         any malicious PCI devices from being able to perform DMA until the
+         kernel reenables busmastering after configuring the IOMMU.
+
+         This option will cause failures with some poorly behaved hardware
+         and should not be enabled without testing. The kernel command-line
+         options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
+         may be used to override this option.
+
 endmenu
 
 config UEFI_CPER
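
For the EFI_DISABLE_PCI_DMA help text above, a minimal sketch of what
"clearing the busmaster bit on a bridge" amounts to, expressed with the
ordinary in-kernel PCI helpers rather than the stub's EFI boot-services
protocol calls; the function name here is hypothetical:

	#include <linux/pci.h>

	/*
	 * Hedged sketch, not the stub implementation: the stub does the
	 * equivalent through the EFI PCI I/O protocol before calling
	 * ExitBootServices().  pci_clear_master() clears PCI_COMMAND_MASTER
	 * in the command register, so the bridge stops forwarding DMA.
	 */
	static void clear_bridge_busmastering_sketch(void)
	{
		struct pci_dev *pdev = NULL;

		for_each_pci_dev(pdev) {
			if (pci_is_bridge(pdev))
				pci_clear_master(pdev);
		}
	}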
index 904fa09e6a6b0341ab3437080a25ed7b228bd806..d99f5b0c8a09059de5061ffb3ef444751de6b9a9 100644 (file)
 #define pr_fmt(fmt)    "efi: " fmt
 
 #include <linux/efi.h>
+#include <linux/fwnode.h>
 #include <linux/init.h>
 #include <linux/memblock.h>
 #include <linux/mm_types.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_fdt.h>
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
@@ -276,15 +278,112 @@ void __init efi_init(void)
                efi_memmap_unmap();
 }
 
+static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
+{
+       u64 fb_base = screen_info.lfb_base;
+
+       if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+               fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
+
+       return fb_base >= range->cpu_addr &&
+              fb_base < (range->cpu_addr + range->size);
+}
+
+static struct device_node *find_pci_overlap_node(void)
+{
+       struct device_node *np;
+
+       for_each_node_by_type(np, "pci") {
+               struct of_pci_range_parser parser;
+               struct of_pci_range range;
+               int err;
+
+               err = of_pci_range_parser_init(&parser, np);
+               if (err) {
+                       pr_warn("of_pci_range_parser_init() failed: %d\n", err);
+                       continue;
+               }
+
+               for_each_of_pci_range(&parser, &range)
+                       if (efifb_overlaps_pci_range(&range))
+                               return np;
+       }
+       return NULL;
+}
+
+/*
+ * If the efifb framebuffer is backed by a PCI graphics controller, we have
+ * to ensure that this relation is expressed using a device link when
+ * running in DT mode, or the probe order may be reversed, resulting in a
+ * resource reservation conflict on the memory window that the efifb
+ * framebuffer steals from the PCIe host bridge.
+ */
+static int efifb_add_links(const struct fwnode_handle *fwnode,
+                          struct device *dev)
+{
+       struct device_node *sup_np;
+       struct device *sup_dev;
+
+       sup_np = find_pci_overlap_node();
+
+       /*
+        * If there's no PCI graphics controller backing the efifb, we are
+        * done here.
+        */
+       if (!sup_np)
+               return 0;
+
+       sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+       of_node_put(sup_np);
+
+       /*
+        * Return -ENODEV if the PCI graphics controller device hasn't been
+        * registered yet.  This keeps efifb from probing and ensures that
+        * this function is retried when new devices are registered.
+        */
+       if (!sup_dev)
+               return -ENODEV;
+
+       /*
+        * If this fails, retrying this function at a later point won't
+        * change anything. So, don't return an error after this.
+        */
+       if (!device_link_add(dev, sup_dev, 0))
+               dev_warn(dev, "device_link_add() failed\n");
+
+       put_device(sup_dev);
+
+       return 0;
+}
+
+static const struct fwnode_operations efifb_fwnode_ops = {
+       .add_links = efifb_add_links,
+};
+
+static struct fwnode_handle efifb_fwnode = {
+       .ops = &efifb_fwnode_ops,
+};
+
 static int __init register_gop_device(void)
 {
-       void *pd;
+       struct platform_device *pd;
+       int err;
 
        if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
                return 0;
 
-       pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
-                                          &screen_info, sizeof(screen_info));
-       return PTR_ERR_OR_ZERO(pd);
+       pd = platform_device_alloc("efi-framebuffer", 0);
+       if (!pd)
+               return -ENOMEM;
+
+       if (IS_ENABLED(CONFIG_PCI))
+               pd->dev.fwnode = &efifb_fwnode;
+
+       err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
+       if (err)
+               return err;
+
+       return platform_device_add(pd);
 }
 subsys_initcall(register_gop_device);
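
The device-link comment above efifb_add_links() relies on the core
semantics of device_link_add(): the first argument is the consumer, the
second the supplier, and the driver core then guarantees that the
supplier probes (and resumes) before the consumer. A hedged sketch, with
hypothetical device pointers, of the same call pattern:

	#include <linux/device.h>

	/*
	 * Illustrative only; consumer_dev and supplier_dev are hypothetical.
	 * Flags of 0, as in efifb_add_links(), create a plain persistent
	 * link with no runtime-PM coupling.
	 */
	static void link_sketch(struct device *consumer_dev,
				struct device *supplier_dev)
	{
		struct device_link *link;

		link = device_link_add(consumer_dev, supplier_dev, 0);
		if (!link)
			dev_warn(consumer_dev, "device_link_add() failed\n");
	}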
index b1395133389edb89ff549b5705c5e4b9d794981c..d3067cbd51143fade89c68ef6ca15346622121b0 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/miscdevice.h>
 #include <linux/highmem.h>
+#include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/efi.h>
index c9a0efca17b01b9d3959cd055f019683eeceae5d..5d4f84781aa036bf83d7b842442d24e438111649 100644 (file)
 
 #include <asm/early_ioremap.h>
 
+static const struct console *earlycon_console __initdata;
 static const struct font_desc *font;
 static u32 efi_x, efi_y;
 static u64 fb_base;
-static pgprot_t fb_prot;
+static bool fb_wb;
+static void *efi_fb;
+
+/*
+ * EFI earlycon needs early_memremap() to map the framebuffer, but
+ * early_memremap() is not usable for 'earlycon=efifb keep_bootcon';
+ * memremap() must be used instead. memremap() becomes available after
+ * paging_init(), which runs earlier than the initcall callbacks, so this
+ * early initcall, efi_earlycon_remap_fb(), maps the whole EFI framebuffer.
+ */
+static int __init efi_earlycon_remap_fb(void)
+{
+       /* bail if there is no bootconsole or it has been disabled already */
+       if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED))
+               return 0;
+
+       efi_fb = memremap(fb_base, screen_info.lfb_size,
+                         fb_wb ? MEMREMAP_WB : MEMREMAP_WC);
+
+       return efi_fb ? 0 : -ENOMEM;
+}
+early_initcall(efi_earlycon_remap_fb);
+
+static int __init efi_earlycon_unmap_fb(void)
+{
+       /* unmap the bootconsole fb unless keep_bootcon has left it enabled */
+       if (efi_fb && !(earlycon_console->flags & CON_ENABLED))
+               memunmap(efi_fb);
+       return 0;
+}
+late_initcall(efi_earlycon_unmap_fb);
 
 static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
 {
+       pgprot_t fb_prot;
+
+       if (efi_fb)
+               return efi_fb + start;
+
+       fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
        return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
 }
 
 static __ref void efi_earlycon_unmap(void *addr, unsigned long len)
 {
+       if (efi_fb)
+               return;
+
        early_memunmap(addr, len);
 }
 
@@ -176,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
        if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
                fb_base |= (u64)screen_info.ext_lfb_base << 32;
 
-       if (opt && !strcmp(opt, "ram"))
-               fb_prot = PAGE_KERNEL;
-       else
-               fb_prot = pgprot_writecombine(PAGE_KERNEL);
+       fb_wb = opt && !strcmp(opt, "ram");
 
        si = &screen_info;
        xres = si->lfb_width;
@@ -201,6 +238,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
                efi_earlycon_scroll_up();
 
        device->con->write = efi_earlycon_write;
+       earlycon_console = device->con;
        return 0;
 }
 EARLYCON_DECLARE(efifb, efi_earlycon_setup);
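
The mapping-lifetime constraint described in the comment at the top of
this hunk can be restated as a hedged sketch (illustrative, not from the
patch; the arguments are placeholders): early_memremap() mappings are
fixmap-based and must be balanced with early_memunmap() while the early
pool is still alive, whereas memremap()/memunmap() work from early
initcall time onward, which is why efi_earlycon_remap_fb() re-maps the
framebuffer.

	#include <linux/io.h>
	#include <asm/early_ioremap.h>

	/* Placeholder arguments; illustrative only. */
	static void __init mapping_lifetime_sketch(resource_size_t phys,
						   size_t len)
	{
		void *tmp, *fb;

		/* fixmap-based; early boot only, must be balanced */
		tmp = early_memremap(phys, len);
		if (tmp)
			early_memunmap(tmp, len);

		/* page-table based; usable once paging_init() has run */
		fb = memremap(phys, len, MEMREMAP_WB);
		if (fb)
			memunmap(fb);
	}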
index d101f072c8f8ac85db7605966a979a05f06abcae..621220ab3d0e348fdff0eda1af30f499ed37d1ce 100644 (file)
@@ -681,7 +681,7 @@ device_initcall(efi_load_efivars);
                { name },                                  \
                { prop },                                  \
                offsetof(struct efi_fdt_params, field),    \
-               FIELD_SIZEOF(struct efi_fdt_params, field) \
+               sizeof_field(struct efi_fdt_params, field) \
        }
 
 struct params {
@@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr)
  *
  * Search in the EFI memory map for the region covering @phys_addr.
  * Returns the EFI memory type if the region was found in the memory
- * map, EFI_RESERVED_TYPE (zero) otherwise.
+ * map, -EINVAL otherwise.
  */
 int efi_mem_type(unsigned long phys_addr)
 {
@@ -979,6 +979,24 @@ static int __init efi_memreserve_map_root(void)
        return 0;
 }
 
+static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
+{
+       struct resource *res, *parent;
+
+       res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+       if (!res)
+               return -ENOMEM;
+
+       res->name       = "reserved";
+       res->flags      = IORESOURCE_MEM;
+       res->start      = addr;
+       res->end        = addr + size - 1;
+
+       /* we expect a conflict with a 'System RAM' region */
+       parent = request_resource_conflict(&iomem_resource, res);
+       return parent ? request_resource(parent, res) : 0;
+}
+
 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 {
        struct linux_efi_memreserve *rsv;
@@ -1003,7 +1021,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
                        rsv->entry[index].size = size;
 
                        memunmap(rsv);
-                       return 0;
+                       return efi_mem_reserve_iomem(addr, size);
                }
                memunmap(rsv);
        }
@@ -1013,6 +1031,12 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
        if (!rsv)
                return -ENOMEM;
 
+       rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
+       if (rc) {
+               free_page((unsigned long)rsv);
+               return rc;
+       }
+
        /*
         * The memremap() call above assumes that a linux_efi_memreserve entry
         * never crosses a page boundary, so let's ensure that this remains true
@@ -1029,7 +1053,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
        efi_memreserve_root->next = __pa(rsv);
        spin_unlock(&efi_mem_reserve_persistent_lock);
 
-       return 0;
+       return efi_mem_reserve_iomem(addr, size);
 }
 
 static int __init efi_memreserve_root_init(void)
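
The reservation idiom in efi_mem_reserve_iomem() above deserves a note:
request_resource_conflict() returns the overlapping region rather than
an error code, which lets the new resource be re-parented beneath the
conflicting one, typically 'System RAM'. A hedged restatement, assuming
'res' is initialized as in that function:

	#include <linux/ioport.h>
	#include <linux/printk.h>

	/* 'res' assumed set up as in efi_mem_reserve_iomem() above. */
	static void reserve_sketch(struct resource *res)
	{
		struct resource *conflict;

		/* NULL: no overlap, res now sits under iomem_resource */
		conflict = request_resource_conflict(&iomem_resource, res);
		if (conflict) {
			/* overlap (e.g. 'System RAM'): nest res beneath it */
			if (request_resource(conflict, res))
				pr_warn("could not reserve %pR\n", res);
		}
	}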
index bb9fc70d0cfab97d6f03779ab53e0139709dc5c9..6e0f34a38171d921c1a42931e53d44cdf515a293 100644 (file)
@@ -34,46 +34,45 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
        return 0;
 }
 
-void __init efi_fake_memmap(void)
+static void __init efi_fake_range(struct efi_mem_range *efi_range)
 {
+       struct efi_memory_map_data data = { 0 };
        int new_nr_map = efi.memmap.nr_map;
        efi_memory_desc_t *md;
-       phys_addr_t new_memmap_phy;
        void *new_memmap;
-       int i;
-
-       if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
-               return;
 
        /* count up the number of EFI memory descriptors */
-       for (i = 0; i < nr_fake_mem; i++) {
-               for_each_efi_memory_desc(md) {
-                       struct range *r = &efi_fake_mems[i].range;
-
-                       new_nr_map += efi_memmap_split_count(md, r);
-               }
-       }
+       for_each_efi_memory_desc(md)
+               new_nr_map += efi_memmap_split_count(md, &efi_range->range);
 
        /* allocate memory for new EFI memmap */
-       new_memmap_phy = efi_memmap_alloc(new_nr_map);
-       if (!new_memmap_phy)
+       if (efi_memmap_alloc(new_nr_map, &data) != 0)
                return;
 
        /* create new EFI memmap */
-       new_memmap = early_memremap(new_memmap_phy,
-                                   efi.memmap.desc_size * new_nr_map);
+       new_memmap = early_memremap(data.phys_map, data.size);
        if (!new_memmap) {
-               memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
+               __efi_memmap_free(data.phys_map, data.size, data.flags);
                return;
        }
 
-       for (i = 0; i < nr_fake_mem; i++)
-               efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]);
+       efi_memmap_insert(&efi.memmap, new_memmap, efi_range);
 
        /* swap into new EFI memmap */
-       early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+       early_memunmap(new_memmap, data.size);
+
+       efi_memmap_install(&data);
+}
+
+void __init efi_fake_memmap(void)
+{
+       int i;
 
-       efi_memmap_install(new_memmap_phy, new_nr_map);
+       if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+               return;
+
+       for (i = 0; i < nr_fake_mem; i++)
+               efi_fake_range(&efi_fake_mems[i]);
 
        /* print new EFI memmap */
        efi_print_memmap();
index c35f893897e146e0cd43f4afe45046f83ce636ce..98a81576213d8d889a27f88ace8399ec08ae415b 100644 (file)
@@ -39,7 +39,7 @@ OBJECT_FILES_NON_STANDARD     := y
 KCOV_INSTRUMENT                        := n
 
 lib-y                          := efi-stub-helper.o gop.o secureboot.o tpm.o \
-                                  random.o
+                                  random.o pci.o
 
 # include the stub's generic dependencies from lib/ when building for ARM/arm64
 arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
index 817237ce2420f11ba0cb37655a977a7f012d7e9f..7bbef4a6735048977b616a9f018d8d04fdcf5951 100644 (file)
 
 static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
 
-void efi_char16_printk(efi_system_table_t *sys_table_arg,
-                             efi_char16_t *str)
-{
-       struct efi_simple_text_output_protocol *out;
+static efi_system_table_t *__efistub_global sys_table;
 
-       out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
-       out->output_string(out, str);
+__pure efi_system_table_t *efi_system_table(void)
+{
+       return sys_table;
 }
 
-static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+static struct screen_info *setup_graphics(void)
 {
        efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
        efi_status_t status;
@@ -55,27 +53,27 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
        struct screen_info *si = NULL;
 
        size = 0;
-       status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
-                               &gop_proto, NULL, &size, gop_handle);
+       status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+                            &gop_proto, NULL, &size, gop_handle);
        if (status == EFI_BUFFER_TOO_SMALL) {
-               si = alloc_screen_info(sys_table_arg);
+               si = alloc_screen_info();
                if (!si)
                        return NULL;
-               efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+               efi_setup_gop(si, &gop_proto, size);
        }
        return si;
 }
 
-void install_memreserve_table(efi_system_table_t *sys_table_arg)
+void install_memreserve_table(void)
 {
        struct linux_efi_memreserve *rsv;
        efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
        efi_status_t status;
 
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
-                               (void **)&rsv);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+                            (void **)&rsv);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n");
+               pr_efi_err("Failed to allocate memreserve entry!\n");
                return;
        }
 
@@ -83,11 +81,10 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
        rsv->size = 0;
        atomic_set(&rsv->count, 0);
 
-       status = efi_call_early(install_configuration_table,
-                               &memreserve_table_guid,
-                               rsv);
+       status = efi_bs_call(install_configuration_table,
+                            &memreserve_table_guid, rsv);
        if (status != EFI_SUCCESS)
-               pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n");
+               pr_efi_err("Failed to install memreserve config table!\n");
 }
 
 
@@ -97,8 +94,7 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
  * must be reserved. On failure it is required to free all
  * allocations it has made.
  */
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
-                                unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
                                 unsigned long *image_size,
                                 unsigned long *reserve_addr,
                                 unsigned long *reserve_size,
@@ -110,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
  * for both architectures, with the arch-specific code provided in the
  * handle_kernel_image() function.
  */
-unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
+unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg,
                               unsigned long *image_addr)
 {
        efi_loaded_image_t *image;
@@ -131,11 +127,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        enum efi_secureboot_mode secure_boot;
        struct screen_info *si;
 
+       sys_table = sys_table_arg;
+
        /* Check if we were booted by the EFI firmware */
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                goto fail;
 
-       status = check_platform_features(sys_table);
+       status = check_platform_features();
        if (status != EFI_SUCCESS)
                goto fail;
 
@@ -147,13 +145,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        status = sys_table->boottime->handle_protocol(handle,
                                        &loaded_image_proto, (void *)&image);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table, "Failed to get loaded image protocol\n");
+               pr_efi_err("Failed to get loaded image protocol\n");
                goto fail;
        }
 
-       dram_base = get_dram_base(sys_table);
+       dram_base = get_dram_base();
        if (dram_base == EFI_ERROR) {
-               pr_efi_err(sys_table, "Failed to find DRAM base\n");
+               pr_efi_err("Failed to find DRAM base\n");
                goto fail;
        }
 
@@ -162,9 +160,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
         * protocol. We are going to copy the command line into the
         * device tree, so this can be allocated anywhere.
         */
-       cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
+       cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
        if (!cmdline_ptr) {
-               pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
+               pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
                goto fail;
        }
 
@@ -176,25 +174,25 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
                efi_parse_options(cmdline_ptr);
 
-       pr_efi(sys_table, "Booting Linux Kernel...\n");
+       pr_efi("Booting Linux Kernel...\n");
 
-       si = setup_graphics(sys_table);
+       si = setup_graphics();
 
-       status = handle_kernel_image(sys_table, image_addr, &image_size,
+       status = handle_kernel_image(image_addr, &image_size,
                                     &reserve_addr,
                                     &reserve_size,
                                     dram_base, image);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table, "Failed to relocate kernel\n");
+               pr_efi_err("Failed to relocate kernel\n");
                goto fail_free_cmdline;
        }
 
-       efi_retrieve_tpm2_eventlog(sys_table);
+       efi_retrieve_tpm2_eventlog();
 
        /* Ask the firmware to clear memory on unclean shutdown */
-       efi_enable_reset_attack_mitigation(sys_table);
+       efi_enable_reset_attack_mitigation();
 
-       secure_boot = efi_get_secureboot(sys_table);
+       secure_boot = efi_get_secureboot();
 
        /*
         * Unauthenticated device tree data is a security hazard, so ignore
@@ -204,39 +202,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
             secure_boot != efi_secureboot_mode_disabled) {
                if (strstr(cmdline_ptr, "dtb="))
-                       pr_efi(sys_table, "Ignoring DTB from command line.\n");
+                       pr_efi("Ignoring DTB from command line.\n");
        } else {
-               status = handle_cmdline_files(sys_table, image, cmdline_ptr,
-                                             "dtb=",
+               status = handle_cmdline_files(image, cmdline_ptr, "dtb=",
                                              ~0UL, &fdt_addr, &fdt_size);
 
                if (status != EFI_SUCCESS) {
-                       pr_efi_err(sys_table, "Failed to load device tree!\n");
+                       pr_efi_err("Failed to load device tree!\n");
                        goto fail_free_image;
                }
        }
 
        if (fdt_addr) {
-               pr_efi(sys_table, "Using DTB from command line\n");
+               pr_efi("Using DTB from command line\n");
        } else {
                /* Look for a device tree configuration table entry. */
-               fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size);
+               fdt_addr = (uintptr_t)get_fdt(&fdt_size);
                if (fdt_addr)
-                       pr_efi(sys_table, "Using DTB from configuration table\n");
+                       pr_efi("Using DTB from configuration table\n");
        }
 
        if (!fdt_addr)
-               pr_efi(sys_table, "Generating empty DTB\n");
+               pr_efi("Generating empty DTB\n");
 
-       status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=",
+       status = handle_cmdline_files(image, cmdline_ptr, "initrd=",
                                      efi_get_max_initrd_addr(dram_base,
                                                              *image_addr),
                                      (unsigned long *)&initrd_addr,
                                      (unsigned long *)&initrd_size);
        if (status != EFI_SUCCESS)
-               pr_efi_err(sys_table, "Failed initrd from command line!\n");
+               pr_efi_err("Failed initrd from command line!\n");
 
-       efi_random_get_seed(sys_table);
+       efi_random_get_seed();
 
        /* hibernation expects the runtime regions to stay in the same place */
        if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
@@ -251,18 +248,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
                                            EFI_RT_VIRTUAL_SIZE;
                u32 rnd;
 
-               status = efi_get_random_bytes(sys_table, sizeof(rnd),
-                                             (u8 *)&rnd);
+               status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd);
                if (status == EFI_SUCCESS) {
                        virtmap_base = EFI_RT_VIRTUAL_BASE +
                                       (((headroom >> 21) * rnd) >> (32 - 21));
                }
        }
 
-       install_memreserve_table(sys_table);
+       install_memreserve_table();
 
        new_fdt_addr = fdt_addr;
-       status = allocate_new_fdt_and_exit_boot(sys_table, handle,
+       status = allocate_new_fdt_and_exit_boot(handle,
                                &new_fdt_addr, efi_get_max_fdt_addr(dram_base),
                                initrd_addr, initrd_size, cmdline_ptr,
                                fdt_addr, fdt_size);
@@ -275,17 +271,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        if (status == EFI_SUCCESS)
                return new_fdt_addr;
 
-       pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n");
+       pr_efi_err("Failed to update FDT and exit boot services\n");
 
-       efi_free(sys_table, initrd_size, initrd_addr);
-       efi_free(sys_table, fdt_size, fdt_addr);
+       efi_free(initrd_size, initrd_addr);
+       efi_free(fdt_size, fdt_addr);
 
 fail_free_image:
-       efi_free(sys_table, image_size, *image_addr);
-       efi_free(sys_table, reserve_size, reserve_addr);
+       efi_free(image_size, *image_addr);
+       efi_free(reserve_size, reserve_addr);
 fail_free_cmdline:
-       free_screen_info(sys_table, si);
-       efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
+       free_screen_info(si);
+       efi_free(cmdline_size, (unsigned long)cmdline_ptr);
 fail:
        return EFI_ERROR;
 }
index 4566640de650d41856de686619a4e6e4226d4772..7b2a6382b647741d7a9f11f1dd2585f08e01c51f 100644 (file)
@@ -7,7 +7,7 @@
 
 #include "efistub.h"
 
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+efi_status_t check_platform_features(void)
 {
        int block;
 
@@ -18,7 +18,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
        /* LPAE kernels need compatible hardware */
        block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
        if (block < 5) {
-               pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
+               pr_efi_err("This LPAE kernel is not supported by your CPU\n");
                return EFI_UNSUPPORTED;
        }
        return EFI_SUCCESS;
@@ -26,7 +26,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
 
 static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
 
-struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+struct screen_info *alloc_screen_info(void)
 {
        struct screen_info *si;
        efi_status_t status;
@@ -37,32 +37,31 @@ struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
         * its contents while we hand over to the kernel proper from the
         * decompressor.
         */
-       status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
-                               sizeof(*si), (void **)&si);
+       status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+                            sizeof(*si), (void **)&si);
 
        if (status != EFI_SUCCESS)
                return NULL;
 
-       status = efi_call_early(install_configuration_table,
-                               &screen_info_guid, si);
+       status = efi_bs_call(install_configuration_table,
+                            &screen_info_guid, si);
        if (status == EFI_SUCCESS)
                return si;
 
-       efi_call_early(free_pool, si);
+       efi_bs_call(free_pool, si);
        return NULL;
 }
 
-void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+void free_screen_info(struct screen_info *si)
 {
        if (!si)
                return;
 
-       efi_call_early(install_configuration_table, &screen_info_guid, NULL);
-       efi_call_early(free_pool, si);
+       efi_bs_call(install_configuration_table, &screen_info_guid, NULL);
+       efi_bs_call(free_pool, si);
 }
 
-static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
-                                       unsigned long dram_base,
+static efi_status_t reserve_kernel_base(unsigned long dram_base,
                                        unsigned long *reserve_addr,
                                        unsigned long *reserve_size)
 {
@@ -92,8 +91,8 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
         */
        alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE;
        nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE;
-       status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
-                               EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
+       status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+                            EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
        if (status == EFI_SUCCESS) {
                if (alloc_addr == dram_base) {
                        *reserve_addr = alloc_addr;
@@ -119,10 +118,9 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
         * released to the OS after ExitBootServices(), the decompressor can
         * safely overwrite them.
         */
-       status = efi_get_memory_map(sys_table_arg, &map);
+       status = efi_get_memory_map(&map);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table_arg,
-                          "reserve_kernel_base(): Unable to retrieve memory map.\n");
+               pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
                return status;
        }
 
@@ -158,14 +156,13 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
                        start = max(start, (u64)dram_base);
                        end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE);
 
-                       status = efi_call_early(allocate_pages,
-                                               EFI_ALLOCATE_ADDRESS,
-                                               EFI_LOADER_DATA,
-                                               (end - start) / EFI_PAGE_SIZE,
-                                               &start);
+                       status = efi_bs_call(allocate_pages,
+                                            EFI_ALLOCATE_ADDRESS,
+                                            EFI_LOADER_DATA,
+                                            (end - start) / EFI_PAGE_SIZE,
+                                            &start);
                        if (status != EFI_SUCCESS) {
-                               pr_efi_err(sys_table_arg,
-                                       "reserve_kernel_base(): alloc failed.\n");
+                               pr_efi_err("reserve_kernel_base(): alloc failed.\n");
                                goto out;
                        }
                        break;
@@ -188,12 +185,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
 
        status = EFI_SUCCESS;
 out:
-       efi_call_early(free_pool, memory_map);
+       efi_bs_call(free_pool, memory_map);
        return status;
 }
 
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
-                                unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
                                 unsigned long *image_size,
                                 unsigned long *reserve_addr,
                                 unsigned long *reserve_size,
@@ -221,10 +217,9 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
         */
        kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
 
-       status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
-                                    reserve_size);
+       status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
+               pr_efi_err("Unable to allocate memory for uncompressed kernel.\n");
                return status;
        }
 
@@ -233,12 +228,11 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
         * memory window.
         */
        *image_size = image->image_size;
-       status = efi_relocate_kernel(sys_table, image_addr, *image_size,
-                                    *image_size,
+       status = efi_relocate_kernel(image_addr, *image_size, *image_size,
                                     kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table, "Failed to relocate kernel.\n");
-               efi_free(sys_table, *reserve_size, *reserve_addr);
+               pr_efi_err("Failed to relocate kernel.\n");
+               efi_free(*reserve_size, *reserve_addr);
                *reserve_size = 0;
                return status;
        }
@@ -249,10 +243,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
         * address at which the zImage is loaded.
         */
        if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
-               pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n");
-               efi_free(sys_table, *reserve_size, *reserve_addr);
+               pr_efi_err("Failed to relocate kernel, no low memory available.\n");
+               efi_free(*reserve_size, *reserve_addr);
                *reserve_size = 0;
-               efi_free(sys_table, *image_size, *image_addr);
+               efi_free(*image_size, *image_addr);
                *image_size = 0;
                return EFI_LOAD_ERROR;
        }
index 1550d244e9961f0d5c7451dbc8b21b7038acd914..2915b44132e60413dfd626a69e60acc0fc0f15b7 100644 (file)
@@ -21,7 +21,7 @@
 
 #include "efistub.h"
 
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+efi_status_t check_platform_features(void)
 {
        u64 tg;
 
@@ -32,16 +32,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
        tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
        if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
                if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
-                       pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
+                       pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n");
                else
-                       pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
+                       pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n");
                return EFI_UNSUPPORTED;
        }
        return EFI_SUCCESS;
 }
 
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
-                                unsigned long *image_addr,
+efi_status_t handle_kernel_image(unsigned long *image_addr,
                                 unsigned long *image_size,
                                 unsigned long *reserve_addr,
                                 unsigned long *reserve_size,
@@ -56,17 +55,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
 
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                if (!nokaslr()) {
-                       status = efi_get_random_bytes(sys_table_arg,
-                                                     sizeof(phys_seed),
+                       status = efi_get_random_bytes(sizeof(phys_seed),
                                                      (u8 *)&phys_seed);
                        if (status == EFI_NOT_FOUND) {
-                               pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+                               pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
                        } else if (status != EFI_SUCCESS) {
-                               pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+                               pr_efi_err("efi_get_random_bytes() failed\n");
                                return status;
                        }
                } else {
-                       pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+                       pr_efi("KASLR disabled on kernel command line\n");
                }
        }
 
@@ -108,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
                 * locate the kernel at a randomized offset in physical memory.
                 */
                *reserve_size = kernel_memsize + offset;
-               status = efi_random_alloc(sys_table_arg, *reserve_size,
+               status = efi_random_alloc(*reserve_size,
                                          MIN_KIMG_ALIGN, reserve_addr,
                                          (u32)phys_seed);
 
@@ -131,19 +129,19 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
                *image_addr = *reserve_addr = preferred_offset;
                *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
 
-               status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
-                                       EFI_LOADER_DATA,
-                                       *reserve_size / EFI_PAGE_SIZE,
-                                       (efi_physical_addr_t *)reserve_addr);
+               status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+                                    EFI_LOADER_DATA,
+                                    *reserve_size / EFI_PAGE_SIZE,
+                                    (efi_physical_addr_t *)reserve_addr);
        }
 
        if (status != EFI_SUCCESS) {
                *reserve_size = kernel_memsize + TEXT_OFFSET;
-               status = efi_low_alloc(sys_table_arg, *reserve_size,
+               status = efi_low_alloc(*reserve_size,
                                       MIN_KIMG_ALIGN, reserve_addr);
 
                if (status != EFI_SUCCESS) {
-                       pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+                       pr_efi_err("Failed to relocate kernel\n");
                        *reserve_size = 0;
                        return status;
                }
index e02579907f2e21abc20511560f1d85feb91527a4..74ddfb496140f8bbe004991e5dd8747d4a3a233a 100644 (file)
  */
 #define EFI_READ_CHUNK_SIZE    (1024 * 1024)
 
-static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
+static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE;
 
-static int __section(.data) __nokaslr;
-static int __section(.data) __quiet;
-static int __section(.data) __novamap;
-static bool __section(.data) efi_nosoftreserve;
+static bool __efistub_global efi_nokaslr;
+static bool __efistub_global efi_quiet;
+static bool __efistub_global efi_novamap;
+static bool __efistub_global efi_nosoftreserve;
+static bool __efistub_global efi_disable_pci_dma =
+                                       IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
 
-int __pure nokaslr(void)
+bool __pure nokaslr(void)
 {
-       return __nokaslr;
+       return efi_nokaslr;
 }
-int __pure is_quiet(void)
+bool __pure is_quiet(void)
 {
-       return __quiet;
+       return efi_quiet;
 }
-int __pure novamap(void)
+bool __pure novamap(void)
 {
-       return __novamap;
+       return efi_novamap;
 }
 bool __pure __efi_soft_reserve_enabled(void)
 {
@@ -58,7 +60,7 @@ struct file_info {
        u64 size;
 };
 
-void efi_printk(efi_system_table_t *sys_table_arg, char *str)
+void efi_printk(char *str)
 {
        char *s8;
 
@@ -68,10 +70,10 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
                ch[0] = *s8;
                if (*s8 == '\n') {
                        efi_char16_t nl[2] = { '\r', 0 };
-                       efi_char16_printk(sys_table_arg, nl);
+                       efi_char16_printk(nl);
                }
 
-               efi_char16_printk(sys_table_arg, ch);
+               efi_char16_printk(ch);
        }
 }
 
@@ -84,8 +86,7 @@ static inline bool mmap_has_headroom(unsigned long buff_size,
        return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
 }
 
-efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
-                               struct efi_boot_memmap *map)
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
 {
        efi_memory_desc_t *m = NULL;
        efi_status_t status;
@@ -96,19 +97,19 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
        *map->map_size =        *map->desc_size * 32;
        *map->buff_size =       *map->map_size;
 again:
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               *map->map_size, (void **)&m);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+                            *map->map_size, (void **)&m);
        if (status != EFI_SUCCESS)
                goto fail;
 
        *map->desc_size = 0;
        key = 0;
-       status = efi_call_early(get_memory_map, map->map_size, m,
-                               &key, map->desc_size, &desc_version);
+       status = efi_bs_call(get_memory_map, map->map_size, m,
+                            &key, map->desc_size, &desc_version);
        if (status == EFI_BUFFER_TOO_SMALL ||
            !mmap_has_headroom(*map->buff_size, *map->map_size,
                               *map->desc_size)) {
-               efi_call_early(free_pool, m);
+               efi_bs_call(free_pool, m);
                /*
                 * Make sure there are some entries of headroom so that the
                 * buffer can be reused for a new map after allocations are
@@ -122,7 +123,7 @@ again:
        }
 
        if (status != EFI_SUCCESS)
-               efi_call_early(free_pool, m);
+               efi_bs_call(free_pool, m);
 
        if (map->key_ptr && status == EFI_SUCCESS)
                *map->key_ptr = key;
@@ -135,7 +136,7 @@ fail:
 }
 
 
-unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
+unsigned long get_dram_base(void)
 {
        efi_status_t status;
        unsigned long map_size, buff_size;
@@ -151,7 +152,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
        boot_map.key_ptr =      NULL;
        boot_map.buff_size =    &buff_size;
 
-       status = efi_get_memory_map(sys_table_arg, &boot_map);
+       status = efi_get_memory_map(&boot_map);
        if (status != EFI_SUCCESS)
                return membase;
 
@@ -164,7 +165,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
                }
        }
 
-       efi_call_early(free_pool, map.map);
+       efi_bs_call(free_pool, map.map);
 
        return membase;
 }
@@ -172,8 +173,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
 /*
  * Allocate at the highest possible address that is not above 'max'.
  */
-efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
-                           unsigned long size, unsigned long align,
+efi_status_t efi_high_alloc(unsigned long size, unsigned long align,
                            unsigned long *addr, unsigned long max)
 {
        unsigned long map_size, desc_size, buff_size;
@@ -191,7 +191,7 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
        boot_map.key_ptr =      NULL;
        boot_map.buff_size =    &buff_size;
 
-       status = efi_get_memory_map(sys_table_arg, &boot_map);
+       status = efi_get_memory_map(&boot_map);
        if (status != EFI_SUCCESS)
                goto fail;
 
@@ -251,9 +251,8 @@ again:
        if (!max_addr)
                status = EFI_NOT_FOUND;
        else {
-               status = efi_call_early(allocate_pages,
-                                       EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
-                                       nr_pages, &max_addr);
+               status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+                                    EFI_LOADER_DATA, nr_pages, &max_addr);
                if (status != EFI_SUCCESS) {
                        max = max_addr;
                        max_addr = 0;
@@ -263,7 +262,7 @@ again:
                *addr = max_addr;
        }
 
-       efi_call_early(free_pool, map);
+       efi_bs_call(free_pool, map);
 fail:
        return status;
 }
@@ -271,8 +270,7 @@ fail:
 /*
  * Allocate at the lowest possible address that is not below 'min'.
  */
-efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
-                                unsigned long size, unsigned long align,
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
                                 unsigned long *addr, unsigned long min)
 {
        unsigned long map_size, desc_size, buff_size;
@@ -289,7 +287,7 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
        boot_map.key_ptr =      NULL;
        boot_map.buff_size =    &buff_size;
 
-       status = efi_get_memory_map(sys_table_arg, &boot_map);
+       status = efi_get_memory_map(&boot_map);
        if (status != EFI_SUCCESS)
                goto fail;
 
@@ -331,9 +329,8 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
                if ((start + size) > end)
                        continue;
 
-               status = efi_call_early(allocate_pages,
-                                       EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
-                                       nr_pages, &start);
+               status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+                                    EFI_LOADER_DATA, nr_pages, &start);
                if (status == EFI_SUCCESS) {
                        *addr = start;
                        break;
@@ -343,13 +340,12 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
        if (i == map_size / desc_size)
                status = EFI_NOT_FOUND;
 
-       efi_call_early(free_pool, map);
+       efi_bs_call(free_pool, map);
 fail:
        return status;
 }
 
-void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
-             unsigned long addr)
+void efi_free(unsigned long size, unsigned long addr)
 {
        unsigned long nr_pages;
 
@@ -357,12 +353,11 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
                return;
 
        nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
-       efi_call_early(free_pages, addr, nr_pages);
+       efi_bs_call(free_pages, addr, nr_pages);
 }
 
-static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
-                                 efi_char16_t *filename_16, void **handle,
-                                 u64 *file_sz)
+static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16,
+                                 void **handle, u64 *file_sz)
 {
        efi_file_handle_t *h, *fh = __fh;
        efi_file_info_t *info;
@@ -370,81 +365,75 @@ static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh,
        efi_guid_t info_guid = EFI_FILE_INFO_ID;
        unsigned long info_sz;
 
-       status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16,
-                               EFI_FILE_MODE_READ, (u64)0);
+       status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg, "Failed to open file: ");
-               efi_char16_printk(sys_table_arg, filename_16);
-               efi_printk(sys_table_arg, "\n");
+               efi_printk("Failed to open file: ");
+               efi_char16_printk(filename_16);
+               efi_printk("\n");
                return status;
        }
 
        *handle = h;
 
        info_sz = 0;
-       status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
-                               &info_sz, NULL);
+       status = h->get_info(h, &info_guid, &info_sz, NULL);
        if (status != EFI_BUFFER_TOO_SMALL) {
-               efi_printk(sys_table_arg, "Failed to get file info size\n");
+               efi_printk("Failed to get file info size\n");
                return status;
        }
 
 grow:
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               info_sz, (void **)&info);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz,
+                            (void **)&info);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
+               efi_printk("Failed to alloc mem for file info\n");
                return status;
        }
 
-       status = efi_call_proto(efi_file_handle, get_info, h, &info_guid,
-                               &info_sz, info);
+       status = h->get_info(h, &info_guid, &info_sz, info);
        if (status == EFI_BUFFER_TOO_SMALL) {
-               efi_call_early(free_pool, info);
+               efi_bs_call(free_pool, info);
                goto grow;
        }
 
        *file_sz = info->file_size;
-       efi_call_early(free_pool, info);
+       efi_bs_call(free_pool, info);
 
        if (status != EFI_SUCCESS)
-               efi_printk(sys_table_arg, "Failed to get initrd info\n");
+               efi_printk("Failed to get initrd info\n");
 
        return status;
 }
 
-static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr)
+static efi_status_t efi_file_read(efi_file_handle_t *handle,
+                                 unsigned long *size, void *addr)
 {
-       return efi_call_proto(efi_file_handle, read, handle, size, addr);
+       return handle->read(handle, size, addr);
 }
 
-static efi_status_t efi_file_close(void *handle)
+static efi_status_t efi_file_close(efi_file_handle_t *handle)
 {
-       return efi_call_proto(efi_file_handle, close, handle);
+       return handle->close(handle);
 }
 
-static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
-                                   efi_loaded_image_t *image,
+static efi_status_t efi_open_volume(efi_loaded_image_t *image,
                                    efi_file_handle_t **__fh)
 {
        efi_file_io_interface_t *io;
        efi_file_handle_t *fh;
        efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
        efi_status_t status;
-       void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
-                                                            device_handle,
-                                                            image);
+       efi_handle_t handle = image->device_handle;
 
-       status = efi_call_early(handle_protocol, handle,
-                               &fs_proto, (void **)&io);
+       status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+               efi_printk("Failed to handle fs_proto\n");
                return status;
        }
 
-       status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+       status = io->open_volume(io, &fh);
        if (status != EFI_SUCCESS)
-               efi_printk(sys_table_arg, "Failed to open volume\n");
+               efi_printk("Failed to open volume\n");
        else
                *__fh = fh;
 
@@ -465,11 +454,11 @@ efi_status_t efi_parse_options(char const *cmdline)
 
        str = strstr(cmdline, "nokaslr");
        if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
-               __nokaslr = 1;
+               efi_nokaslr = true;
 
        str = strstr(cmdline, "quiet");
        if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
-               __quiet = 1;
+               efi_quiet = true;
 
        /*
         * If no EFI parameters were specified on the cmdline we've got
@@ -489,18 +478,28 @@ efi_status_t efi_parse_options(char const *cmdline)
        while (*str && *str != ' ') {
                if (!strncmp(str, "nochunk", 7)) {
                        str += strlen("nochunk");
-                       __chunk_size = -1UL;
+                       efi_chunk_size = -1UL;
                }
 
                if (!strncmp(str, "novamap", 7)) {
                        str += strlen("novamap");
-                       __novamap = 1;
+                       efi_novamap = true;
                }
 
                if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
                    !strncmp(str, "nosoftreserve", 7)) {
                        str += strlen("nosoftreserve");
-                       efi_nosoftreserve = 1;
+                       efi_nosoftreserve = true;
+               }
+
+               if (!strncmp(str, "disable_early_pci_dma", 21)) {
+                       str += strlen("disable_early_pci_dma");
+                       efi_disable_pci_dma = true;
+               }
+
+               if (!strncmp(str, "no_disable_early_pci_dma", 24)) {
+                       str += strlen("no_disable_early_pci_dma");
+                       efi_disable_pci_dma = false;
                }
 
                /* Group words together, delimited by "," */
@@ -520,8 +519,7 @@ efi_status_t efi_parse_options(char const *cmdline)
  * We only support loading a file from the same filesystem as
  * the kernel image.
  */
-efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
-                                 efi_loaded_image_t *image,
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
                                  char *cmd_line, char *option_string,
                                  unsigned long max_addr,
                                  unsigned long *load_addr,
@@ -570,10 +568,10 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
        if (!nr_files)
                return EFI_SUCCESS;
 
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               nr_files * sizeof(*files), (void **)&files);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+                            nr_files * sizeof(*files), (void **)&files);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n");
+               pr_efi_err("Failed to alloc mem for file handle list\n");
                goto fail;
        }
 
@@ -612,13 +610,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 
                /* Only open the volume once. */
                if (!i) {
-                       status = efi_open_volume(sys_table_arg, image, &fh);
+                       status = efi_open_volume(image, &fh);
                        if (status != EFI_SUCCESS)
                                goto free_files;
                }
 
-               status = efi_file_size(sys_table_arg, fh, filename_16,
-                                      (void **)&file->handle, &file->size);
+               status = efi_file_size(fh, filename_16, (void **)&file->handle,
+                                      &file->size);
                if (status != EFI_SUCCESS)
                        goto close_handles;
 
@@ -633,16 +631,16 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                 * so allocate enough memory for all the files.  This is used
                 * for loading multiple files.
                 */
-               status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000,
-                                   &file_addr, max_addr);
+               status = efi_high_alloc(file_size_total, 0x1000, &file_addr,
+                                       max_addr);
                if (status != EFI_SUCCESS) {
-                       pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n");
+                       pr_efi_err("Failed to alloc highmem for files\n");
                        goto close_handles;
                }
 
                /* We've run out of free low memory. */
                if (file_addr > max_addr) {
-                       pr_efi_err(sys_table_arg, "We've run out of free low memory\n");
+                       pr_efi_err("We've run out of free low memory\n");
                        status = EFI_INVALID_PARAMETER;
                        goto free_file_total;
                }
@@ -655,8 +653,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                        while (size) {
                                unsigned long chunksize;
 
-                               if (IS_ENABLED(CONFIG_X86) && size > __chunk_size)
-                                       chunksize = __chunk_size;
+                               if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size)
+                                       chunksize = efi_chunk_size;
                                else
                                        chunksize = size;
 
@@ -664,7 +662,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                                                       &chunksize,
                                                       (void *)addr);
                                if (status != EFI_SUCCESS) {
-                                       pr_efi_err(sys_table_arg, "Failed to read file\n");
+                                       pr_efi_err("Failed to read file\n");
                                        goto free_file_total;
                                }
                                addr += chunksize;
@@ -676,7 +674,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 
        }
 
-       efi_call_early(free_pool, files);
+       efi_bs_call(free_pool, files);
 
        *load_addr = file_addr;
        *load_size = file_size_total;
@@ -684,13 +682,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
        return status;
 
 free_file_total:
-       efi_free(sys_table_arg, file_size_total, file_addr);
+       efi_free(file_size_total, file_addr);
 
 close_handles:
        for (k = j; k < i; k++)
                efi_file_close(files[k].handle);
 free_files:
-       efi_call_early(free_pool, files);
+       efi_bs_call(free_pool, files);
 fail:
        *load_addr = 0;
        *load_size = 0;
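(Aside: a condensed sketch of the handle_cmdline_files() flow after this change; step ordering as in the hunks above, not a verbatim copy.)

	/*
	 *   1. efi_open_volume(image, &fh)                once, on the first file
	 *   2. efi_file_size(fh, name, &handle, &size)    per file
	 *   3. efi_high_alloc(total, 0x1000, &addr, max)  one buffer for all files
	 *   4. read in efi_chunk_size pieces               x86 firmware workaround
	 *   5. efi_file_close(handle)                      per file
	 *   6. efi_bs_call(free_pool, files)               release the handle list
	 */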
@@ -707,8 +705,7 @@ fail:
  * address is not available the lowest available address will
  * be used.
  */
-efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
-                                unsigned long *image_addr,
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
                                 unsigned long image_size,
                                 unsigned long alloc_size,
                                 unsigned long preferred_addr,
@@ -737,20 +734,19 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
         * as possible while respecting the required alignment.
         */
        nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
-       status = efi_call_early(allocate_pages,
-                               EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
-                               nr_pages, &efi_addr);
+       status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+                            EFI_LOADER_DATA, nr_pages, &efi_addr);
        new_addr = efi_addr;
        /*
         * If preferred address allocation failed allocate as low as
         * possible.
         */
        if (status != EFI_SUCCESS) {
-               status = efi_low_alloc_above(sys_table_arg, alloc_size,
-                                            alignment, &new_addr, min_addr);
+               status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+                                            min_addr);
        }
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
+               pr_efi_err("Failed to allocate usable memory for kernel.\n");
                return status;
        }
 
@@ -824,8 +820,7 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
  * The size of the allocated memory is returned in *cmd_line_len.
  * Returns NULL on error.
  */
-char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
-                         efi_loaded_image_t *image,
+char *efi_convert_cmdline(efi_loaded_image_t *image,
                          int *cmd_line_len)
 {
        const u16 *s2;
@@ -854,8 +849,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
 
        options_bytes++;        /* NUL termination */
 
-       status = efi_high_alloc(sys_table_arg, options_bytes, 0,
-                               &cmdline_addr, MAX_CMDLINE_ADDRESS);
+       status = efi_high_alloc(options_bytes, 0, &cmdline_addr,
+                               MAX_CMDLINE_ADDRESS);
        if (status != EFI_SUCCESS)
                return NULL;
 
@@ -877,24 +872,26 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
  * specific structure may be passed to the function via priv.  The client
  * function may be called multiple times.
  */
-efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
-                                   void *handle,
+efi_status_t efi_exit_boot_services(void *handle,
                                    struct efi_boot_memmap *map,
                                    void *priv,
                                    efi_exit_boot_map_processing priv_func)
 {
        efi_status_t status;
 
-       status = efi_get_memory_map(sys_table_arg, map);
+       status = efi_get_memory_map(map);
 
        if (status != EFI_SUCCESS)
                goto fail;
 
-       status = priv_func(sys_table_arg, map, priv);
+       status = priv_func(map, priv);
        if (status != EFI_SUCCESS)
                goto free_map;
 
-       status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+       if (efi_disable_pci_dma)
+               efi_pci_disable_bridge_busmaster();
+
+       status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
 
        if (status == EFI_INVALID_PARAMETER) {
                /*
@@ -911,23 +908,23 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
                 * to get_memory_map() is expected to succeed here.
                 */
                *map->map_size = *map->buff_size;
-               status = efi_call_early(get_memory_map,
-                                       map->map_size,
-                                       *map->map,
-                                       map->key_ptr,
-                                       map->desc_size,
-                                       map->desc_ver);
+               status = efi_bs_call(get_memory_map,
+                                    map->map_size,
+                                    *map->map,
+                                    map->key_ptr,
+                                    map->desc_size,
+                                    map->desc_ver);
 
                /* exit_boot_services() was called, thus cannot free */
                if (status != EFI_SUCCESS)
                        goto fail;
 
-               status = priv_func(sys_table_arg, map, priv);
+               status = priv_func(map, priv);
                /* exit_boot_services() was called, thus cannot free */
                if (status != EFI_SUCCESS)
                        goto fail;
 
-               status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+               status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
        }
 
        /* exit_boot_services() was called, thus cannot free */
@@ -937,38 +934,31 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
        return EFI_SUCCESS;
 
 free_map:
-       efi_call_early(free_pool, *map->map);
+       efi_bs_call(free_pool, *map->map);
 fail:
        return status;
 }
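(Aside: the ExitBootServices() retry protocol implemented above, summarized as a sketch.)

	/*
	 *   get_memory_map          the map key identifies this exact snapshot
	 *   priv_func(map, priv)    caller records the map for the OS
	 *   disable busmastering    only when efi_disable_pci_dma is set
	 *   exit_boot_services(handle, key)
	 *     on EFI_INVALID_PARAMETER the firmware changed the map under
	 *     us: refresh it into the existing buffer (no allocations are
	 *     legal any more), re-run priv_func(), and retry exactly once.
	 */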
 
-#define GET_EFI_CONFIG_TABLE(bits)                                     \
-static void *get_efi_config_table##bits(efi_system_table_t *_sys_table,        \
-                                       efi_guid_t guid)                \
-{                                                                      \
-       efi_system_table_##bits##_t *sys_table;                         \
-       efi_config_table_##bits##_t *tables;                            \
-       int i;                                                          \
-                                                                       \
-       sys_table = (typeof(sys_table))_sys_table;                      \
-       tables = (typeof(tables))(unsigned long)sys_table->tables;      \
-                                                                       \
-       for (i = 0; i < sys_table->nr_tables; i++) {                    \
-               if (efi_guidcmp(tables[i].guid, guid) != 0)             \
-                       continue;                                       \
-                                                                       \
-               return (void *)(unsigned long)tables[i].table;          \
-       }                                                               \
-                                                                       \
-       return NULL;                                                    \
+void *get_efi_config_table(efi_guid_t guid)
+{
+       unsigned long tables = efi_table_attr(efi_system_table(), tables);
+       int nr_tables = efi_table_attr(efi_system_table(), nr_tables);
+       int i;
+
+       for (i = 0; i < nr_tables; i++) {
+               efi_config_table_t *t = (void *)tables;
+
+               if (efi_guidcmp(t->guid, guid) == 0)
+                       return efi_table_attr(t, table);
+
+               tables += efi_is_native() ? sizeof(efi_config_table_t)
+                                         : sizeof(efi_config_table_32_t);
+       }
+       return NULL;
 }
-GET_EFI_CONFIG_TABLE(32)
-GET_EFI_CONFIG_TABLE(64)
 
-void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
+void efi_char16_printk(efi_char16_t *str)
 {
-       if (efi_is_64bit())
-               return get_efi_config_table64(sys_table, guid);
-       else
-               return get_efi_config_table32(sys_table, guid);
+       efi_call_proto(efi_table_attr(efi_system_table(), con_out),
+                      output_string, str);
 }
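(Aside: a hedged usage sketch of the unified lookup; ACPI_20_TABLE_GUID is the stock GUID from linux/efi.h and stands in for any table of interest.)

	/* Locate the ACPI 2.0 RSDP without threading a sys_table pointer. */
	void *rsdp = get_efi_config_table(ACPI_20_TABLE_GUID);

	if (!rsdp)
		pr_efi_err("No ACPI 2.0 configuration table found\n");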
index 05739ae013c8283e6c7ef5f3134fe77b9df249b7..c244b165005e818dc1df8f6b42479c25b052d4d1 100644 (file)
 #define EFI_ALLOC_ALIGN                EFI_PAGE_SIZE
 #endif
 
-extern int __pure nokaslr(void);
-extern int __pure is_quiet(void);
-extern int __pure novamap(void);
+#ifdef CONFIG_ARM
+#define __efistub_global       __section(.data)
+#else
+#define __efistub_global
+#endif
+
+extern bool __pure nokaslr(void);
+extern bool __pure is_quiet(void);
+extern bool __pure novamap(void);
+
+extern __pure efi_system_table_t  *efi_system_table(void);
 
-#define pr_efi(sys_table, msg)         do {                            \
-       if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg);        \
+#define pr_efi(msg)            do {                    \
+       if (!is_quiet()) efi_printk("EFI stub: "msg);   \
 } while (0)
 
-#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
+#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg)
 
-void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
+void efi_char16_printk(efi_char16_t *);
 
-unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
+unsigned long get_dram_base(void);
 
-efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
-                                           void *handle,
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
                                            unsigned long *new_fdt_addr,
                                            unsigned long max_addr,
                                            u64 initrd_addr, u64 initrd_size,
@@ -48,22 +56,20 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                                            unsigned long fdt_addr,
                                            unsigned long fdt_size);
 
-void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size);
+void *get_fdt(unsigned long *fdt_size);
 
 void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
                     unsigned long desc_size, efi_memory_desc_t *runtime_map,
                     int *count);
 
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
-                                 unsigned long size, u8 *out);
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
 
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
-                             unsigned long size, unsigned long align,
+efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
                              unsigned long *addr, unsigned long random_seed);
 
-efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+efi_status_t check_platform_features(void);
 
-void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
+void *get_efi_config_table(efi_guid_t guid);
 
 /* Helper macros for the usual case of using simple C variables: */
 #ifndef fdt_setprop_inplace_var
@@ -76,4 +82,12 @@ void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
        fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
 #endif
 
+#define get_efi_var(name, vendor, ...)                         \
+       efi_rt_call(get_variable, (efi_char16_t *)(name),       \
+                   (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...)                         \
+       efi_rt_call(set_variable, (efi_char16_t *)(name),       \
+                   (efi_guid_t *)(vendor), __VA_ARGS__)
+
 #endif
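(Aside: a hedged usage sketch of the get_efi_var() helper defined above; the variable name and GUID mirror the Secure Boot probe elsewhere in the stub.)

	static const efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID;
	unsigned long size = sizeof(u8);
	efi_status_t status;
	u8 secboot;
	u32 attr;

	/* One runtime-services call: GetVariable(L"SecureBoot", ...). */
	status = get_efi_var(L"SecureBoot", &var_guid, &attr, &size, &secboot);
	if (status != EFI_SUCCESS)
		secboot = 0;	/* sketch: treat lookup failure as not enforced */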
index 0bf0190917e08ebdb6bcae20969a6354c6634347..0a91e52321272a08ac8e6ad7f7acac910eb13248 100644 (file)
@@ -16,7 +16,7 @@
 #define EFI_DT_ADDR_CELLS_DEFAULT 2
 #define EFI_DT_SIZE_CELLS_DEFAULT 2
 
-static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
+static void fdt_update_cell_size(void *fdt)
 {
        int offset;
 
@@ -27,8 +27,7 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
        fdt_setprop_u32(fdt, offset, "#size-cells",    EFI_DT_SIZE_CELLS_DEFAULT);
 }
 
-static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
-                              unsigned long orig_fdt_size,
+static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
                               void *fdt, int new_fdt_size, char *cmdline_ptr,
                               u64 initrd_addr, u64 initrd_size)
 {
@@ -40,7 +39,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        /* Do some checks on provided FDT, if it exists: */
        if (orig_fdt) {
                if (fdt_check_header(orig_fdt)) {
-                       pr_efi_err(sys_table, "Device Tree header not valid!\n");
+                       pr_efi_err("Device Tree header not valid!\n");
                        return EFI_LOAD_ERROR;
                }
                /*
@@ -48,7 +47,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
                 * configuration table:
                 */
                if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
-                       pr_efi_err(sys_table, "Truncated device tree! foo!\n");
+                       pr_efi_err("Truncated device tree! foo!\n");
                        return EFI_LOAD_ERROR;
                }
        }
@@ -62,7 +61,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
                         * Any failure from the following function is
                         * non-critical:
                         */
-                       fdt_update_cell_size(sys_table, fdt);
+                       fdt_update_cell_size(fdt);
                }
        }
 
@@ -111,7 +110,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
 
        /* Add FDT entries for EFI runtime services in chosen node. */
        node = fdt_subnode_offset(fdt, 0, "chosen");
-       fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table);
+       fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table());
 
        status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
        if (status)
@@ -140,7 +139,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                efi_status_t efi_status;
 
-               efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+               efi_status = efi_get_random_bytes(sizeof(fdt_val64),
                                                  (u8 *)&fdt_val64);
                if (efi_status == EFI_SUCCESS) {
                        status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
@@ -210,8 +209,7 @@ struct exit_boot_struct {
        void                    *new_fdt_addr;
 };
 
-static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
-                                  struct efi_boot_memmap *map,
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
                                   void *priv)
 {
        struct exit_boot_struct *p = priv;
@@ -244,8 +242,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
  * with the final memory map in it.
  */
 
-efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
-                                           void *handle,
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
                                            unsigned long *new_fdt_addr,
                                            unsigned long max_addr,
                                            u64 initrd_addr, u64 initrd_size,
@@ -275,19 +272,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
         * subsequent allocations adding entries, since they could not affect
         * the number of EFI_MEMORY_RUNTIME regions.
         */
-       status = efi_get_memory_map(sys_table, &map);
+       status = efi_get_memory_map(&map);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
+               pr_efi_err("Unable to retrieve UEFI memory map.\n");
                return status;
        }
 
-       pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n");
+       pr_efi("Exiting boot services and installing virtual address map...\n");
 
        map.map = &memory_map;
-       status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN,
+       status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN,
                                new_fdt_addr, max_addr);
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n");
+               pr_efi_err("Unable to allocate memory for new device tree.\n");
                goto fail;
        }
 
@@ -295,16 +292,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
         * Now that we have done our final memory allocation (and free)
         * we can get the memory map key needed for exit_boot_services().
         */
-       status = efi_get_memory_map(sys_table, &map);
+       status = efi_get_memory_map(&map);
        if (status != EFI_SUCCESS)
                goto fail_free_new_fdt;
 
-       status = update_fdt(sys_table, (void *)fdt_addr, fdt_size,
+       status = update_fdt((void *)fdt_addr, fdt_size,
                            (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
                            initrd_addr, initrd_size);
 
        if (status != EFI_SUCCESS) {
-               pr_efi_err(sys_table, "Unable to construct new device tree.\n");
+               pr_efi_err("Unable to construct new device tree.\n");
                goto fail_free_new_fdt;
        }
 
@@ -313,7 +310,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
        priv.runtime_entry_count        = &runtime_entry_count;
        priv.new_fdt_addr               = (void *)*new_fdt_addr;
 
-       status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func);
+       status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
 
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
@@ -322,7 +319,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                        return EFI_SUCCESS;
 
                /* Install the new virtual address map */
-               svam = sys_table->runtime->set_virtual_address_map;
+               svam = efi_system_table()->runtime->set_virtual_address_map;
                status = svam(runtime_entry_count * desc_size, desc_size,
                              desc_ver, runtime_map);
 
@@ -350,28 +347,28 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                return EFI_SUCCESS;
        }
 
-       pr_efi_err(sys_table, "Exit boot services failed.\n");
+       pr_efi_err("Exit boot services failed.\n");
 
 fail_free_new_fdt:
-       efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr);
+       efi_free(MAX_FDT_SIZE, *new_fdt_addr);
 
 fail:
-       sys_table->boottime->free_pool(runtime_map);
+       efi_system_table()->boottime->free_pool(runtime_map);
 
        return EFI_LOAD_ERROR;
 }
 
-void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
+void *get_fdt(unsigned long *fdt_size)
 {
        void *fdt;
 
-       fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID);
+       fdt = get_efi_config_table(DEVICE_TREE_GUID);
 
        if (!fdt)
                return NULL;
 
        if (fdt_check_header(fdt) != 0) {
-               pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+               pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
                return NULL;
        }
        *fdt_size = fdt_totalsize(fdt);
index 0101ca4c13b1a8fc3c81c59be3084ef9b7d80fbf..55e6b3f286fe0bc9e012062e316e8a404d1f2d92 100644 (file)
@@ -10,6 +10,8 @@
 #include <asm/efi.h>
 #include <asm/setup.h>
 
+#include "efistub.h"
+
 static void find_bits(unsigned long mask, u8 *pos, u8 *size)
 {
        u8 first, len;
@@ -35,7 +37,7 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
 
 static void
 setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
-                struct efi_pixel_bitmask pixel_info, int pixel_format)
+                efi_pixel_bitmask_t pixel_info, int pixel_format)
 {
        if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
                si->lfb_depth = 32;
@@ -83,189 +85,44 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
        }
 }
 
-static efi_status_t
-__gop_query32(efi_system_table_t *sys_table_arg,
-             struct efi_graphics_output_protocol_32 *gop32,
-             struct efi_graphics_output_mode_info **info,
-             unsigned long *size, u64 *fb_base)
-{
-       struct efi_graphics_output_protocol_mode_32 *mode;
-       efi_graphics_output_protocol_query_mode query_mode;
-       efi_status_t status;
-       unsigned long m;
-
-       m = gop32->mode;
-       mode = (struct efi_graphics_output_protocol_mode_32 *)m;
-       query_mode = (void *)(unsigned long)gop32->query_mode;
-
-       status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
-                                 info);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       *fb_base = mode->frame_buffer_base;
-       return status;
-}
-
-static efi_status_t
-setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
-            efi_guid_t *proto, unsigned long size, void **gop_handle)
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
+                             unsigned long size, void **handles)
 {
-       struct efi_graphics_output_protocol_32 *gop32, *first_gop;
-       unsigned long nr_gops;
+       efi_graphics_output_protocol_t *gop, *first_gop;
        u16 width, height;
        u32 pixels_per_scan_line;
        u32 ext_lfb_base;
-       u64 fb_base;
-       struct efi_pixel_bitmask pixel_info;
+       efi_physical_addr_t fb_base;
+       efi_pixel_bitmask_t pixel_info;
        int pixel_format;
-       efi_status_t status = EFI_NOT_FOUND;
-       u32 *handles = (u32 *)(unsigned long)gop_handle;
-       int i;
-
-       first_gop = NULL;
-       gop32 = NULL;
-
-       nr_gops = size / sizeof(u32);
-       for (i = 0; i < nr_gops; i++) {
-               struct efi_graphics_output_mode_info *info = NULL;
-               efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
-               bool conout_found = false;
-               void *dummy = NULL;
-               efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
-               u64 current_fb_base;
-
-               status = efi_call_early(handle_protocol, h,
-                                       proto, (void **)&gop32);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               status = efi_call_early(handle_protocol, h,
-                                       &conout_proto, &dummy);
-               if (status == EFI_SUCCESS)
-                       conout_found = true;
-
-               status = __gop_query32(sys_table_arg, gop32, &info, &size,
-                                      &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
-                   info->pixel_format != PIXEL_BLT_ONLY) {
-                       /*
-                        * Systems that use the UEFI Console Splitter may
-                        * provide multiple GOP devices, not all of which are
-                        * backed by real hardware. The workaround is to search
-                        * for a GOP implementing the ConOut protocol, and if
-                        * one isn't found, to just fall back to the first GOP.
-                        */
-                       width = info->horizontal_resolution;
-                       height = info->vertical_resolution;
-                       pixel_format = info->pixel_format;
-                       pixel_info = info->pixel_information;
-                       pixels_per_scan_line = info->pixels_per_scan_line;
-                       fb_base = current_fb_base;
-
-                       /*
-                        * Once we've found a GOP supporting ConOut,
-                        * don't bother looking any further.
-                        */
-                       first_gop = gop32;
-                       if (conout_found)
-                               break;
-               }
-       }
-
-       /* Did we find any GOPs? */
-       if (!first_gop)
-               goto out;
-
-       /* EFI framebuffer */
-       si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
-       si->lfb_width = width;
-       si->lfb_height = height;
-       si->lfb_base = fb_base;
-
-       ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
-       if (ext_lfb_base) {
-               si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
-               si->ext_lfb_base = ext_lfb_base;
-       }
-
-       si->pages = 1;
-
-       setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
-
-       si->lfb_size = si->lfb_linelength * si->lfb_height;
-
-       si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
-       return status;
-}
-
-static efi_status_t
-__gop_query64(efi_system_table_t *sys_table_arg,
-             struct efi_graphics_output_protocol_64 *gop64,
-             struct efi_graphics_output_mode_info **info,
-             unsigned long *size, u64 *fb_base)
-{
-       struct efi_graphics_output_protocol_mode_64 *mode;
-       efi_graphics_output_protocol_query_mode query_mode;
        efi_status_t status;
-       unsigned long m;
-
-       m = gop64->mode;
-       mode = (struct efi_graphics_output_protocol_mode_64 *)m;
-       query_mode = (void *)(unsigned long)gop64->query_mode;
-
-       status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
-                                 info);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       *fb_base = mode->frame_buffer_base;
-       return status;
-}
-
-static efi_status_t
-setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
-           efi_guid_t *proto, unsigned long size, void **gop_handle)
-{
-       struct efi_graphics_output_protocol_64 *gop64, *first_gop;
-       unsigned long nr_gops;
-       u16 width, height;
-       u32 pixels_per_scan_line;
-       u32 ext_lfb_base;
-       u64 fb_base;
-       struct efi_pixel_bitmask pixel_info;
-       int pixel_format;
-       efi_status_t status = EFI_NOT_FOUND;
-       u64 *handles = (u64 *)(unsigned long)gop_handle;
+       efi_handle_t h;
        int i;
 
        first_gop = NULL;
-       gop64 = NULL;
+       gop = NULL;
 
-       nr_gops = size / sizeof(u64);
-       for (i = 0; i < nr_gops; i++) {
-               struct efi_graphics_output_mode_info *info = NULL;
+       for_each_efi_handle(h, handles, size, i) {
+               efi_graphics_output_protocol_mode_t *mode;
+               efi_graphics_output_mode_info_t *info = NULL;
                efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
                bool conout_found = false;
                void *dummy = NULL;
-               efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
-               u64 current_fb_base;
+               efi_physical_addr_t current_fb_base;
 
-               status = efi_call_early(handle_protocol, h,
-                                       proto, (void **)&gop64);
+               status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
                if (status != EFI_SUCCESS)
                        continue;
 
-               status = efi_call_early(handle_protocol, h,
-                                       &conout_proto, &dummy);
+               status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
                if (status == EFI_SUCCESS)
                        conout_found = true;
 
-               status = __gop_query64(sys_table_arg, gop64, &info, &size,
-                                      &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+               mode = efi_table_attr(gop, mode);
+               info = efi_table_attr(mode, info);
+               current_fb_base = efi_table_attr(mode, frame_buffer_base);
+
+               if ((!first_gop || conout_found) &&
                    info->pixel_format != PIXEL_BLT_ONLY) {
                        /*
                         * Systems that use the UEFI Console Splitter may
@@ -285,7 +142,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
                         * Once we've found a GOP supporting ConOut,
                         * don't bother looking any further.
                         */
-                       first_gop = gop64;
+                       first_gop = gop;
                        if (conout_found)
                                break;
                }
@@ -293,7 +150,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
        /* Did we find any GOPs? */
        if (!first_gop)
-               goto out;
+               return EFI_NOT_FOUND;
 
        /* EFI framebuffer */
        si->orig_video_isVGA = VIDEO_TYPE_EFI;
@@ -315,40 +172,32 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
        si->lfb_size = si->lfb_linelength * si->lfb_height;
 
        si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
-       return status;
+
+       return EFI_SUCCESS;
 }
 
 /*
  * See if we have Graphics Output Protocol
  */
-efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
-                          struct screen_info *si, efi_guid_t *proto,
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
                           unsigned long size)
 {
        efi_status_t status;
        void **gop_handle = NULL;
 
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               size, (void **)&gop_handle);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+                            (void **)&gop_handle);
        if (status != EFI_SUCCESS)
                return status;
 
-       status = efi_call_early(locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL,
-                               proto, NULL, &size, gop_handle);
+       status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
+                            &size, gop_handle);
        if (status != EFI_SUCCESS)
                goto free_handle;
 
-       if (efi_is_64bit()) {
-               status = setup_gop64(sys_table_arg, si, proto, size,
-                                    gop_handle);
-       } else {
-               status = setup_gop32(sys_table_arg, si, proto, size,
-                                    gop_handle);
-       }
+       status = setup_gop(si, proto, size, gop_handle);
 
 free_handle:
-       efi_call_early(free_pool, gop_handle);
+       efi_bs_call(free_pool, gop_handle);
        return status;
 }
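(Aside: for_each_efi_handle() is what lets one setup_gop() replace the 32/64-bit pair; natively the buffer holds pointer-sized handles, in mixed mode 32-bit ones. A sketch of the native-case equivalent, with illustrative local names.)

	efi_handle_t *array = (efi_handle_t *)handles;
	int nr = size / sizeof(efi_handle_t);

	for (i = 0; i < nr; i++) {
		efi_handle_t h = array[i];
		/* ... efi_bs_call(handle_protocol, h, proto, ...) ... */
	}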
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
new file mode 100644 (file)
index 0000000..b025e59
--- /dev/null
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI-related functions used by the EFI stub on multiple
+ * architectures.
+ *
+ * Copyright 2019 Google, LLC
+ */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+void efi_pci_disable_bridge_busmaster(void)
+{
+       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+       unsigned long pci_handle_size = 0;
+       efi_handle_t *pci_handle = NULL;
+       efi_handle_t handle;
+       efi_status_t status;
+       u16 class, command;
+       int i;
+
+       status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+                            NULL, &pci_handle_size, NULL);
+
+       if (status != EFI_BUFFER_TOO_SMALL) {
+               if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+                       pr_efi_err("Failed to locate PCI I/O handles'\n");
+               return;
+       }
+
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
+                            (void **)&pci_handle);
+       if (status != EFI_SUCCESS) {
+               pr_efi_err("Failed to allocate memory for 'pci_handle'\n");
+               return;
+       }
+
+       status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+                            NULL, &pci_handle_size, pci_handle);
+       if (status != EFI_SUCCESS) {
+               pr_efi_err("Failed to locate PCI I/O handles'\n");
+               goto free_handle;
+       }
+
+       for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+               efi_pci_io_protocol_t *pci;
+               unsigned long segment_nr, bus_nr, device_nr, func_nr;
+
+               status = efi_bs_call(handle_protocol, handle, &pci_proto,
+                                    (void **)&pci);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               /*
+                * Disregard devices living on bus 0 - these are not behind a
+                * bridge so no point in disconnecting them from their drivers.
+                */
+               status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr,
+                                       &device_nr, &func_nr);
+               if (status != EFI_SUCCESS || bus_nr == 0)
+                       continue;
+
+               /*
+                * Don't disconnect VGA controllers so we don't risk losing
+                * access to the framebuffer. Drivers for true PCIe graphics
+                * controllers that are behind a PCIe root port do not use
+                * DMA to implement the GOP framebuffer anyway [although they
+                * may use it in their implementation of Gop->Blt()], and so
+                * disabling DMA in the PCI bridge should not interfere with
+                * normal operation of the device.
+                */
+               status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+                                       PCI_CLASS_DEVICE, 1, &class);
+               if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA)
+                       continue;
+
+               /* Disconnect this handle from all its drivers */
+               efi_bs_call(disconnect_controller, handle, NULL, NULL);
+       }
+
+       for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+               efi_pci_io_protocol_t *pci;
+
+               status = efi_bs_call(handle_protocol, handle, &pci_proto,
+                                    (void **)&pci);
+               if (status != EFI_SUCCESS || !pci)
+                       continue;
+
+               status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+                                       PCI_CLASS_DEVICE, 1, &class);
+
+               if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI)
+                       continue;
+
+               /* Disable busmastering */
+               status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+                                       PCI_COMMAND, 1, &command);
+               if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER))
+                       continue;
+
+               command &= ~PCI_COMMAND_MASTER;
+               status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16,
+                                       PCI_COMMAND, 1, &command);
+               if (status != EFI_SUCCESS)
+                       pr_efi_err("Failed to disable PCI busmastering\n");
+       }
+
+free_handle:
+       efi_bs_call(free_pool, pci_handle);
+}
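(Aside: how the new file is reached, traced through the hunks above; a summary, not new mechanism.)

	/*
	 *   kernel command line: efi=disable_early_pci_dma
	 *     -> efi_parse_options() sets efi_disable_pci_dma = true
	 *     -> efi_exit_boot_services() calls
	 *        efi_pci_disable_bridge_busmaster() immediately before
	 *        efi_bs_call(exit_boot_services, handle, key), so bridge DMA
	 *        is quiesced at the last moment the PCI I/O protocol is
	 *        still usable.
	 */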
index 35edd7cfb6a13d2002101d3e2b6874180d52c3da..316ce9ff01932c30833b71f76e34ac52a4fd4e66 100644 (file)
@@ -9,38 +9,34 @@
 
 #include "efistub.h"
 
-typedef struct efi_rng_protocol efi_rng_protocol_t;
-
-typedef struct {
-       u32 get_info;
-       u32 get_rng;
-} efi_rng_protocol_32_t;
-
-typedef struct {
-       u64 get_info;
-       u64 get_rng;
-} efi_rng_protocol_64_t;
-
-struct efi_rng_protocol {
-       efi_status_t (*get_info)(struct efi_rng_protocol *,
-                                unsigned long *, efi_guid_t *);
-       efi_status_t (*get_rng)(struct efi_rng_protocol *,
-                               efi_guid_t *, unsigned long, u8 *out);
+typedef union efi_rng_protocol efi_rng_protocol_t;
+
+union efi_rng_protocol {
+       struct {
+               efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *,
+                                                 unsigned long *,
+                                                 efi_guid_t *);
+               efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *,
+                                                efi_guid_t *, unsigned long,
+                                                u8 *out);
+       };
+       struct {
+               u32 get_info;
+               u32 get_rng;
+       } mixed_mode;
 };
 
-efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
-                                 unsigned long size, u8 *out)
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out)
 {
        efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
        efi_status_t status;
-       struct efi_rng_protocol *rng;
+       efi_rng_protocol_t *rng = NULL;
 
-       status = efi_call_early(locate_protocol, &rng_proto, NULL,
-                               (void **)&rng);
+       status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
        if (status != EFI_SUCCESS)
                return status;
 
-       return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out);
+       return efi_call_proto(rng, get_rng, NULL, size, out);
 }
 
 /*
@@ -81,8 +77,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
  */
 #define MD_NUM_SLOTS(md)       ((md)->virt_addr)
 
-efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
-                             unsigned long size,
+efi_status_t efi_random_alloc(unsigned long size,
                              unsigned long align,
                              unsigned long *addr,
                              unsigned long random_seed)
@@ -101,7 +96,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
        map.key_ptr =   NULL;
        map.buff_size = &buff_size;
 
-       status = efi_get_memory_map(sys_table_arg, &map);
+       status = efi_get_memory_map(&map);
        if (status != EFI_SUCCESS)
                return status;
 
@@ -145,39 +140,38 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
                target = round_up(md->phys_addr, align) + target_slot * align;
                pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
 
-               status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
-                                       EFI_LOADER_DATA, pages, &target);
+               status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+                                    EFI_LOADER_DATA, pages, &target);
                if (status == EFI_SUCCESS)
                        *addr = target;
                break;
        }
 
-       efi_call_early(free_pool, memory_map);
+       efi_bs_call(free_pool, memory_map);
 
        return status;
 }
 
-efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
+efi_status_t efi_random_get_seed(void)
 {
        efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
        efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
        efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
-       struct efi_rng_protocol *rng;
-       struct linux_efi_random_seed *seed;
+       efi_rng_protocol_t *rng = NULL;
+       struct linux_efi_random_seed *seed = NULL;
        efi_status_t status;
 
-       status = efi_call_early(locate_protocol, &rng_proto, NULL,
-                               (void **)&rng);
+       status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
        if (status != EFI_SUCCESS)
                return status;
 
-       status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
-                               sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
-                               (void **)&seed);
+       status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+                            sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
+                            (void **)&seed);
        if (status != EFI_SUCCESS)
                return status;
 
-       status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw,
+       status = efi_call_proto(rng, get_rng, &rng_algo_raw,
                                 EFI_RANDOM_SEED_SIZE, seed->bits);
 
        if (status == EFI_UNSUPPORTED)
@@ -185,21 +179,20 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
                 * Use whatever algorithm we have available if the raw algorithm
                 * is not implemented.
                 */
-               status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL,
-                                        EFI_RANDOM_SEED_SIZE, seed->bits);
+               status = efi_call_proto(rng, get_rng, NULL,
+                                       EFI_RANDOM_SEED_SIZE, seed->bits);
 
        if (status != EFI_SUCCESS)
                goto err_freepool;
 
        seed->size = EFI_RANDOM_SEED_SIZE;
-       status = efi_call_early(install_configuration_table, &rng_table_guid,
-                               seed);
+       status = efi_bs_call(install_configuration_table, &rng_table_guid, seed);
        if (status != EFI_SUCCESS)
                goto err_freepool;
 
        return EFI_SUCCESS;
 
 err_freepool:
-       efi_call_early(free_pool, seed);
+       efi_bs_call(free_pool, seed);
        return status;
 }
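(Aside: a hedged usage sketch of the reworked efi_get_random_bytes(); the buffer size here is arbitrary.)

	u8 seed[32];
	efi_status_t status;

	/* Returns EFI_NOT_FOUND (etc.) if firmware lacks EFI_RNG_PROTOCOL. */
	status = efi_get_random_bytes(sizeof(seed), seed);
	if (status != EFI_SUCCESS)
		pr_efi_err("Failed to get random bytes\n");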
index edba5e7a37437a777eed20cb4bf0b15cf4cd7cc4..a765378ad18c726ee18ae3fc3cf07e58412f4c60 100644 (file)
@@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
 static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
 static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
 
-#define get_efi_var(name, vendor, ...) \
-       efi_call_runtime(get_variable, \
-                        (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
-                        __VA_ARGS__);
-
 /*
  * Determine whether we're in secure boot mode.
  *
  * Please keep the logic in sync with
  * arch/x86/xen/efi.c:xen_efi_get_secureboot().
  */
-enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
+enum efi_secureboot_mode efi_get_secureboot(void)
 {
        u32 attr;
        u8 secboot, setupmode, moksbstate;
@@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
                return efi_secureboot_mode_disabled;
 
 secure_boot_enabled:
-       pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n");
+       pr_efi("UEFI Secure Boot is enabled.\n");
        return efi_secureboot_mode_enabled;
 
 out_efi_err:
-       pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
+       pr_efi_err("Could not determine UEFI Secure Boot status.\n");
        return efi_secureboot_mode_unknown;
 }
index eb9af83e4d5914d83decda4c5503dec6eaab0daa..1d59e103a2e3a61aae5b29d99a5c3a25e7784a9b 100644 (file)
@@ -20,23 +20,13 @@ static const efi_char16_t efi_MemoryOverWriteRequest_name[] =
 #define MEMORY_ONLY_RESET_CONTROL_GUID \
        EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
 
-#define get_efi_var(name, vendor, ...) \
-       efi_call_runtime(get_variable, \
-                        (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
-                        __VA_ARGS__)
-
-#define set_efi_var(name, vendor, ...) \
-       efi_call_runtime(set_variable, \
-                        (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
-                        __VA_ARGS__)
-
 /*
  * Enable reboot attack mitigation. This requests that the firmware clear the
  * RAM on next reboot before proceeding with boot, ensuring that any secrets
  * are cleared. If userland has ensured that all secrets have been removed
  * from RAM before reboot it can simply reset this variable.
  */
-void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
+void efi_enable_reset_attack_mitigation(void)
 {
        u8 val = 1;
        efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
@@ -57,7 +47,7 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
 
 #endif
 
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+void efi_retrieve_tpm2_eventlog(void)
 {
        efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
        efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
@@ -69,23 +59,22 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
        size_t log_size, last_entry_size;
        efi_bool_t truncated;
        int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
-       void *tcg2_protocol = NULL;
+       efi_tcg2_protocol_t *tcg2_protocol = NULL;
        int final_events_size = 0;
 
-       status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
-                               &tcg2_protocol);
+       status = efi_bs_call(locate_protocol, &tcg2_guid, NULL,
+                            (void **)&tcg2_protocol);
        if (status != EFI_SUCCESS)
                return;
 
-       status = efi_call_proto(efi_tcg2_protocol, get_event_log,
-                               tcg2_protocol, version, &log_location,
-                               &log_last_entry, &truncated);
+       status = efi_call_proto(tcg2_protocol, get_event_log, version,
+                               &log_location, &log_last_entry, &truncated);
 
        if (status != EFI_SUCCESS || !log_location) {
                version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
-               status = efi_call_proto(efi_tcg2_protocol, get_event_log,
-                                       tcg2_protocol, version, &log_location,
-                                       &log_last_entry, &truncated);
+               status = efi_call_proto(tcg2_protocol, get_event_log, version,
+                                       &log_location, &log_last_entry,
+                                       &truncated);
                if (status != EFI_SUCCESS || !log_location)
                        return;
 
@@ -126,13 +115,11 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
        }
 
        /* Allocate space for the logs and copy them. */
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               sizeof(*log_tbl) + log_size,
-                               (void **) &log_tbl);
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+                            sizeof(*log_tbl) + log_size, (void **)&log_tbl);
 
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg,
-                          "Unable to allocate memory for event log\n");
+               efi_printk("Unable to allocate memory for event log\n");
                return;
        }
 
@@ -140,8 +127,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
         * Figure out whether any events have already been logged to the
         * final events structure, and if so how much space they take up
         */
-       final_events_table = get_efi_config_table(sys_table_arg,
-                                               LINUX_EFI_TPM_FINAL_LOG_GUID);
+       final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
        if (final_events_table && final_events_table->nr_events) {
                struct tcg_pcr_event2_head *header;
                int offset;
@@ -169,12 +155,12 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
        log_tbl->version = version;
        memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
 
-       status = efi_call_early(install_configuration_table,
-                               &linux_eventlog_guid, log_tbl);
+       status = efi_bs_call(install_configuration_table,
+                            &linux_eventlog_guid, log_tbl);
        if (status != EFI_SUCCESS)
                goto err_free;
        return;
 
 err_free:
-       efi_call_early(free_pool, log_tbl);
+       efi_bs_call(free_pool, log_tbl);
 }
index 38b686c67b177da4875b9174c0f50ca165c6b2d4..2ff1883dc788d2954761e894e6edfd267a576535 100644 (file)
@@ -29,9 +29,32 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
        return PFN_PHYS(page_to_pfn(p));
 }
 
+void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
+{
+       if (flags & EFI_MEMMAP_MEMBLOCK) {
+               if (slab_is_available())
+                       memblock_free_late(phys, size);
+               else
+                       memblock_free(phys, size);
+       } else if (flags & EFI_MEMMAP_SLAB) {
+               struct page *p = pfn_to_page(PHYS_PFN(phys));
+               unsigned int order = get_order(size);
+
+               free_pages((unsigned long) page_address(p), order);
+       }
+}
+
+static void __init efi_memmap_free(void)
+{
+       __efi_memmap_free(efi.memmap.phys_map,
+                       efi.memmap.desc_size * efi.memmap.nr_map,
+                       efi.memmap.flags);
+}
+
 /**
  * efi_memmap_alloc - Allocate memory for the EFI memory map
  * @num_entries: Number of entries in the allocated map.
+ * @data: efi memmap installation parameters
  *
  * Depending on whether mm_init() has already been invoked or not,
  * either memblock or "normal" page allocation is used.
@@ -39,34 +62,47 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
  * Returns the physical address of the allocated memory map on
  * success, zero on failure.
  */
-phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+int __init efi_memmap_alloc(unsigned int num_entries,
+               struct efi_memory_map_data *data)
 {
-       unsigned long size = num_entries * efi.memmap.desc_size;
-
-       if (slab_is_available())
-               return __efi_memmap_alloc_late(size);
+       /* Expect the allocation parameters to be zero-initialized */
+       WARN_ON(data->phys_map || data->size);
+
+       data->size = num_entries * efi.memmap.desc_size;
+       data->desc_version = efi.memmap.desc_version;
+       data->desc_size = efi.memmap.desc_size;
+       data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
+       data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;
+
+       if (slab_is_available()) {
+               data->flags |= EFI_MEMMAP_SLAB;
+               data->phys_map = __efi_memmap_alloc_late(data->size);
+       } else {
+               data->flags |= EFI_MEMMAP_MEMBLOCK;
+               data->phys_map = __efi_memmap_alloc_early(data->size);
+       }
 
-       return __efi_memmap_alloc_early(size);
+       if (!data->phys_map)
+               return -ENOMEM;
+       return 0;
 }
 
 /**
  * __efi_memmap_init - Common code for mapping the EFI memory map
  * @data: EFI memory map data
- * @late: Use early or late mapping function?
  *
  * This function takes care of figuring out which function to use to
  * map the EFI memory map in efi.memmap based on how far into the boot
  * we are.
  *
- * During bootup @late should be %false since we only have access to
- * the early_memremap*() functions as the vmalloc space isn't setup.
- * Once the kernel is fully booted we can fallback to the more robust
- * memremap*() API.
+ * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we
+ * only have access to the early_memremap*() functions as the vmalloc
+ * space isn't set up.  Once the kernel is fully booted we can fall back
+ * to the more robust memremap*() API.
  *
  * Returns zero on success, a negative error code on failure.
  */
-static int __init
-__efi_memmap_init(struct efi_memory_map_data *data, bool late)
+static int __init __efi_memmap_init(struct efi_memory_map_data *data)
 {
        struct efi_memory_map map;
        phys_addr_t phys_map;
@@ -76,7 +112,7 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
 
        phys_map = data->phys_map;
 
-       if (late)
+       if (data->flags & EFI_MEMMAP_LATE)
                map.map = memremap(phys_map, data->size, MEMREMAP_WB);
        else
                map.map = early_memremap(phys_map, data->size);
@@ -86,13 +122,16 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
                return -ENOMEM;
        }
 
+       /* NOP if data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB) == 0 */
+       efi_memmap_free();
+
        map.phys_map = data->phys_map;
        map.nr_map = data->size / data->desc_size;
        map.map_end = map.map + data->size;
 
        map.desc_version = data->desc_version;
        map.desc_size = data->desc_size;
-       map.late = late;
+       map.flags = data->flags;
 
        set_bit(EFI_MEMMAP, &efi.flags);
 
@@ -111,9 +150,10 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late)
 int __init efi_memmap_init_early(struct efi_memory_map_data *data)
 {
        /* Cannot go backwards */
-       WARN_ON(efi.memmap.late);
+       WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
 
-       return __efi_memmap_init(data, false);
+       data->flags = 0;
+       return __efi_memmap_init(data);
 }
 
 void __init efi_memmap_unmap(void)
@@ -121,7 +161,7 @@ void __init efi_memmap_unmap(void)
        if (!efi_enabled(EFI_MEMMAP))
                return;
 
-       if (!efi.memmap.late) {
+       if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
                unsigned long size;
 
                size = efi.memmap.desc_size * efi.memmap.nr_map;
@@ -162,13 +202,14 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
        struct efi_memory_map_data data = {
                .phys_map = addr,
                .size = size,
+               .flags = EFI_MEMMAP_LATE,
        };
 
        /* Did we forget to unmap the early EFI memmap? */
        WARN_ON(efi.memmap.map);
 
        /* Were we already called? */
-       WARN_ON(efi.memmap.late);
+       WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
 
        /*
         * It makes no sense to allow callers to register different
@@ -178,13 +219,12 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
        data.desc_version = efi.memmap.desc_version;
        data.desc_size = efi.memmap.desc_size;
 
-       return __efi_memmap_init(&data, true);
+       return __efi_memmap_init(&data);
 }
 
 /**
  * efi_memmap_install - Install a new EFI memory map in efi.memmap
- * @addr: Physical address of the memory map
- * @nr_map: Number of entries in the memory map
+ * @data: map allocation parameters (address, size, flags)
  *
  * Unlike efi_memmap_init_*(), this function does not allow the caller
  * to switch from early to late mappings. It simply uses the existing
@@ -192,18 +232,11 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
  *
  * Returns zero on success, a negative error code on failure.
  */
-int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
+int __init efi_memmap_install(struct efi_memory_map_data *data)
 {
-       struct efi_memory_map_data data;
-
        efi_memmap_unmap();
 
-       data.phys_map = addr;
-       data.size = efi.memmap.desc_size * nr_map;
-       data.desc_version = efi.memmap.desc_version;
-       data.desc_size = efi.memmap.desc_size;
-
-       return __efi_memmap_init(&data, efi.memmap.late);
+       return __efi_memmap_init(data);
 }
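
With the flags-based interface, a caller swapping in a rebuilt memory map fills
in a struct efi_memory_map_data itself and lets __efi_memmap_init() pick the
right mapping function. A minimal sketch of the new calling convention (the
caller shape, new_phys and nr_map are assumptions for illustration, not taken
from this hunk):

	struct efi_memory_map_data data = {
		.phys_map     = new_phys,	/* assumed: address of the rebuilt map */
		.size         = efi.memmap.desc_size * nr_map,
		.desc_version = efi.memmap.desc_version,
		.desc_size    = efi.memmap.desc_size,
		/* carry over early/late state, replacing the old 'late' bool */
		.flags        = efi.memmap.flags & EFI_MEMMAP_LATE,
	};

	if (efi_memmap_install(&data))
		pr_err("Could not install new EFI memmap\n");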
 
 /**
index 76b0c354a0277ecd1cb3531fe6ca60ef3eed3eb0..de1a9a1f9f146743e1aff99fc50b36aa49a6e162 100644 (file)
@@ -81,6 +81,9 @@ static int __init efi_rci2_sysfs_init(void)
        struct kobject *tables_kobj;
        int ret = -ENOMEM;
 
+       if (rci2_table_phys == EFI_INVALID_TABLE_ADDR)
+               return 0;
+
        rci2_base = memremap(rci2_table_phys,
                             sizeof(struct rci2_table_global_hdr),
                             MEMREMAP_WB);
index 8adffd42f8cb0559031837899b25c7f6ba23c313..f57d95a3db02622835f0a3c1699bda71d08950e3 100644 (file)
@@ -479,6 +479,15 @@ config GPIO_SAMA5D2_PIOBU
          The difference from regular GPIOs is that they
          maintain their value during backup/self-refresh.
 
+config GPIO_SIFIVE
+       bool "SiFive GPIO support"
+       depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+       select GPIO_GENERIC
+       select GPIOLIB_IRQCHIP
+       select REGMAP_MMIO
+       help
+         Say yes here to support the GPIO device on SiFive SoCs.
+
 config GPIO_SIOX
        tristate "SIOX GPIO support"
        depends on SIOX
@@ -553,8 +562,8 @@ config GPIO_TEGRA
 
 config GPIO_TEGRA186
        tristate "NVIDIA Tegra186 GPIO support"
-       default ARCH_TEGRA_186_SOC
-       depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
+       default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
+       depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
        depends on OF_GPIO
        select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
@@ -573,7 +582,6 @@ config GPIO_THUNDERX
        tristate "Cavium ThunderX/OCTEON-TX GPIO"
        depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
        depends on PCI_MSI
-       select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
        select IRQ_FASTEOI_HIERARCHY_HANDLERS
        help
@@ -1148,6 +1156,7 @@ config GPIO_MADERA
 config GPIO_MAX77620
        tristate "GPIO support for PMIC MAX77620 and MAX20024"
        depends on MFD_MAX77620
+       select GPIOLIB_IRQCHIP
        help
          GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor.
          MAX77620 PMIC has 8 pins that can be configured as GPIOs. The
index 34eb8b2b12dd656c9e00e4ded6fa3881e015f68b..11eeeebbde0daacf898cb8c98c35eb7784c6d67c 100644 (file)
@@ -124,6 +124,7 @@ obj-$(CONFIG_ARCH_SA1100)           += gpio-sa1100.o
 obj-$(CONFIG_GPIO_SAMA5D2_PIOBU)       += gpio-sama5d2-piobu.o
 obj-$(CONFIG_GPIO_SCH311X)             += gpio-sch311x.o
 obj-$(CONFIG_GPIO_SCH)                 += gpio-sch.o
+obj-$(CONFIG_GPIO_SIFIVE)              += gpio-sifive.o
 obj-$(CONFIG_GPIO_SIOX)                        += gpio-siox.o
 obj-$(CONFIG_GPIO_SODAVILLE)           += gpio-sodaville.o
 obj-$(CONFIG_GPIO_SPEAR_SPICS)         += gpio-spear-spics.o
index 7e99860ca447ed5f5a345d5cb070955d5cbb3c60..8319812593e31295c40638e3667cfa5a1fda2234 100644 (file)
@@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
                return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
        default:
                /* actually if code runs to here, it's an error case */
-               BUG_ON(1);
+               BUG();
        }
 }
 
index 56d647a30e3eafee0178b6d416952e697635a745..94b8d3ae27bc306f13d981f0027f43a75a403e81 100644 (file)
@@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
        mutex_lock(&chip->lock);
 
        if (test_bit(FLAG_REQUESTED, &desc->flags) &&
-               !test_bit(FLAG_IS_OUT, &desc->flags)) {
+           !test_bit(FLAG_IS_OUT, &desc->flags)) {
                curr = __gpio_mockup_get(chip, offset);
                if (curr == value)
                        goto out;
@@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
                irq_type = irq_get_trigger_type(irq);
 
                if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
-                       (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
+                   (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
                        irq_sim_fire(sim, offset);
        }
 
@@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
        int direction;
 
        mutex_lock(&chip->lock);
-       direction = !chip->lines[offset].dir;
+       direction = chip->lines[offset].dir;
        mutex_unlock(&chip->lock);
 
        return direction;
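
Dropping the '!' is correct only because the mockup now stores the gpiolib
direction constants in ->dir directly (initialized to input in probe below).
The convention this relies on, from <linux/gpio/driver.h>:

	#define GPIO_LINE_DIRECTION_IN		1
	#define GPIO_LINE_DIRECTION_OUT		0

	/* i.e. .get_direction() returns 1 for input, 0 for output, so a
	 * field holding these constants can be returned untranslated */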
@@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
        struct gpio_chip *gc;
        struct device *dev;
        const char *name;
-       int rv, base;
+       int rv, base, i;
        u16 ngpio;
 
        dev = &pdev->dev;
@@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
        if (!chip->lines)
                return -ENOMEM;
 
+       for (i = 0; i < gc->ngpio; i++)
+               chip->lines[i].dir = GPIO_LINE_DIRECTION_IN;
+
        if (device_property_read_bool(dev, "named-gpio-lines")) {
                rv = gpio_mockup_name_lines(dev, chip);
                if (rv)
index f1e164cecff80375ddcfa89017d15fe2af277886..5ae30de3490ac84118da2da0b169f8f24b9da4b9 100644 (file)
@@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        gc = &mpc8xxx_gc->gc;
+       gc->parent = &pdev->dev;
 
        if (of_property_read_bool(np, "little-endian")) {
                ret = bgpio_init(gc, &pdev->dev, 4,
index 6652bee01966dc1e2c883c0909820b374599f293..9853547e72766678e35f2f4fbab9e1fb908675bb 100644 (file)
@@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ);
+       clear_bit(hwirq, chip->irq_mask);
 }
 
 static void pca953x_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ);
+       set_bit(hwirq, chip->irq_mask);
 }
 
 static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -635,8 +637,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
-       int bank_nb = d->hwirq / BANK_SZ;
-       u8 mask = BIT(d->hwirq % BANK_SZ);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
        if (!(type & IRQ_TYPE_EDGE_BOTH)) {
                dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
                return -EINVAL;
        }
 
-       if (type & IRQ_TYPE_EDGE_FALLING)
-               chip->irq_trig_fall[bank_nb] |= mask;
-       else
-               chip->irq_trig_fall[bank_nb] &= ~mask;
-
-       if (type & IRQ_TYPE_EDGE_RISING)
-               chip->irq_trig_raise[bank_nb] |= mask;
-       else
-               chip->irq_trig_raise[bank_nb] &= ~mask;
+       assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
+       assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
 
        return 0;
 }
@@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct pca953x_chip *chip = gpiochip_get_data(gc);
-       u8 mask = BIT(d->hwirq % BANK_SZ);
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-       chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
-       chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
+       clear_bit(hwirq, chip->irq_trig_raise);
+       clear_bit(hwirq, chip->irq_trig_fall);
 }
 
 static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending)
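
The bitmap helpers assume chip->irq_mask and the trigger fields are unsigned
long bitmaps (as declared with DECLARE_BITMAP() elsewhere in this series)
rather than u8 banks; the helpers then hide the bank/bit arithmetic. Roughly
equivalent, shown side by side as a sketch:

	/* old: open-coded on a u8-per-bank array */
	mask[hwirq / BANK_SZ] &= ~BIT(hwirq % BANK_SZ);

	/* new: on an unsigned long bitmap -- same effect, atomic, and
	 * independent of the bank width */
	clear_bit(hwirq, mask);

assign_bit(nr, map, cond) likewise collapses the old set-or-clear if/else
pairs into a single call.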
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
new file mode 100644 (file)
index 0000000..147a1bd
--- /dev/null
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of_irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+
+#define SIFIVE_GPIO_INPUT_VAL  0x00
+#define SIFIVE_GPIO_INPUT_EN   0x04
+#define SIFIVE_GPIO_OUTPUT_EN  0x08
+#define SIFIVE_GPIO_OUTPUT_VAL 0x0C
+#define SIFIVE_GPIO_RISE_IE    0x18
+#define SIFIVE_GPIO_RISE_IP    0x1C
+#define SIFIVE_GPIO_FALL_IE    0x20
+#define SIFIVE_GPIO_FALL_IP    0x24
+#define SIFIVE_GPIO_HIGH_IE    0x28
+#define SIFIVE_GPIO_HIGH_IP    0x2C
+#define SIFIVE_GPIO_LOW_IE     0x30
+#define SIFIVE_GPIO_LOW_IP     0x34
+#define SIFIVE_GPIO_OUTPUT_XOR 0x40
+
+#define SIFIVE_GPIO_MAX                32
+#define SIFIVE_GPIO_IRQ_OFFSET 7
+
+struct sifive_gpio {
+       void __iomem            *base;
+       struct gpio_chip        gc;
+       struct regmap           *regs;
+       u32                     irq_state;
+       unsigned int            trigger[SIFIVE_GPIO_MAX];
+       unsigned int            irq_parent[SIFIVE_GPIO_MAX];
+};
+
+static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
+{
+       unsigned long flags;
+       unsigned int trigger;
+
+       spin_lock_irqsave(&chip->gc.bgpio_lock, flags);
+       trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0;
+       regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset),
+                          (trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0);
+       regmap_update_bits(chip->regs, SIFIVE_GPIO_FALL_IE, BIT(offset),
+                          (trigger & IRQ_TYPE_EDGE_FALLING) ? BIT(offset) : 0);
+       regmap_update_bits(chip->regs, SIFIVE_GPIO_HIGH_IE, BIT(offset),
+                          (trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0);
+       regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset),
+                          (trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0);
+       spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags);
+}
+
+static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct sifive_gpio *chip = gpiochip_get_data(gc);
+       int offset = irqd_to_hwirq(d);
+
+       if (offset < 0 || offset >= gc->ngpio)
+               return -EINVAL;
+
+       chip->trigger[offset] = trigger;
+       sifive_gpio_set_ie(chip, offset);
+       return 0;
+}
+
+static void sifive_gpio_irq_enable(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct sifive_gpio *chip = gpiochip_get_data(gc);
+       int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+       u32 bit = BIT(offset);
+       unsigned long flags;
+
+       irq_chip_enable_parent(d);
+
+       /* Switch to input */
+       gc->direction_input(gc, offset);
+
+       spin_lock_irqsave(&gc->bgpio_lock, flags);
+       /* Clear any sticky pending interrupts */
+       regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+       regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+       regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+       regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+       spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+       /* Enable interrupts */
+       assign_bit(offset, (unsigned long *)&chip->irq_state, 1);
+       sifive_gpio_set_ie(chip, offset);
+}
+
+static void sifive_gpio_irq_disable(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct sifive_gpio *chip = gpiochip_get_data(gc);
+       int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+
+       assign_bit(offset, (unsigned long *)&chip->irq_state, 0);
+       sifive_gpio_set_ie(chip, offset);
+       irq_chip_disable_parent(d);
+}
+
+static void sifive_gpio_irq_eoi(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct sifive_gpio *chip = gpiochip_get_data(gc);
+       int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
+       u32 bit = BIT(offset);
+       unsigned long flags;
+
+       spin_lock_irqsave(&gc->bgpio_lock, flags);
+       /* Clear all pending interrupts */
+       regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+       regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+       regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+       regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+       spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+       irq_chip_eoi_parent(d);
+}
+
+static struct irq_chip sifive_gpio_irqchip = {
+       .name           = "sifive-gpio",
+       .irq_set_type   = sifive_gpio_irq_set_type,
+       .irq_mask       = irq_chip_mask_parent,
+       .irq_unmask     = irq_chip_unmask_parent,
+       .irq_enable     = sifive_gpio_irq_enable,
+       .irq_disable    = sifive_gpio_irq_disable,
+       .irq_eoi        = sifive_gpio_irq_eoi,
+};
+
+static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+                                            unsigned int child,
+                                            unsigned int child_type,
+                                            unsigned int *parent,
+                                            unsigned int *parent_type)
+{
+       *parent_type = IRQ_TYPE_NONE;
+       *parent = child + SIFIVE_GPIO_IRQ_OFFSET;
+       return 0;
+}
+
+static const struct regmap_config sifive_gpio_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .fast_io = true,
+       .disable_locking = true,
+};
+
+static int sifive_gpio_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = pdev->dev.of_node;
+       struct device_node *irq_parent;
+       struct irq_domain *parent;
+       struct gpio_irq_chip *girq;
+       struct sifive_gpio *chip;
+       int ret, ngpio;
+
+       chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       chip->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(chip->base)) {
+               dev_err(dev, "failed to allocate device memory\n");
+               return PTR_ERR(chip->base);
+       }
+
+       chip->regs = devm_regmap_init_mmio(dev, chip->base,
+                                          &sifive_gpio_regmap_config);
+       if (IS_ERR(chip->regs))
+               return PTR_ERR(chip->regs);
+
+       ngpio = of_irq_count(node);
+       if (ngpio > SIFIVE_GPIO_MAX) {
+               dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
+                       SIFIVE_GPIO_MAX);
+               return -ENXIO;
+       }
+
+       irq_parent = of_irq_find_parent(node);
+       if (!irq_parent) {
+               dev_err(dev, "no IRQ parent node\n");
+               return -ENODEV;
+       }
+       parent = irq_find_host(irq_parent);
+       if (!parent) {
+               dev_err(dev, "no IRQ parent domain\n");
+               return -ENODEV;
+       }
+
+       ret = bgpio_init(&chip->gc, dev, 4,
+                        chip->base + SIFIVE_GPIO_INPUT_VAL,
+                        chip->base + SIFIVE_GPIO_OUTPUT_VAL,
+                        NULL,
+                        chip->base + SIFIVE_GPIO_OUTPUT_EN,
+                        chip->base + SIFIVE_GPIO_INPUT_EN,
+                        0);
+       if (ret) {
+               dev_err(dev, "unable to init generic GPIO\n");
+               return ret;
+       }
+
+       /* Disable all GPIO interrupts before enabling parent interrupts */
+       regmap_write(chip->regs, SIFIVE_GPIO_RISE_IE, 0);
+       regmap_write(chip->regs, SIFIVE_GPIO_FALL_IE, 0);
+       regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IE, 0);
+       regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0);
+       chip->irq_state = 0;
+
+       chip->gc.base = -1;
+       chip->gc.ngpio = ngpio;
+       chip->gc.label = dev_name(dev);
+       chip->gc.parent = dev;
+       chip->gc.owner = THIS_MODULE;
+       girq = &chip->gc.irq;
+       girq->chip = &sifive_gpio_irqchip;
+       girq->fwnode = of_node_to_fwnode(node);
+       girq->parent_domain = parent;
+       girq->child_to_parent_hwirq = sifive_gpio_child_to_parent_hwirq;
+       girq->handler = handle_bad_irq;
+       girq->default_type = IRQ_TYPE_NONE;
+
+       platform_set_drvdata(pdev, chip);
+       return gpiochip_add_data(&chip->gc, chip);
+}
+
+static const struct of_device_id sifive_gpio_match[] = {
+       { .compatible = "sifive,gpio0" },
+       { .compatible = "sifive,fu540-c000-gpio" },
+       { },
+};
+
+static struct platform_driver sifive_gpio_driver = {
+       .probe          = sifive_gpio_probe,
+       .driver = {
+               .name   = "sifive_gpio",
+               .of_match_table = of_match_ptr(sifive_gpio_match),
+       },
+};
+builtin_platform_driver(sifive_gpio_driver);
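
From a consumer, the IRQ hierarchy is invisible; a hypothetical driver on the
same SoC would go through the usual gpiod calls (the "irq" con_id and handler
below are illustrative only):

	struct gpio_desc *line = devm_gpiod_get(dev, "irq", GPIOD_IN);
	int irq = gpiod_to_irq(line);	/* resolved via child_to_parent_hwirq(),
					 * i.e. parent hwirq = line + SIFIVE_GPIO_IRQ_OFFSET */

	if (irq >= 0)
		ret = devm_request_irq(dev, irq, demo_handler, 0, "demo", priv);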
index d08d86a22b1f531854b85d571870a6b99b4ab359..46277047904571d2bd12f34d7a66b79bb349db4c 100644 (file)
@@ -53,6 +53,7 @@ struct thunderx_line {
 struct thunderx_gpio {
        struct gpio_chip        chip;
        u8 __iomem              *register_base;
+       struct irq_domain       *irqd;
        struct msix_entry       *msix_entries;  /* per line MSI-X */
        struct thunderx_line    *line_entries;  /* per line irq info */
        raw_spinlock_t          lock;
@@ -285,60 +286,54 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
        }
 }
 
-static void thunderx_gpio_irq_ack(struct irq_data *d)
+static void thunderx_gpio_irq_ack(struct irq_data *data)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
 
        writeq(GPIO_INTR_INTR,
-              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+              txline->txgpio->register_base + intr_reg(txline->line));
 }
 
-static void thunderx_gpio_irq_mask(struct irq_data *d)
+static void thunderx_gpio_irq_mask(struct irq_data *data)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
 
        writeq(GPIO_INTR_ENA_W1C,
-              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+              txline->txgpio->register_base + intr_reg(txline->line));
 }
 
-static void thunderx_gpio_irq_mask_ack(struct irq_data *d)
+static void thunderx_gpio_irq_mask_ack(struct irq_data *data)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
 
        writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
-              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+              txline->txgpio->register_base + intr_reg(txline->line));
 }
 
-static void thunderx_gpio_irq_unmask(struct irq_data *d)
+static void thunderx_gpio_irq_unmask(struct irq_data *data)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
 
        writeq(GPIO_INTR_ENA_W1S,
-              txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+              txline->txgpio->register_base + intr_reg(txline->line));
 }
 
-static int thunderx_gpio_irq_set_type(struct irq_data *d,
+static int thunderx_gpio_irq_set_type(struct irq_data *data,
                                      unsigned int flow_type)
 {
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
-       struct thunderx_line *txline =
-               &txgpio->line_entries[irqd_to_hwirq(d)];
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct thunderx_gpio *txgpio = txline->txgpio;
        u64 bit_cfg;
 
-       irqd_set_trigger_type(d, flow_type);
+       irqd_set_trigger_type(data, flow_type);
 
        bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
 
        if (flow_type & IRQ_TYPE_EDGE_BOTH) {
-               irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+               irq_set_handler_locked(data, handle_fasteoi_ack_irq);
                bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
        } else {
-               irq_set_handler_locked(d, handle_fasteoi_mask_irq);
+               irq_set_handler_locked(data, handle_fasteoi_mask_irq);
        }
 
        raw_spin_lock(&txgpio->lock);
@@ -367,6 +362,33 @@ static void thunderx_gpio_irq_disable(struct irq_data *data)
        irq_chip_disable_parent(data);
 }
 
+static int thunderx_gpio_irq_request_resources(struct irq_data *data)
+{
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct thunderx_gpio *txgpio = txline->txgpio;
+       int r;
+
+       r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
+       if (r)
+               return r;
+
+       r = irq_chip_request_resources_parent(data);
+       if (r)
+               gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+
+       return r;
+}
+
+static void thunderx_gpio_irq_release_resources(struct irq_data *data)
+{
+       struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+       struct thunderx_gpio *txgpio = txline->txgpio;
+
+       irq_chip_release_resources_parent(data);
+
+       gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+}
+
 /*
  * Interrupts are chained from underlying MSI-X vectors.  We have
  * these irq_chip functions to be able to handle level triggering
@@ -383,24 +405,50 @@ static struct irq_chip thunderx_gpio_irq_chip = {
        .irq_unmask             = thunderx_gpio_irq_unmask,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
+       .irq_request_resources  = thunderx_gpio_irq_request_resources,
+       .irq_release_resources  = thunderx_gpio_irq_release_resources,
        .irq_set_type           = thunderx_gpio_irq_set_type,
 
        .flags                  = IRQCHIP_SET_TYPE_MASKED
 };
 
-static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
-                                              unsigned int child,
-                                              unsigned int child_type,
-                                              unsigned int *parent,
-                                              unsigned int *parent_type)
+static int thunderx_gpio_irq_translate(struct irq_domain *d,
+                                      struct irq_fwspec *fwspec,
+                                      irq_hw_number_t *hwirq,
+                                      unsigned int *type)
 {
-       struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
-
-       *parent = txgpio->base_msi + (2 * child);
-       *parent_type = IRQ_TYPE_LEVEL_HIGH;
+       struct thunderx_gpio *txgpio = d->host_data;
+
+       if (WARN_ON(fwspec->param_count < 2))
+               return -EINVAL;
+       if (fwspec->param[0] >= txgpio->chip.ngpio)
+               return -EINVAL;
+       *hwirq = fwspec->param[0];
+       *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
        return 0;
 }
 
+static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
+                                  unsigned int nr_irqs, void *arg)
+{
+       struct thunderx_line *txline = arg;
+
+       return irq_domain_set_hwirq_and_chip(d, virq, txline->line,
+                                            &thunderx_gpio_irq_chip, txline);
+}
+
+static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
+       .alloc          = thunderx_gpio_irq_alloc,
+       .translate      = thunderx_gpio_irq_translate
+};
+
+static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+       struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+       return irq_find_mapping(txgpio->irqd, offset);
+}
+
 static int thunderx_gpio_probe(struct pci_dev *pdev,
                               const struct pci_device_id *id)
 {
@@ -408,7 +456,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        struct device *dev = &pdev->dev;
        struct thunderx_gpio *txgpio;
        struct gpio_chip *chip;
-       struct gpio_irq_chip *girq;
        int ngpio, i;
        int err = 0;
 
@@ -453,8 +500,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        }
 
        txgpio->msix_entries = devm_kcalloc(dev,
-                                           ngpio, sizeof(struct msix_entry),
-                                           GFP_KERNEL);
+                                         ngpio, sizeof(struct msix_entry),
+                                         GFP_KERNEL);
        if (!txgpio->msix_entries) {
                err = -ENOMEM;
                goto out;
@@ -495,6 +542,27 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        if (err < 0)
                goto out;
 
+       /*
+        * Push a GPIO-specific irqdomain onto the hierarchy created as
+        * a side effect of pci_enable_msix().
+        */
+       txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
+                                                  0, 0, of_node_to_fwnode(dev->of_node),
+                                                  &thunderx_gpio_irqd_ops, txgpio);
+       if (!txgpio->irqd) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       /* Push on irq_data and the domain for each line. */
+       for (i = 0; i < ngpio; i++) {
+               err = irq_domain_push_irq(txgpio->irqd,
+                                         txgpio->msix_entries[i].vector,
+                                         &txgpio->line_entries[i]);
+               if (err < 0)
+                       dev_err(dev, "irq_domain_push_irq: %d\n", err);
+       }
+
        chip->label = KBUILD_MODNAME;
        chip->parent = dev;
        chip->owner = THIS_MODULE;
@@ -509,28 +577,11 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
        chip->set = thunderx_gpio_set;
        chip->set_multiple = thunderx_gpio_set_multiple;
        chip->set_config = thunderx_gpio_set_config;
-       girq = &chip->irq;
-       girq->chip = &thunderx_gpio_irq_chip;
-       girq->fwnode = of_node_to_fwnode(dev->of_node);
-       girq->parent_domain =
-               irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
-       girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
-       girq->handler = handle_bad_irq;
-       girq->default_type = IRQ_TYPE_NONE;
-
+       chip->to_irq = thunderx_gpio_to_irq;
        err = devm_gpiochip_add_data(dev, chip, txgpio);
        if (err)
                goto out;
 
-       /* Push on irq_data and the domain for each line. */
-       for (i = 0; i < ngpio; i++) {
-               err = irq_domain_push_irq(chip->irq.domain,
-                                         txgpio->msix_entries[i].vector,
-                                         chip);
-               if (err < 0)
-                       dev_err(dev, "irq_domain_push_irq: %d\n", err);
-       }
-
        dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
                 ngpio, chip->base);
        return 0;
@@ -545,10 +596,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev)
        struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);
 
        for (i = 0; i < txgpio->chip.ngpio; i++)
-               irq_domain_pop_irq(txgpio->chip.irq.domain,
+               irq_domain_pop_irq(txgpio->irqd,
                                   txgpio->msix_entries[i].vector);
 
-       irq_domain_remove(txgpio->chip.irq.domain);
+       irq_domain_remove(txgpio->irqd);
 
        pci_set_drvdata(pdev, NULL);
 }
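
The central data-flow change: the domain's .alloc callback stores the
per-line struct thunderx_line as chip_data via irq_domain_set_hwirq_and_chip(),
so every irq_chip callback recovers its line directly instead of bouncing
through gpiochip_get_data(). In sketch form:

	/* probe: one pushed irq per MSI-X vector, carrying its line */
	irq_domain_push_irq(txgpio->irqd, txgpio->msix_entries[i].vector,
			    &txgpio->line_entries[i]);

	/* any callback: chip_data is the same pointer passed above */
	struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
	writeq(GPIO_INTR_INTR,
	       txline->txgpio->register_base + intr_reg(txline->line));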
index 773e5c24309e49b3f989a453df40373311afc15b..b21c2e436b61023ca38a83a1e5b690359f2191b7 100644 (file)
@@ -280,7 +280,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit iproc_gpio_remove(struct platform_device *pdev)
+static int iproc_gpio_remove(struct platform_device *pdev)
 {
        struct iproc_gpio_chip *chip;
 
index 08d7c3b3203869b5dd75cc42a4355746ea19ec48..c8af34a6368f4f69f9960fff7f8c27a2c8ddea8b 100644 (file)
@@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable)
        unsigned long flags;
 
        local_irq_save(flags);
-       RSR_CPENABLE(*cpenable);
-       WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP));
-
+       *cpenable = xtensa_get_sr(cpenable);
+       xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable);
        return flags;
 }
 
 static inline void disable_cp(unsigned long flags, unsigned long cpenable)
 {
-       WSR_CPENABLE(cpenable);
+       xtensa_set_sr(cpenable, cpenable);
        local_irq_restore(flags);
 }
 
index 4c3f6370eab4c104a8975356ea0f652e3f92372e..05ba16fffdad0e7e285a6cf520f87ff7630a917e 100644 (file)
@@ -684,6 +684,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
        unsigned int bank_num;
 
        for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+               writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
+                               ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
                writel_relaxed(gpio->context.datalsw[bank_num],
                               gpio->base_addr +
                               ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
@@ -693,9 +695,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
                writel_relaxed(gpio->context.dirm[bank_num],
                               gpio->base_addr +
                               ZYNQ_GPIO_DIRM_OFFSET(bank_num));
-               writel_relaxed(gpio->context.int_en[bank_num],
-                              gpio->base_addr +
-                              ZYNQ_GPIO_INTEN_OFFSET(bank_num));
                writel_relaxed(gpio->context.int_type[bank_num],
                               gpio->base_addr +
                               ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
@@ -705,6 +704,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
                writel_relaxed(gpio->context.int_any[bank_num],
                               gpio->base_addr +
                               ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+               writel_relaxed(~(gpio->context.int_en[bank_num]),
+                              gpio->base_addr +
+                              ZYNQ_GPIO_INTEN_OFFSET(bank_num));
        }
 }
 
index d30e57dc755cf6114d984b65cf56175e78757bb9..31fee5e918b7d14fafde0477231316fee14dbb53 100644 (file)
 #include "gpiolib.h"
 #include "gpiolib-acpi.h"
 
+#define QUIRK_NO_EDGE_EVENTS_ON_BOOT           0x01L
+#define QUIRK_NO_WAKEUP                                0x02L
+
 static int run_edge_events_on_boot = -1;
 module_param(run_edge_events_on_boot, int, 0444);
 MODULE_PARM_DESC(run_edge_events_on_boot,
                 "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
 
+static int honor_wakeup = -1;
+module_param(honor_wakeup, int, 0444);
+MODULE_PARM_DESC(honor_wakeup,
+                "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
+
 /**
  * struct acpi_gpio_event - ACPI GPIO event handler data
  *
@@ -281,7 +289,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        event->handle = evt_handle;
        event->handler = handler;
        event->irq = irq;
-       event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+       event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
        event->pin = pin;
        event->desc = desc;
 
@@ -1309,7 +1317,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
 /* We must use _sync so that this runs after the first deferred_probe run */
 late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
 
-static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] = {
        {
                /*
                 * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
@@ -1319,7 +1327,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
-               }
+               },
+               .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
        },
        {
                /*
@@ -1331,20 +1340,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
-               }
+               },
+               .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+       },
+       {
+               /*
+                * Various HP X2 10 Cherry Trail models use an external
+                * embedded-controller connected via I2C + an ACPI GPIO
+                * event handler. The embedded controller generates various
+                * spurious wakeup events when suspended. So disable wakeup
+                * for its handler (it uses the only ACPI GPIO event handler).
+                * This breaks wakeup when opening the lid; the user needs
+                * to press the power button to wake up the system. The
+                * alternative is suspend simply not working, which is worse.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+               },
+               .driver_data = (void *)QUIRK_NO_WAKEUP,
        },
        {} /* Terminating entry */
 };
 
 static int acpi_gpio_setup_params(void)
 {
+       const struct dmi_system_id *id;
+       long quirks = 0;
+
+       id = dmi_first_match(gpiolib_acpi_quirks);
+       if (id)
+               quirks = (long)id->driver_data;
+
        if (run_edge_events_on_boot < 0) {
-               if (dmi_check_system(run_edge_events_on_boot_blacklist))
+               if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
                        run_edge_events_on_boot = 0;
                else
                        run_edge_events_on_boot = 1;
        }
 
+       if (honor_wakeup < 0) {
+               if (quirks & QUIRK_NO_WAKEUP)
+                       honor_wakeup = 0;
+               else
+                       honor_wakeup = 1;
+       }
+
        return 0;
 }
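
Folding the old blacklist into a generic quirk table works because
dmi_first_match() returns the first matching entry and its ->driver_data can
carry an OR-able flag word. The pattern, as a minimal sketch (vendor string
hypothetical):

	static const struct dmi_system_id quirks[] = {
		{
			.matches = { DMI_MATCH(DMI_SYS_VENDOR, "Example") },
			.driver_data = (void *)(QUIRK_NO_EDGE_EVENTS_ON_BOOT | QUIRK_NO_WAKEUP),
		},
		{} /* terminator */
	};

	const struct dmi_system_id *id = dmi_first_match(quirks);
	long q = id ? (long)id->driver_data : 0;

One trade-off: a machine matching two table entries only picks up the first
entry's flags.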
 
index dc27b1a88e9343a8a029832cef5c6095a7be6238..1b3f217a35e2321e301bc86e9fc5d26db66925b2 100644 (file)
 #include "gpiolib.h"
 #include "gpiolib-of.h"
 
+/**
+ * of_gpio_spi_cs_get_count() - special GPIO counting for SPI
+ * @dev: Consuming device
+ * @con_id: Function within the GPIO consumer
+ *
+ * Some older GPIO controllers need special quirks. Currently we handle
+ * the Freescale GPIO controller with bindings that don't use the
+ * established "cs-gpios" property for chip selects but instead rely on
+ * "gpios" for the chip select lines. If we detect this, we redirect
+ * the counting of "cs-gpios" to count "gpios" transparently to the
+ * driver.
+ */
+static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
+{
+       struct device_node *np = dev->of_node;
+
+       if (!IS_ENABLED(CONFIG_SPI_MASTER))
+               return 0;
+       if (!con_id || strcmp(con_id, "cs"))
+               return 0;
+       if (!of_device_is_compatible(np, "fsl,spi") &&
+           !of_device_is_compatible(np, "aeroflexgaisler,spictrl"))
+               return 0;
+       return of_gpio_named_count(np, "gpios");
+}
+
 /*
  * This is used by external users of of_gpio_count() from <linux/of_gpio.h>
  *
@@ -35,6 +58,10 @@ int of_gpio_get_count(struct device *dev, const char *con_id)
        char propname[32];
        unsigned int i;
 
+       ret = of_gpio_spi_cs_get_count(dev, con_id);
+       if (ret > 0)
+               return ret;
+
        for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
                if (con_id)
                        snprintf(propname, sizeof(propname), "%s-%s",
@@ -104,27 +131,6 @@ static void of_gpio_flags_quirks(struct device_node *np,
                                 enum of_gpio_flags *flags,
                                 int index)
 {
-       /*
-        * Handle MMC "cd-inverted" and "wp-inverted" semantics.
-        */
-       if (IS_ENABLED(CONFIG_MMC)) {
-               /*
-                * Active low is the default according to the
-                * SDHCI specification and the device tree
-                * bindings. However the code in the current
-                * kernel was written such that the phandle
-                * flags were always respected, and "cd-inverted"
-                * would invert the flag from the device phandle.
-                */
-               if (!strcmp(propname, "cd-gpios")) {
-                       if (of_property_read_bool(np, "cd-inverted"))
-                               *flags ^= OF_GPIO_ACTIVE_LOW;
-               }
-               if (!strcmp(propname, "wp-gpios")) {
-                       if (of_property_read_bool(np, "wp-inverted"))
-                               *flags ^= OF_GPIO_ACTIVE_LOW;
-               }
-       }
        /*
         * Some GPIO fixed regulator quirks.
         * Note that active low is the default.
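
The net effect of of_gpio_spi_cs_get_count(): on the two legacy bindings a
count of "cs-gpios" is transparently served from "gpios". A hypothetical
consumer-side view (node contents invented for illustration):

	/* DT: compatible = "fsl,spi"; gpios = <&pio 1 0>, <&pio 2 0>; */
	int ncs = gpiod_count(&pdev->dev, "cs");	/* returns 2 via the quirk */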
index 9913886ede904bb09e40cca32792a01b39bb2822..bcfbfded9ba3feeedd7e9434492f4f15dd921ca5 100644 (file)
@@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
        chip = gpiod_to_chip(desc);
        offset = gpio_chip_hwgpio(desc);
 
+       /*
+        * Open drain emulation using input mode may incorrectly report
+        * input here, fix that up.
+        */
+       if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+           test_bit(FLAG_IS_OUT, &desc->flags))
+               return 0;
+
        if (!chip->get_direction)
                return -ENOTSUPP;
 
@@ -3363,6 +3371,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc)
 }
 EXPORT_SYMBOL_GPL(gpiod_is_active_low);
 
+/**
+ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
+ * @desc: the gpio descriptor to change
+ */
+void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+       VALIDATE_DESC_VOID(desc);
+       change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
+
 /* I/O calls are only valid after configuration completed; the relevant
  * "is this a valid GPIO" error checks should already have been done.
  *
@@ -4472,8 +4491,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
 
                if (chip->ngpio <= p->chip_hwnum) {
                        dev_err(dev,
-                               "requested GPIO %d is out of range [0..%d] for chip %s\n",
-                               idx, chip->ngpio, chip->label);
+                               "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
+                               idx, p->chip_hwnum, chip->ngpio - 1,
+                               chip->label);
                        return ERR_PTR(-EINVAL);
                }
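
gpiod_toggle_active_low() lets subsystems apply inversion quirks after
obtaining a descriptor, which is what replaces the MMC cd-inverted/wp-inverted
special-casing removed from of_gpio_flags_quirks() above. A hedged consumer
sketch (the property handling is assumed to move into the subsystem, e.g.
MMC's slot code):

	struct gpio_desc *cd = devm_gpiod_get(dev, "cd", GPIOD_IN);

	/* honor the legacy "cd-inverted" property at the consumer level */
	if (device_property_read_bool(dev, "cd-inverted"))
		gpiod_toggle_active_low(cd);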
 
index d968c24714125288693c7754e587f9f5f5116dfe..0d12ebf661743f7c87898eb9febaaa1aa505672f 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
 menu "ACP (Audio CoProcessor) Configuration"
 
 config DRM_AMD_ACP
index 2e98c016cb47dbcf947ccaae97cb11fc8fde98a6..9375e7f1242057274c70eec0f24678bdee6f0965 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
 config DRM_AMDGPU_SI
        bool "Enable amdgpu support for SI parts"
        depends on DRM_AMDGPU
index a97fb759e2f42a94b004743219881f47e79a20ee..3e35a8f2c5e553e4c714deff5e838ac1510a2f59 100644 (file)
@@ -613,7 +613,17 @@ static bool amdgpu_atpx_detect(void)
        bool d3_supported = false;
        struct pci_dev *parent_pdev;
 
-       while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               vga_count++;
+
+               has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+               parent_pdev = pci_upstream_bridge(pdev);
+               d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+               amdgpu_atpx_get_quirks(pdev);
+       }
+
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
                vga_count++;
 
                has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
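
Why the scan is split in two: pci_get_class() matches the full 24-bit class
code, and PCI_BASE_CLASS_DISPLAY << 16 == 0x030000 == PCI_CLASS_DISPLAY_VGA << 8,
so the old single loop only ever matched VGA-class GPUs. The added second loop
covers PCI_CLASS_DISPLAY_OTHER << 8 == 0x038000, catching secondary GPUs that
firmware exposes with a non-VGA display class.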
index 2cdaf3b2a72170c5a638530dce89007e850735a8..6614d8a6f4c8d367dcb8bb281f70514f6a054f22 100644 (file)
@@ -604,11 +604,8 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
                        continue;
                }
 
-               for (i = 0; i < num_entities; i++) {
-                       mutex_lock(&ctx->adev->lock_reset);
+               for (i = 0; i < num_entities; i++)
                        drm_sched_entity_fini(&ctx->entities[0][i].entity);
-                       mutex_unlock(&ctx->adev->lock_reset);
-               }
        }
 }
 
index 0ffc9447b573a3f532d1fb37337d240052bdaa18..30a1e3ac21d669c3b0b21c62c4c7b0122dc4df84 100644 (file)
@@ -142,7 +142,7 @@ int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
-int amdgpu_noretry = 1;
+int amdgpu_noretry;
 int amdgpu_force_asic_type = -1;
 
 struct amdgpu_mgpu_info mgpu_info = {
@@ -588,7 +588,7 @@ MODULE_PARM_DESC(mes,
 module_param_named(mes, amdgpu_mes, int, 0444);
 
 MODULE_PARM_DESC(noretry,
-       "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
+       "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
 module_param_named(noretry, amdgpu_noretry, int, 0644);
 
 /**
@@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 
        /* Renoir */
-       {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 
        /* Navi12 */
        {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
@@ -1359,7 +1359,8 @@ static struct drm_driver kms_driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_ATOMIC |
            DRIVER_GEM |
-           DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+           DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
+           DRIVER_SYNCOBJ_TIMELINE,
        .load = amdgpu_driver_load_kms,
        .open = amdgpu_driver_open_kms,
        .postclose = amdgpu_driver_postclose_kms,
index 44be3a45b25eaf2453a649d244365f2f5a727eb5..e1b8d8daeafcb6716e2854e2785c2221eeec115f 100644 (file)
@@ -1488,7 +1488,7 @@ out:
 
                /* Start rlc autoload after psp received all the gfx firmware */
                if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
-                   AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
+                   AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
                        ret = psp_rlc_autoload(psp);
                        if (ret) {
                                DRM_ERROR("Failed to start rlc autoload\n");
index 410587b950f3c8490c881c982281e1e45aae2b20..914acecda5cfab806943ff242e5d229d231cd3c0 100644 (file)
@@ -292,10 +292,10 @@ enum AMDGPU_UCODE_ID {
        AMDGPU_UCODE_ID_CP_MEC2_JT,
        AMDGPU_UCODE_ID_CP_MES,
        AMDGPU_UCODE_ID_CP_MES_DATA,
-       AMDGPU_UCODE_ID_RLC_G,
        AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
        AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
        AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+       AMDGPU_UCODE_ID_RLC_G,
        AMDGPU_UCODE_ID_STORAGE,
        AMDGPU_UCODE_ID_SMC,
        AMDGPU_UCODE_ID_UVD,
index 16fbd2bc8ad1ea00d3f1c7a6c9e8df996970571e..4043ebcea5de6e1efca0a89dad8356e5c5de385e 100644 (file)
@@ -268,23 +268,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 {
        u32 tmp;
 
-       /* Put DF on broadcast mode */
-       adev->df_funcs->enable_broadcast_mode(adev, true);
-
-       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
-               tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
-               tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
-               tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
-               WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
-       } else {
-               tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
-               tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
-               tmp |= DF_V3_6_MGCG_DISABLE;
-               WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
-       }
+       if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
+               /* Put DF on broadcast mode */
+               adev->df_funcs->enable_broadcast_mode(adev, true);
+
+               if (enable) {
+                       tmp = RREG32_SOC15(DF, 0,
+                                       mmDF_PIE_AON0_DfGlobalClkGater);
+                       tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+                       tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
+                       WREG32_SOC15(DF, 0,
+                                       mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+               } else {
+                       tmp = RREG32_SOC15(DF, 0,
+                                       mmDF_PIE_AON0_DfGlobalClkGater);
+                       tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+                       tmp |= DF_V3_6_MGCG_DISABLE;
+                       WREG32_SOC15(DF, 0,
+                                       mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+               }
 
-       /* Exit broadcast mode */
-       adev->df_funcs->enable_broadcast_mode(adev, false);
+               /* Exit broadcast mode */
+               adev->df_funcs->enable_broadcast_mode(adev, false);
+       }
 }
 
 static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
index f2c1b026397b9520f6f6c4ccc26d6800ec3a3374..ba9e53a1abc3affc852774537d5840a27f3c7379 100644 (file)
@@ -117,10 +117,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -162,10 +165,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
index 983db77999e7a6d27800c7d42f2cd03028f5d194..52a647d7022d2bf8382e95d36419bf1e3dd326e7 100644 (file)
@@ -6146,7 +6146,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
 
-       /* EVENT_WRITE_EOP - flush caches, send int */
+       /* Workaround for cache flush problems. First send a dummy EOP
+        * event down the pipe with a sequence number one below the real one.
+        */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+       amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+                                EOP_TC_ACTION_EN |
+                                EOP_TC_WB_ACTION_EN |
+                                EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+                                EVENT_INDEX(5)));
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+                               DATA_SEL(1) | INT_SEL(0));
+       amdgpu_ring_write(ring, lower_32_bits(seq - 1));
+       amdgpu_ring_write(ring, upper_32_bits(seq - 1));
+
+       /* Then send the real EOP event down the pipe:
+        * EVENT_WRITE_EOP - flush caches, send int */
        amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
        amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
                                 EOP_TC_ACTION_EN |
@@ -6888,7 +6904,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
                5 +  /* COND_EXEC */
                7 +  /* PIPELINE_SYNC */
                VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
-               8 +  /* FENCE for VM_FLUSH */
+               12 +  /* FENCE for VM_FLUSH */
                20 + /* GDS switch */
                4 + /* double SWITCH_BUFFER,
                       the first COND_EXEC jump to the place just
@@ -6900,7 +6916,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
                31 + /* DE_META */
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
-               8 + 8 + /* FENCE x2 */
+               12 + 12 + /* FENCE x2 */
                2, /* SWITCH_BUFFER */
        .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
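
On the size accounting: each EVENT_WRITE_EOP emitted here is six dwords (the
PACKET3 header plus the five payload writes visible above), so the dummy+real
pair costs 2 * 6 = 12 dwords, which is where the 8 -> 12 bumps in the
emit_frame_size fence terms come from (the old figure of 8 carried two dwords
of headroom over a single 6-dword packet, assuming the real EOP path is
unchanged).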
index 66328ffa395af240cb6f069206a4a0217de69aeb..97105a5bb246c34d1306819d123d61d1e615fe77 100644 (file)
@@ -1052,17 +1052,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
        case CHIP_VEGA20:
                break;
        case CHIP_RAVEN:
-               /* Disable GFXOFF on original raven.  There are combinations
-                * of sbios and platforms that are not stable.
-                */
-               if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
-                       adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-               else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-                        &&((adev->gfx.rlc_fw_version != 106 &&
-                            adev->gfx.rlc_fw_version < 531) ||
-                           (adev->gfx.rlc_fw_version == 53815) ||
-                           (adev->gfx.rlc_feature_version < 1) ||
-                           !adev->gfx.rlc.is_rlc_v2_1))
+               if (!(adev->rev_id >= 0x8 ||
+                     adev->pdev->device == 0x15d8) &&
+                   (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
+                    !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
                        adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
                if (adev->pm.pp_feature & PP_GFXOFF_MASK)
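
Worth spelling out why the removed branch could go: the else-if repeated
!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) even though the else
branch is only reached when that exact test has just failed, so the second
disable path was unreachable dead code. The rewrite keeps the reachable intent
in one condition and gates it on the SMC firmware version rather than the RLC
one.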
index 2324695074463a9bd3fe16054fc057bb1ef7927c..f5725336a5f26dbc3082bd52a90bc260c557a026 100644 (file)
@@ -219,6 +219,21 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
        return req;
 }
 
+/**
+ * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ */
+static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+                                      uint32_t vmhub)
+{
+       return ((vmhub == AMDGPU_MMHUB_0 ||
+                vmhub == AMDGPU_MMHUB_1) &&
+               (!amdgpu_sriov_vf(adev)));
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -229,6 +244,7 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
 static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                                   unsigned int vmhub, uint32_t flush_type)
 {
+       bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
        struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
        u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
        /* Use register 17 for GART */
@@ -244,8 +260,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
         */
 
        /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-       if (vmhub == AMDGPU_MMHUB_0 ||
-           vmhub == AMDGPU_MMHUB_1) {
+       if (use_semaphore) {
                for (i = 0; i < adev->usec_timeout; i++) {
                        /* a read return value of 1 means semaphore acquire */
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
@@ -278,8 +293,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
        }
 
        /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-       if (vmhub == AMDGPU_MMHUB_0 ||
-           vmhub == AMDGPU_MMHUB_1)
+       if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
@@ -369,6 +383,7 @@ error_alloc:
 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                             unsigned vmid, uint64_t pd_addr)
 {
+       bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;
@@ -381,8 +396,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
         */
 
        /* TODO: Semaphore use for GFXHUB still needs debugging before it can be enabled there as well. */
-       if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
-           ring->funcs->vmhub == AMDGPU_MMHUB_1)
+       if (use_semaphore)
                /* a read return value of 1 means the semaphore was acquired */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
@@ -398,8 +412,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            req, 1 << vmid);
 
        /* TODO: Semaphore use for GFXHUB still needs debugging before it can be enabled there as well. */
-       if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
-           ring->funcs->vmhub == AMDGPU_MMHUB_1)
+       if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
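Taken together, the gmc_v10_0 hunks above (mirrored for gmc_v9_0 below) bracket each VM invalidation request with a per-engine hardware semaphore whenever use_semaphore is true. A minimal sketch of the CPU-side sequence, reusing the names visible in the hunks (the vm_inv_eng0_req register name and the omission of ack polling and error handling are simplifications, not the exact driver code):

	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			if (RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng) == 1)
				break;
			udelay(1);
		}
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);	/* issue the invalidation */

	if (use_semaphore)
		/* write 0 to release the semaphore */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

Both helpers skip SR-IOV virtual functions; the gmc_v9_0 one additionally skips Raven-family device 0x15d8 parts below rev 0x8.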
index 3c355fb5d2b47227621c220115ebc1957841399a..a5b68b5e452fb96c24aa052e288c4880c5bbeb11 100644 (file)
@@ -416,6 +416,24 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
        return req;
 }
 
+/**
+ * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ * Return: true if the VM invalidation request should be bracketed by the
+ * per-engine semaphore acquire/release.
+ */
+static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+                                      uint32_t vmhub)
+{
+       return ((vmhub == AMDGPU_MMHUB_0 ||
+                vmhub == AMDGPU_MMHUB_1) &&
+               (!amdgpu_sriov_vf(adev)) &&
+               (!(adev->asic_type == CHIP_RAVEN &&
+                  adev->rev_id < 0x8 &&
+                  adev->pdev->device == 0x15d8)));
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -435,6 +453,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t vmhub, uint32_t flush_type)
 {
+       bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
        const unsigned eng = 17;
        u32 j, tmp;
        struct amdgpu_vmhub *hub;
@@ -468,8 +487,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
         */
 
        /* TODO: Semaphore use for GFXHUB still needs debugging before it can be enabled there as well. */
-       if (vmhub == AMDGPU_MMHUB_0 ||
-           vmhub == AMDGPU_MMHUB_1) {
+       if (use_semaphore) {
                for (j = 0; j < adev->usec_timeout; j++) {
                        /* a read return value of 1 means the semaphore was acquired */
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
@@ -499,8 +517,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        }
 
        /* TODO: Semaphore use for GFXHUB still needs debugging before it can be enabled there as well. */
-       if (vmhub == AMDGPU_MMHUB_0 ||
-           vmhub == AMDGPU_MMHUB_1)
+       if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
@@ -518,6 +535,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            unsigned vmid, uint64_t pd_addr)
 {
+       bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
        uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
@@ -531,8 +549,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
         */
 
        /* TODO: Semaphore use for GFXHUB still needs debugging before it can be enabled there as well. */
-       if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
-           ring->funcs->vmhub == AMDGPU_MMHUB_1)
+       if (use_semaphore)
                /* a read return value of 1 means the semaphore was acquired */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
@@ -548,8 +565,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            req, 1 << vmid);
 
        /* TODO: Semaphore use for GFXHUB still needs debugging before it can be enabled there as well. */
-       if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
-           ring->funcs->vmhub == AMDGPU_MMHUB_1)
+       if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
index 4ef4d31f52318086961e6637e9a32ece34778f0b..2f52b7f4d25c828acf162ba572d680ecc355d070 100644 (file)
@@ -254,7 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
-       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
 };
 
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
index ba0e68057a89207df912040a37c49278b3a839d3..b3672d10ea54bfa76f01631265ca08fc0ccaa1e9 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
 #
 # Heterogeneous system architecture configuration
 #
index 313183b800328c11cb7c5ec8d25a362afbe7037c..ae161fe86ebb6cca838d3f5ff845dc02178b5c6e 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
 menu "Display Engine Configuration"
        depends on DRM && DRM_AMDGPU
 
index 7aac9568d3bec2a95e5ef422a567a27f970ab83d..803e59d9741119b9790bb471fb395ac575f1cb49 100644 (file)
@@ -3356,27 +3356,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
        return color_space;
 }
 
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
-       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-               return;
-
-       timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
-                                               const struct drm_display_info *info)
+static bool adjust_colour_depth_from_display_info(
+       struct dc_crtc_timing *timing_out,
+       const struct drm_display_info *info)
 {
+       enum dc_color_depth depth = timing_out->display_color_depth;
        int normalized_clk;
-       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-               return;
        do {
                normalized_clk = timing_out->pix_clk_100hz / 10;
                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
                if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                        normalized_clk /= 2;
                /* Adjusting pix clock following on HDMI spec based on colour depth */
-               switch (timing_out->display_color_depth) {
+               switch (depth) {
+               case COLOR_DEPTH_888:
+                       break;
                case COLOR_DEPTH_101010:
                        normalized_clk = (normalized_clk * 30) / 24;
                        break;
@@ -3387,14 +3381,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
                        normalized_clk = (normalized_clk * 48) / 24;
                        break;
                default:
-                       return;
+                       /* The above depths are the only ones valid for HDMI. */
+                       return false;
                }
-               if (normalized_clk <= info->max_tmds_clock)
-                       return;
-               reduce_mode_colour_depth(timing_out);
-
-       } while (timing_out->display_color_depth > COLOR_DEPTH_888);
-
+               if (normalized_clk <= info->max_tmds_clock) {
+                       timing_out->display_color_depth = depth;
+                       return true;
+               }
+       } while (--depth > COLOR_DEPTH_666);
+       return false;
 }
 
 static void fill_stream_properties_from_drm_display_mode(
@@ -3474,8 +3469,14 @@ static void fill_stream_properties_from_drm_display_mode(
 
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-       if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
-               adjust_colour_depth_from_display_info(timing_out, info);
+       if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+               if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+                   drm_mode_is_420_also(info, mode_in) &&
+                   timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+                       timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+                       adjust_colour_depth_from_display_info(timing_out, info);
+               }
+       }
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
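The reworked adjust_colour_depth_from_display_info() now walks the colour depth down one step at a time until the scaled TMDS clock fits the sink's limit, and reports failure so the caller can retry once with YCbCr 4:2:0. A worked example with assumed numbers (the mode, starting depth and TMDS limit below are illustrative, not from the patch):

	/* 3840x2160@60 RGB, pix_clk_100hz = 5940000, max_tmds_clock = 600000 kHz,
	 * starting from display_color_depth = COLOR_DEPTH_121212:
	 *
	 *   normalized_clk = 5940000 / 10 = 594000 kHz
	 *   COLOR_DEPTH_121212: 594000 * 36 / 24 = 891000 > 600000  ->  --depth
	 *   COLOR_DEPTH_101010: 594000 * 30 / 24 = 742500 > 600000  ->  --depth
	 *   COLOR_DEPTH_888:    594000 <= 600000  ->  keep 8 bpc, return true
	 */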
index 7873abea4112b434d0d1def2f5d3634aae0a9f23..5c3fcaa474109f74cc8c6b589f7f9ae11590beb8 100644 (file)
@@ -1625,6 +1625,7 @@ static enum bp_result construct_integrated_info(
                /* Don't need to check major revision as they are all 1 */
                switch (revision.minor) {
                case 11:
+               case 12:
                        result = get_integrated_info_v11(bp, info);
                        break;
                default:
index 790a2d211bd6db21a6a04cccd4c7bc9cad0f4a60..35c55e54eac014311111a6e85b0cf3701cc525f4 100644 (file)
@@ -471,12 +471,28 @@ static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
 
 }
 
+static bool rn_are_clock_states_equal(struct dc_clocks *a,
+               struct dc_clocks *b)
+{
+       if (a->dispclk_khz != b->dispclk_khz)
+               return false;
+       else if (a->dppclk_khz != b->dppclk_khz)
+               return false;
+       else if (a->dcfclk_khz != b->dcfclk_khz)
+               return false;
+       else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+               return false;
+
+       return true;
+}
+
 static struct clk_mgr_funcs dcn21_funcs = {
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
        .update_clocks = rn_update_clocks,
        .init_clocks = rn_init_clocks,
        .enable_pme_wa = rn_enable_pme_wa,
-       /* .dump_clk_registers = rn_dump_clk_registers, */
+       .are_clock_states_equal = rn_are_clock_states_equal,
        .notify_wm_ranges = rn_notify_wm_ranges
 };
 
@@ -518,36 +534,83 @@ struct clk_bw_params rn_bw_params = {
                .num_entries = 4,
        },
 
-       .wm_table = {
-               .entries = {
-                       {
-                               .wm_inst = WM_A,
-                               .wm_type = WM_TYPE_PSTATE_CHG,
-                               .pstate_latency_us = 23.84,
-                               .valid = true,
-                       },
-                       {
-                               .wm_inst = WM_B,
-                               .wm_type = WM_TYPE_PSTATE_CHG,
-                               .pstate_latency_us = 23.84,
-                               .valid = true,
-                       },
-                       {
-                               .wm_inst = WM_C,
-                               .wm_type = WM_TYPE_PSTATE_CHG,
-                               .pstate_latency_us = 23.84,
-                               .valid = true,
-                       },
-                       {
-                               .wm_inst = WM_D,
-                               .wm_type = WM_TYPE_PSTATE_CHG,
-                               .pstate_latency_us = 23.84,
-                               .valid = true,
-                       },
+};
+
+struct wm_table ddr4_wm_table = {
+       .entries = {
+               {
+                       .wm_inst = WM_A,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.72,
+                       .sr_exit_time_us = 6.09,
+                       .sr_enter_plus_exit_time_us = 7.14,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_B,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.72,
+                       .sr_exit_time_us = 10.12,
+                       .sr_enter_plus_exit_time_us = 11.48,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_C,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.72,
+                       .sr_exit_time_us = 10.12,
+                       .sr_enter_plus_exit_time_us = 11.48,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_D,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.72,
+                       .sr_exit_time_us = 10.12,
+                       .sr_enter_plus_exit_time_us = 11.48,
+                       .valid = true,
                },
        }
 };
 
+struct wm_table lpddr4_wm_table = {
+       .entries = {
+               {
+                       .wm_inst = WM_A,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 23.84,
+                       .sr_exit_time_us = 12.5,
+                       .sr_enter_plus_exit_time_us = 17.0,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_B,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 23.84,
+                       .sr_exit_time_us = 12.5,
+                       .sr_enter_plus_exit_time_us = 17.0,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_C,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 23.84,
+                       .sr_exit_time_us = 12.5,
+                       .sr_enter_plus_exit_time_us = 17.0,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_D,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 23.84,
+                       .sr_exit_time_us = 12.5,
+                       .sr_enter_plus_exit_time_us = 17.0,
+                       .valid = true,
+               },
+       }
+};
+
 static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
 {
        int i;
@@ -561,7 +624,7 @@ static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsi
        return 0;
 }
 
-static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)
 {
        int i, j = 0;
 
@@ -593,8 +656,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
                bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
        }
 
-       bw_params->vram_type = asic_id->vram_type;
-       bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
+       bw_params->vram_type = bios_info->memory_type;
+       bw_params->num_channels = bios_info->ma_channel_number;
 
        for (i = 0; i < WM_SET_COUNT; i++) {
                bw_params->wm_table.entries[i].wm_inst = i;
@@ -669,15 +732,24 @@ void rn_clk_mgr_construct(
                        ASSERT(clk_mgr->base.dprefclk_khz == 600000);
                        clk_mgr->base.dprefclk_khz = 600000;
                }
+
+               if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
+                       rn_bw_params.wm_table = lpddr4_wm_table;
+               } else {
+                       rn_bw_params.wm_table = ddr4_wm_table;
+               }
        }
 
        dce_clock_read_ss_info(clk_mgr);
 
        clk_mgr->base.bw_params = &rn_bw_params;
 
        if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
                pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
-               rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
+               if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+                       rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+               }
        }
 
        if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
index 12ba6fdf89b73213dceed7ee0f925a2e79fd11ae..4619f94f0ac78e138503345c54aa216b21b0c33c 100644 (file)
@@ -372,7 +372,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
 
        if (GPIO_RESULT_OK != dal_ddc_open(
                ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
-               dal_gpio_destroy_ddc(&ddc);
+               dal_ddc_close(ddc);
 
                return present;
        }
@@ -817,8 +817,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
                }
 
                case SIGNAL_TYPE_EDP: {
-                       read_current_link_settings_on_detect(link);
                        detect_edp_sink_caps(link);
+                       read_current_link_settings_on_detect(link);
                        sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                        sink_caps.signal = SIGNAL_TYPE_EDP;
                        break;
index 7f904d55c1bceb6b136b64ad0818daddd2d778f4..81789191d4ec0aebc3f0c59e24903631ff60866a 100644 (file)
@@ -586,7 +586,7 @@ bool dal_ddc_service_query_ddc_data(
 bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
                struct aux_payload *payload)
 {
-       uint8_t retrieved = 0;
+       uint32_t retrieved = 0;
        bool ret = 0;
 
        if (!ddc)
index 0f59b68aa4c245e0d7cc4b18150ddb53c741a663..504055fc70e8970046df79bccc1e243b069d6fce 100644 (file)
@@ -3522,7 +3522,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
        if (link_enc->funcs->fec_set_enable &&
                        link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
                if (link->fec_state == dc_link_fec_ready && enable) {
-                       msleep(1);
+                       /* According to the DP spec, the FEC enable sequence
+                        * can first be transmitted any time after 1000 LL
+                        * codes have been sent on the link following link
+                        * training completion. The slowest case, 1-lane RBR,
+                        * needs at most 6.173 us to transmit 1000 LL codes,
+                        * so a 7 microsecond delay is used instead.
+                        */
+                       udelay(7);
                        link_enc->funcs->fec_set_enable(link_enc, true);
                        link->fec_state = dc_link_fec_enabled;
                } else if (link->fec_state == dc_link_fec_enabled && !enable) {
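The 6.173 us figure in the new comment checks out under standard DP assumptions (not stated in the patch): RBR runs at 1.62 Gbit/s per lane and each 8b/10b link symbol carries 10 bits on the wire, so the slowest configuration, a single RBR lane, needs

	1000 LL codes * 10 bits / 1.62 Gbit/s = 10000 / 1.62e9 s ≈ 6.173 us

to transmit 1000 LL codes, which the patch rounds up to udelay(7).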
index e472608faf3351ba112a1968792be6f6e4aa207a..793c0cec407f9d36661216d55b55b0ba1fd3c6cd 100644 (file)
@@ -583,6 +583,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
        uint8_t reply;
        bool payload_reply = true;
        enum aux_channel_operation_result operation_result;
+       bool retry_on_defer = false;
+
        int aux_ack_retries = 0,
                aux_defer_retries = 0,
                aux_i2c_defer_retries = 0,
@@ -613,8 +615,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
                        break;
 
                        case AUX_TRANSACTION_REPLY_AUX_DEFER:
-                       case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
                        case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+                               retry_on_defer = true;
+                               /* fall through */
+                       case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
                                if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
                                        goto fail;
                                } else {
@@ -647,15 +651,24 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
                        break;
 
                case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
-                       if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
-                               goto fail;
-                       else {
-                               /*
-                                * DP 1.4, 2.8.2:  AUX Transaction Response/Reply Timeouts
-                                * According to the DP spec there should be 3 retries total
-                                * with a 400us wait inbetween each. Hardware already waits
-                                * for 550us therefore no wait is required here.
-                                */
+                       // Check whether a DEFER had occurred before the timeout.
+                       // If so, treat timeout as a DEFER.
+                       if (retry_on_defer) {
+                               if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+                                       goto fail;
+                               else if (payload->defer_delay > 0)
+                                       msleep(payload->defer_delay);
+                       } else {
+                               if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+                                       goto fail;
+                               else {
+                                       /*
+                                        * DP 1.4, 2.8.2:  AUX Transaction Response/Reply Timeouts
+                                        * According to the DP spec there should be 3 retries total
+                                        * with a 400us wait in between each. Hardware already waits
+                                        * for 550us therefore no wait is required here.
+                                        */
+                               }
                        }
                        break;
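The net retry policy of this transfer loop after the change, summarized as a sketch (constants and fields as in the hunks above):

	/* AUX_DEFER / I2C_OVER_AUX_DEFER reply -> retry_on_defer = true, then
	 *     retry against AUX_MAX_DEFER_RETRIES
	 * I2C_OVER_AUX_NACK reply              -> retry against AUX_MAX_DEFER_RETRIES,
	 *     flag untouched
	 * FAILED_TIMEOUT, retry_on_defer set   -> treat as a DEFER: count against
	 *     AUX_MAX_DEFER_RETRIES and sleep payload->defer_delay ms
	 * FAILED_TIMEOUT otherwise             -> count against AUX_MAX_TIMEOUT_RETRIES;
	 *     no extra wait, hardware already waits 550us
	 */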
 
index 63f3bddba7daaa50ab93dbd9a42fc13327208d84..10b47986526bd8a04b51243303b84657971eb344 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
 #
 # Makefile for DCN.
 
index 09793336d84f6944b44092fbce09e8f5d74b2283..23ff2f1c75b5ca2c7333f55d74a7a2b81b2999db 100644 (file)
@@ -923,7 +923,9 @@ static const struct resource_caps res_cap_nv14 = {
                .num_dwb = 1,
                .num_ddc = 5,
                .num_vmid = 16,
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
                .num_dsc = 5,
+#endif
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
@@ -1536,13 +1538,20 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
 
 static void acquire_dsc(struct resource_context *res_ctx,
                        const struct resource_pool *pool,
-                       struct display_stream_compressor **dsc)
+                       struct display_stream_compressor **dsc,
+                       int pipe_idx)
 {
        int i;
 
        ASSERT(*dsc == NULL);
        *dsc = NULL;
 
+       if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+               *dsc = pool->dscs[pipe_idx];
+               res_ctx->is_dsc_acquired[pipe_idx] = true;
+               return;
+       }
+
        /* Find first free DSC */
        for (i = 0; i < pool->res_cap->num_dsc; i++)
                if (!res_ctx->is_dsc_acquired[i]) {
@@ -1585,7 +1594,7 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
                if (pipe_ctx->stream != dc_stream)
                        continue;
 
-               acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
+               acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
 
                /* The number of DSCs can be less than the number of pipes */
                if (!pipe_ctx->stream_res.dsc) {
@@ -1785,7 +1794,7 @@ bool dcn20_split_stream_for_odm(
        next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
        if (next_odm_pipe->stream->timing.flags.DSC == 1) {
-               acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc);
+               acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
                ASSERT(next_odm_pipe->stream_res.dsc);
                if (next_odm_pipe->stream_res.dsc == NULL)
                        return false;
index 4b34016164348706e8be44aca42ef24d0aa89b13..fcb3877b4fcb2896c214f1bff48965a633904e1b 100644 (file)
@@ -492,15 +492,23 @@ void enc2_stream_encoder_dp_unblank(
                                DP_VID_N_MUL, n_multiply);
        }
 
-       /* set DIG_START to 0x1 to reset FIFO */
+       /* make sure stream is disabled before resetting steer fifo */
+       REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
+       REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
 
+       /* set DIG_START to 0x1 to reset FIFO */
        REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+       udelay(1);
 
        /* write 0 to take the FIFO out of reset */
 
        REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
 
-       /* switch DP encoder to CRTC data */
+       /* switch DP encoder to CRTC data, but reset the steer fifo first. It
+        * may overflow during a mode transition and sometimes doesn't recover.
+        */
+       REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
+       udelay(10);
 
        REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
 
index 14113ccf498d27c6d9eb48790cc83821fabb989e..5b8c17564bc198de20570550863f8d5c662b8c9c 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
 #
 # Makefile for DCN21.
 
index 459bd9a5caed54dbdb1cffcbe2ea7dc8ba70ff7c..b29b2c99a564edb951c8d89dd1cae0caf638325c 100644 (file)
@@ -23,6 +23,8 @@
  *
  */
 
+#include <linux/slab.h>
+
 #include "dm_services.h"
 #include "dc.h"
 
@@ -257,7 +259,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .vmm_page_size_bytes = 4096,
        .dram_clock_change_latency_us = 23.84,
        .return_bus_width_bytes = 64,
-       .dispclk_dppclk_vco_speed_mhz = 3550,
+       .dispclk_dppclk_vco_speed_mhz = 3600,
        .xfc_bus_transport_time_us = 4,
        .xfc_xbuf_latency_tolerance_us = 4,
        .use_urgent_burst_bw = 1,
@@ -1000,6 +1002,8 @@ static void calculate_wm_set_for_vlevel(
        pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
 
        dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+       dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+       dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
 
        wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
        wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
@@ -1017,14 +1021,21 @@ static void calculate_wm_set_for_vlevel(
 
 static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
 {
+       int i;
+
        kernel_fpu_begin();
        if (dc->bb_overrides.sr_exit_time_ns) {
-               bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+               for (i = 0; i < WM_SET_COUNT; i++) {
+                         dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
+                                         dc->bb_overrides.sr_exit_time_ns / 1000.0;
+               }
        }
 
        if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
-               bb->sr_enter_plus_exit_time_us =
-                               dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+               for (i = 0; i < WM_SET_COUNT; i++) {
+                         dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us =
+                                         dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+               }
        }
 
        if (dc->bb_overrides.urgent_latency_ns) {
@@ -1032,9 +1043,12 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
        }
 
        if (dc->bb_overrides.dram_clock_change_latency_ns) {
-               bb->dram_clock_change_latency_us =
+               for (i = 0; i < WM_SET_COUNT; i++) {
+                       dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us =
                                dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+               }
        }
+
        kernel_fpu_end();
 }
 
index 970737217e53a18ee4331e3cc3aa570c63a79780..641ffb7cfaed4dbc739a707176e07baa08b65268 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
 #
 # Makefile for the 'dsc' sub-component of DAL.
 
index 4e18e77dcf422d0db1c76319595a897c300976fa..026e6a2a2c44cc2c6c01ee9755e8e8d206a16af0 100644 (file)
@@ -69,6 +69,8 @@ struct wm_range_table_entry {
        unsigned int wm_inst;
        unsigned int wm_type;
        double pstate_latency_us;
+       double sr_exit_time_us;
+       double sr_enter_plus_exit_time_us;
        bool valid;
 };
 
index bb012cb1a9f58bb515db94502a461a181c49d89f..c7fbb9c3ad6b3fe59814f2f3589dbde139296f59 100644 (file)
@@ -42,7 +42,7 @@ struct aux_payload {
        bool write;
        bool mot;
        uint32_t address;
-       uint8_t length;
+       uint32_t length;
        uint8_t *data;
        /*
         * used to return the reply type of the transaction
index 16e69bbc69aaa7b04a458880199962199abdf824..5437b50e9f90d1c6ee27fe966b9c42771ecd4023 100644 (file)
@@ -37,8 +37,8 @@
 #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
 /* Number of elements in the render times cache array */
 #define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_MAX_MARGIN 2500
+/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_EXIT_MARGIN 2000
 /* Threshold to change BTR multiplier (to avoid frequent changes) */
 #define BTR_DRIFT_MARGIN 2000
 /* Threshold to exit fixed refresh rate */
@@ -254,22 +254,24 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
        unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
        unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
        unsigned int frames_to_insert = 0;
+       unsigned int min_frame_duration_in_ns = 0;
+       unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
        unsigned int delta_from_mid_point_delta_in_us;
-       unsigned int max_render_time_in_us =
-                       in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
+
+       min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+               (1000000000ULL * 1000000),
+               in_out_vrr->max_refresh_in_uhz)));
 
        /* Program BTR */
-       if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
+       if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
                /* Exit Below the Range */
                if (in_out_vrr->btr.btr_active) {
                        in_out_vrr->btr.frame_counter = 0;
                        in_out_vrr->btr.btr_active = false;
                }
-       } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
+       } else if (last_render_time_in_us > max_render_time_in_us) {
                /* Enter Below the Range */
-               if (!in_out_vrr->btr.btr_active) {
-                       in_out_vrr->btr.btr_active = true;
-               }
+               in_out_vrr->btr.btr_active = true;
        }
 
        /* BTR set to "not active" so disengage */
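min_frame_duration_in_ns converts the maximum refresh rate from micro-hertz back into a frame duration in nanoseconds. With an assumed 60 Hz cap (stored as 60,000,000 uHz):

	(1000000000ULL * 1000000) / 60000000 = 16666666 ns ≈ 16.67 ms per frame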
@@ -325,9 +327,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
                /* Choose number of frames to insert based on how close it
                 * can get to the mid point of the variable range.
                 */
-               if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
-                               (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
-                                               mid_point_frames_floor < 2)) {
+               if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
                        frames_to_insert = mid_point_frames_ceil;
                        delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
                                        delta_from_mid_point_in_us_1;
@@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
                if (in_out_vrr->btr.frames_to_insert != 0 &&
                                delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
                        if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
-                                       max_render_time_in_us) &&
+                                       in_out_vrr->max_duration_in_us) &&
                                ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
                                        in_out_vrr->min_duration_in_us))
                                frames_to_insert = in_out_vrr->btr.frames_to_insert;
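With the revert, frames_to_insert is again picked purely by which frame multiple lands closer to the mid-point of the variable range. A worked example with assumed panel numbers (not from the patch): a 40-60 Hz panel gives min_duration_in_us = 16667 and max_duration_in_us = 25000, so btr.mid_point_in_us = 20833; for last_render_time_in_us = 35000:

	/* mid_point_frames_ceil  = 2: 35000 / 2 = 17500, delta_1 = |17500 - 20833| = 3333
	 * mid_point_frames_floor = 1: 35000 / 1 = 35000, delta_2 = |35000 - 20833| = 14167
	 * delta_1 < delta_2  ->  insert 2 frames of 17500 us each, inside [16667, 25000]
	 */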
@@ -796,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                refresh_range = in_out_vrr->max_refresh_in_uhz -
                                in_out_vrr->min_refresh_in_uhz;
 
-               in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
-                               2 * in_out_vrr->min_duration_in_us;
-               if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
-                       in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
-
                in_out_vrr->supported = true;
        }
 
@@ -816,7 +811,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
        in_out_vrr->btr.inserted_duration_in_us = 0;
        in_out_vrr->btr.frames_to_insert = 0;
        in_out_vrr->btr.frame_counter = 0;
-
        in_out_vrr->btr.mid_point_in_us =
                                (in_out_vrr->min_duration_in_us +
                                 in_out_vrr->max_duration_in_us) / 2;
index dbe7835aabcf747c25825e76dac9d2e047bb9025..dc187844d10b1f5e99e4423f15cadb6b422643d5 100644 (file)
@@ -92,7 +92,6 @@ struct mod_vrr_params_btr {
        uint32_t inserted_duration_in_us;
        uint32_t frames_to_insert;
        uint32_t frame_counter;
-       uint32_t margin_in_us;
 };
 
 struct mod_vrr_params_fixed_refresh {
index 5ff7ccedfbed45212865c75bad0289f42688d8e9..a23729d3174b4e205c3bb67007879b0f526e7e61 100644 (file)
@@ -866,6 +866,7 @@ static int smu_sw_init(void *handle)
        smu->smu_baco.platform_support = false;
 
        mutex_init(&smu->sensor_lock);
+       mutex_init(&smu->metrics_lock);
 
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
index ce3566ca3e24bc9c01ea1f382fa7b80b7692d974..472e9fed411a4d087a1fab5add0f6031a9123e21 100644 (file)
@@ -862,18 +862,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu,
        struct smu_table_context *smu_table = &smu->smu_table;
        int ret = 0;
 
+       mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time ||
             time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
                ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
                        pr_info("Failed to export SMU metrics table!\n");
+                       mutex_unlock(&smu->metrics_lock);
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }
 
        memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+       mutex_unlock(&smu->metrics_lock);
 
        return ret;
 }
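The same pattern is applied verbatim to navi10 and vega20 below: every reader serializes on the new metrics_lock, refreshes the shared metrics_table only when the cached copy is stale, and copies the snapshot out before unlocking, so concurrent sysfs readers no longer race on the buffer. The shape of the pattern as a sketch (the two helper names are illustrative, not from the driver):

	mutex_lock(&smu->metrics_lock);
	if (metrics_cache_stale(smu_table)) {		/* illustrative helper */
		ret = refresh_metrics_from_smu(smu);	/* illustrative helper */
		if (ret) {
			mutex_unlock(&smu->metrics_lock);	/* never return locked */
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}
	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
	mutex_unlock(&smu->metrics_lock);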
@@ -1313,12 +1316,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
                                        "VR",
                                        "COMPUTE",
                                        "CUSTOM"};
+       static const char *title[] = { "PROFILE_INDEX(NAME)" };
        uint32_t i, size = 0;
        int16_t workload_type = 0;
 
        if (!smu->pm_enabled || !buf)
                return -EINVAL;
 
+       size += sprintf(buf + size, "%16s\n", title[0]);
+
        for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
                /*
                 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
index ac9758305ab3be1063025e67b126e87f81aeab79..41fce75b263f7b7d6d5c665f93a72e6259672c73 100644 (file)
@@ -349,6 +349,7 @@ struct smu_context
        const struct pptable_funcs      *ppt_funcs;
        struct mutex                    mutex;
        struct mutex                    sensor_lock;
+       struct mutex                    metrics_lock;
        uint64_t pool_size;
 
        struct smu_table_context        smu_table;
index 4a14fd1f9fd59ad7db9cd4727c91ef44ccffd297..ca62e92e5a4f303ace3810a7ad134f11281ec795 100644 (file)
@@ -562,17 +562,20 @@ static int navi10_get_metrics_table(struct smu_context *smu,
        struct smu_table_context *smu_table = &smu->smu_table;
        int ret = 0;
 
+       mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
                ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
                        pr_info("Failed to export SMU metrics table!\n");
+                       mutex_unlock(&smu->metrics_lock);
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }
 
        memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+       mutex_unlock(&smu->metrics_lock);
 
        return ret;
 }
index 60b9ff097142639d18fbc037f6cb2cfcfa2db3d8..0d3a3b0a934e9a767ebe98fa105c8ac80f39e863 100644 (file)
@@ -1678,17 +1678,20 @@ static int vega20_get_metrics_table(struct smu_context *smu,
        struct smu_table_context *smu_table = &smu->smu_table;
        int ret = 0;
 
+       mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
                ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
                        pr_info("Failed to export SMU metrics table!\n");
+                       mutex_unlock(&smu->metrics_lock);
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }
 
        memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+       mutex_unlock(&smu->metrics_lock);
 
        return ret;
 }
index 875a3a9eabfa1e08068dd265c5dbfc7cf3f36411..7d0e7b031e447f24a83e9197d92879754eab4169 100644 (file)
@@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
        .get_modes = malidp_mw_connector_get_modes,
        .mode_valid = malidp_mw_connector_mode_valid,
 };
index 273dd80fabf3cadb53d30be128fbd29c69f969aa..e6afe4faeca6d02b194a320d5b67af61d8cb6529 100644 (file)
@@ -393,7 +393,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
                        memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
                        idx += req->u.i2c_read.transactions[i].num_bytes;
 
-                       buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+                       buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
                        buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
                        idx++;
                }
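The one-bit change moves No_Stop_Bit down to bit 4, directly above the 4-bit transaction delay, as the masks in this hunk imply (field names abbreviated from req->u.i2c_read.transactions[i]):

	/* bit:   7   6   5        4               3..0
	 *      [ 0 | 0 | 0 | No_Stop_Bit | I2C_Data_Transaction_Delay ]
	 */
	buf[idx]  = (no_stop_bit & 0x1) << 4;	/* << 5 landed the flag one bit too high */
	buf[idx] |= (delay & 0xf);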
@@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
                    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
                        mstb->tx_slots[txmsg->seqno] = NULL;
                }
+               mgr->is_waiting_for_dwn_reply = false;
+
        }
 out:
        if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1199,6 +1201,7 @@ out:
        }
        mutex_unlock(&mgr->qlock);
 
+       drm_dp_mst_kick_tx(mgr);
        return ret;
 }
 
@@ -1913,73 +1916,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
        return parent_lct + 1;
 }
 
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+       switch (pdt) {
+       case DP_PEER_DEVICE_DP_LEGACY_CONV:
+       case DP_PEER_DEVICE_SST_SINK:
+               return true;
+       case DP_PEER_DEVICE_MST_BRANCHING:
+               /* For sst branch device */
+               /* An MST branch without message capability acts as an SST branch device */
+                       return true;
+
+               return false;
+       }
+       return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+                   bool new_mcs)
 {
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
        struct drm_dp_mst_branch *mstb;
        u8 rad[8], lct;
        int ret = 0;
 
-       if (port->pdt == new_pdt)
+       if (port->pdt == new_pdt && port->mcs == new_mcs)
                return 0;
 
        /* Teardown the old pdt, if there is one */
-       switch (port->pdt) {
-       case DP_PEER_DEVICE_DP_LEGACY_CONV:
-       case DP_PEER_DEVICE_SST_SINK:
-               /*
-                * If the new PDT would also have an i2c bus, don't bother
-                * with reregistering it
-                */
-               if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-                   new_pdt == DP_PEER_DEVICE_SST_SINK) {
-                       port->pdt = new_pdt;
-                       return 0;
-               }
+       if (port->pdt != DP_PEER_DEVICE_NONE) {
+               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+                       /*
+                        * If the new PDT would also have an i2c bus,
+                        * don't bother with reregistering it
+                        */
+                       if (new_pdt != DP_PEER_DEVICE_NONE &&
+                           drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+                               port->pdt = new_pdt;
+                               port->mcs = new_mcs;
+                               return 0;
+                       }
 
-               /* remove i2c over sideband */
-               drm_dp_mst_unregister_i2c_bus(&port->aux);
-               break;
-       case DP_PEER_DEVICE_MST_BRANCHING:
-               mutex_lock(&mgr->lock);
-               drm_dp_mst_topology_put_mstb(port->mstb);
-               port->mstb = NULL;
-               mutex_unlock(&mgr->lock);
-               break;
+                       /* remove i2c over sideband */
+                       drm_dp_mst_unregister_i2c_bus(&port->aux);
+               } else {
+                       mutex_lock(&mgr->lock);
+                       drm_dp_mst_topology_put_mstb(port->mstb);
+                       port->mstb = NULL;
+                       mutex_unlock(&mgr->lock);
+               }
        }
 
        port->pdt = new_pdt;
-       switch (port->pdt) {
-       case DP_PEER_DEVICE_DP_LEGACY_CONV:
-       case DP_PEER_DEVICE_SST_SINK:
-               /* add i2c over sideband */
-               ret = drm_dp_mst_register_i2c_bus(&port->aux);
-               break;
+       port->mcs = new_mcs;
 
-       case DP_PEER_DEVICE_MST_BRANCHING:
-               lct = drm_dp_calculate_rad(port, rad);
-               mstb = drm_dp_add_mst_branch_device(lct, rad);
-               if (!mstb) {
-                       ret = -ENOMEM;
-                       DRM_ERROR("Failed to create MSTB for port %p", port);
-                       goto out;
-               }
+       if (port->pdt != DP_PEER_DEVICE_NONE) {
+               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+                       /* add i2c over sideband */
+                       ret = drm_dp_mst_register_i2c_bus(&port->aux);
+               } else {
+                       lct = drm_dp_calculate_rad(port, rad);
+                       mstb = drm_dp_add_mst_branch_device(lct, rad);
+                       if (!mstb) {
+                               ret = -ENOMEM;
+                               DRM_ERROR("Failed to create MSTB for port %p",
+                                         port);
+                               goto out;
+                       }
 
-               mutex_lock(&mgr->lock);
-               port->mstb = mstb;
-               mstb->mgr = port->mgr;
-               mstb->port_parent = port;
+                       mutex_lock(&mgr->lock);
+                       port->mstb = mstb;
+                       mstb->mgr = port->mgr;
+                       mstb->port_parent = port;
 
-               /*
-                * Make sure this port's memory allocation stays
-                * around until its child MSTB releases it
-                */
-               drm_dp_mst_get_port_malloc(port);
-               mutex_unlock(&mgr->lock);
+                       /*
+                        * Make sure this port's memory allocation stays
+                        * around until its child MSTB releases it
+                        */
+                       drm_dp_mst_get_port_malloc(port);
+                       mutex_unlock(&mgr->lock);
 
-               /* And make sure we send a link address for this */
-               ret = 1;
-               break;
+                       /* And make sure we send a link address for this */
+                       ret = 1;
+               }
        }
 
 out:
@@ -2132,9 +2152,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
                goto error;
        }
 
-       if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-            port->pdt == DP_PEER_DEVICE_SST_SINK) &&
-           port->port_num >= DP_MST_LOGICAL_PORT_0) {
+       if (port->pdt != DP_PEER_DEVICE_NONE &&
+           drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
                port->cached_edid = drm_get_edid(port->connector,
                                                 &port->aux.ddc);
                drm_connector_set_tile_property(port->connector);
@@ -2198,6 +2217,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
        struct drm_dp_mst_port *port;
        int old_ddps = 0, ret;
        u8 new_pdt = DP_PEER_DEVICE_NONE;
+       bool new_mcs = 0;
        bool created = false, send_link_addr = false, changed = false;
 
        port = drm_dp_get_port(mstb, port_msg->port_number);
@@ -2242,7 +2262,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
        port->input = port_msg->input_port;
        if (!port->input)
                new_pdt = port_msg->peer_device_type;
-       port->mcs = port_msg->mcs;
+       new_mcs = port_msg->mcs;
        port->ddps = port_msg->ddps;
        port->ldps = port_msg->legacy_device_plug_status;
        port->dpcd_rev = port_msg->dpcd_revision;
@@ -2269,7 +2289,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
                }
        }
 
-       ret = drm_dp_port_set_pdt(port, new_pdt);
+       ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
        if (ret == 1) {
                send_link_addr = true;
        } else if (ret < 0) {
@@ -2283,7 +2303,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
         * we're coming out of suspend. In this case, always resend the link
         * address if there's an MSTB on this port
         */
-       if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+       if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+           port->mcs)
                send_link_addr = true;
 
        if (port->connector)
@@ -2318,8 +2339,9 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 {
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
        struct drm_dp_mst_port *port;
-       int old_ddps, ret;
+       int old_ddps, old_input, ret, i;
        u8 new_pdt;
+       bool new_mcs;
        bool dowork = false, create_connector = false;
 
        port = drm_dp_get_port(mstb, conn_stat->port_number);
@@ -2349,8 +2371,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        }
 
        old_ddps = port->ddps;
+       old_input = port->input;
        port->input = conn_stat->input_port;
-       port->mcs = conn_stat->message_capability_status;
        port->ldps = conn_stat->legacy_device_plug_status;
        port->ddps = conn_stat->displayport_device_plug_status;
 
@@ -2363,8 +2385,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        }
 
        new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
-       ret = drm_dp_port_set_pdt(port, new_pdt);
+       new_mcs = conn_stat->message_capability_status;
+       ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
        if (ret == 1) {
                dowork = true;
        } else if (ret < 0) {
@@ -2373,6 +2395,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
                dowork = false;
        }
 
+       if (!old_input && old_ddps != port->ddps && !port->ddps) {
+               for (i = 0; i < mgr->max_payloads; i++) {
+                       struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+                       struct drm_dp_mst_port *port_validated;
+
+                       if (!vcpi)
+                               continue;
+
+                       port_validated =
+                               container_of(vcpi, struct drm_dp_mst_port, vcpi);
+                       port_validated =
+                               drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+                       if (!port_validated) {
+                               mutex_lock(&mgr->payload_lock);
+                               vcpi->num_slots = 0;
+                               mutex_unlock(&mgr->payload_lock);
+                       } else {
+                               drm_dp_mst_topology_put_port(port_validated);
+                       }
+               }
+       }
+
        if (port->connector)
                drm_modeset_unlock(&mgr->base.lock);
        else if (create_connector)
@@ -2718,9 +2762,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
        ret = process_single_tx_qlock(mgr, txmsg, false);
        if (ret == 1) {
                /* txmsg is sent it should be in the slots now */
+               mgr->is_waiting_for_dwn_reply = true;
                list_del(&txmsg->next);
        } else if (ret) {
                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+               mgr->is_waiting_for_dwn_reply = false;
                list_del(&txmsg->next);
                if (txmsg->seqno != -1)
                        txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2760,7 +2806,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
        }
 
-       if (list_is_singular(&mgr->tx_msg_downq))
+       if (list_is_singular(&mgr->tx_msg_downq) &&
+           !mgr->is_waiting_for_dwn_reply)
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -3678,6 +3725,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
        mutex_lock(&mgr->qlock);
        txmsg->state = DRM_DP_SIDEBAND_TX_RX;
        mstb->tx_slots[slot] = NULL;
+       mgr->is_waiting_for_dwn_reply = false;
        mutex_unlock(&mgr->qlock);
 
        wake_up_all(&mgr->tx_waitq);
@@ -3687,6 +3735,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 no_msg:
        drm_dp_mst_topology_put_mstb(mstb);
 clear_down_rep_recv:
+       mutex_lock(&mgr->qlock);
+       mgr->is_waiting_for_dwn_reply = false;
+       mutex_unlock(&mgr->qlock);
        memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 
        return 0;
@@ -3896,6 +3947,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
        switch (port->pdt) {
        case DP_PEER_DEVICE_NONE:
        case DP_PEER_DEVICE_MST_BRANCHING:
+               if (!port->mcs)
+                       ret = connector_status_connected;
                break;
 
        case DP_PEER_DEVICE_SST_SINK:
@@ -4497,7 +4550,7 @@ static void drm_dp_tx_work(struct work_struct *work)
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
        mutex_lock(&mgr->qlock);
-       if (!list_empty(&mgr->tx_msg_downq))
+       if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
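Across the hunks above, the new mgr->is_waiting_for_dwn_reply flag, always accessed under mgr->qlock, serializes sideband traffic to a single outstanding down request. A sketch of its lifecycle as shown in the hunks:

	/* set:     process_single_down_tx_qlock() after a successful send
	 * checked:  drm_dp_queue_down_tx() and drm_dp_tx_work() only send while clear
	 * cleared:  drm_dp_mst_handle_down_rep() when a reply arrives,
	 *           process_single_down_tx_qlock() on a send failure,
	 *           drm_dp_mst_wait_tx_reply() on timeout; drm_dp_mst_kick_tx()
	 *           then re-arms the queue
	 */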
@@ -4508,7 +4561,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
        if (port->connector)
                port->mgr->cbs->destroy_connector(port->mgr, port->connector);
 
-       drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+       drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
        drm_dp_mst_put_port_malloc(port);
 }
 
index 8ebeccdeed2334ebb06f9562668705074c7342b1..d8e8f3960f4dbd4f86139957fbb11351ceffb6f2 100644 (file)
@@ -1283,7 +1283,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
         * Changes struct fb_var_screeninfo are currently not pushed back
         * to KMS, hence fail if different settings are requested.
         */
-       if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+       if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
            var->xres > fb->width || var->yres > fb->height ||
            var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
                DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
@@ -1308,6 +1308,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
        }
 
+       /*
+        * Likewise, bits_per_pixel should be rounded up to a supported value.
+        */
+       var->bits_per_pixel = fb->format->cpp[0] * 8;
+
        /*
         * drm fbdev emulation doesn't support changing the pixel format at all,
         * so reject all pixel format changing requests.
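Together, the relaxed comparison and the rounding step change behaviour for legacy fbdev clients that request a smaller depth than the framebuffer provides. An illustrative example (values assumed, not from the patch):

	/* fbdev on an XRGB8888 framebuffer: fb->format->cpp[0] * 8 == 32
	 * app sets var->bits_per_pixel = 16 via FBIOPUT_VSCREENINFO:
	 *   before: 16 != 32          -> request rejected
	 *   after:  16 <= 32, rounded -> bits_per_pixel = 32, validation continues
	 */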
index 7ae087b0504df7c7bacc2a4a4be340e017bfaf80..88b6fcaa20be0976d4ff3f3009a75d548a6398f7 100644 (file)
@@ -1313,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
 
+       component_del(dev, &gsc_component_ops);
        pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
 
index afaf4bea21cfc32f86aa5f5f8124d4f85ad79b15..9278bcfad1bf4423cc44de353e60d0c7981256a1 100644 (file)
@@ -503,7 +503,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
         *      Map the GTT and the stolen memory area
         */
        if (!resume)
-               dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+               dev_priv->gtt_map = ioremap(pg->gtt_phys_start,
                                                gtt_pages << PAGE_SHIFT);
        if (!dev_priv->gtt_map) {
                dev_err(dev->dev, "Failure to map gtt.\n");
index 7005f8f69c683749a0e7493c001d929e7c1fa091..0900052fc4844fda252d362811f7e5f39deba7fa 100644 (file)
@@ -256,7 +256,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
                                                            PSB_AUX_RESOURCE);
                        resource_len = pci_resource_len(dev_priv->aux_pdev,
                                                        PSB_AUX_RESOURCE);
-                       dev_priv->aux_reg = ioremap_nocache(resource_start,
+                       dev_priv->aux_reg = ioremap(resource_start,
                                                            resource_len);
                        if (!dev_priv->aux_reg)
                                goto out_err;
index 2fd4ca91a62d938c7a79e6076cc816982273434d..8dd5a43e548658c255db8cf03714ef49fe053057 100644 (file)
@@ -211,7 +211,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
 
        ioaddr = pci_resource_start(pdev, 1);
        iosize = pci_resource_len(pdev, 1);
-       priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+       priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
        if (!priv->mmio) {
                DRM_ERROR("Cannot map mmio region\n");
                return -ENOMEM;
index 85e6b2bbb34fc1132716544254d53d2a6b3dc17e..3a5ac13d58018c7a629c3816ae014214cf37e659 100644 (file)
@@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
                }
 
                /* Force CDCLK to 2*BCLK as long as we need audio powered. */
-               if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               if (IS_GEMINILAKE(dev_priv))
                        glk_force_audio_cdclk(dev_priv, true);
 
                if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
 
        /* Stop forcing CDCLK to 2*BCLK once audio no longer needs to be powered. */
        if (--dev_priv->audio_power_refcount == 0)
-               if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+               if (IS_GEMINILAKE(dev_priv))
                        glk_force_audio_cdclk(dev_priv, false);
 
        intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
index c7c2b349858d221310da1513cf6e0d6d8b0fbe1d..2a27fb5d7dc6f0a8183a39e8f88783f73934c7b7 100644 (file)
@@ -3986,6 +3986,7 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
        if (conn_state->content_protection ==
            DRM_MODE_CONTENT_PROTECTION_DESIRED)
                intel_hdcp_enable(to_intel_connector(conn_state->connector),
+                                 crtc_state->cpu_transcoder,
                                  (u8)conn_state->hdcp_content_type);
 }
 
@@ -4089,7 +4090,9 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
        if (conn_state->content_protection ==
            DRM_MODE_CONTENT_PROTECTION_DESIRED ||
            content_protection_type_changed)
-               intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type);
+               intel_hdcp_enable(connector,
+                                 crtc_state->cpu_transcoder,
+                                 (u8)conn_state->hdcp_content_type);
 }
 
 static void
index 6f5e3bd13ad186fabdb451113949959a2699aa5c..301897791627e8c8f602193b51b345b623c692a2 100644 (file)
@@ -4515,8 +4515,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
 {
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       i915_reg_t reg;
-       u32 trans_ddi_func_ctl2_val;
 
        if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
                return;
@@ -4524,10 +4522,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
        DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
                      transcoder_name(old_crtc_state->cpu_transcoder));
 
-       reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
-       trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
-                                   PORT_SYNC_MODE_MASTER_SELECT_MASK);
-       I915_WRITE(reg, trans_ddi_func_ctl2_val);
+       I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
 }
 
 static void intel_fdi_normal_train(struct intel_crtc *crtc)
@@ -15112,7 +15107,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                return ret;
 
        fb_obj_bump_render_priority(obj);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
 
        if (!new_plane_state->base.fence) { /* implicit fencing */
                struct dma_fence *fence;
index ce1b64f4dd44a9d6fa7465386c585614bb2d2858..12ba74788cceced326e655b93480d5fe876d6531 100644 (file)
@@ -3688,6 +3688,151 @@ static const struct i915_power_well_desc icl_power_wells[] = {
        },
 };
 
+static const struct i915_power_well_desc ehl_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DC off",
+               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = SKL_DISP_DC_OFF,
+       },
+       {
+               .name = "power well 2",
+               .domains = ICL_PW_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "power well 3",
+               .domains = ICL_PW_3_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DDI A IO",
+               .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+               },
+       },
+       {
+               .name = "DDI B IO",
+               .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+               },
+       },
+       {
+               .name = "DDI C IO",
+               .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+               },
+       },
+       {
+               .name = "DDI D IO",
+               .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+               },
+       },
+       {
+               .name = "AUX A",
+               .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+               },
+       },
+       {
+               .name = "AUX B",
+               .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+               },
+       },
+       {
+               .name = "AUX C",
+               .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+               },
+       },
+       {
+               .name = "AUX D",
+               .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+               },
+       },
+       {
+               .name = "power well 4",
+               .domains = ICL_PW_4_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+                       .hsw.has_fuses = true,
+                       .hsw.irq_pipe_mask = BIT(PIPE_C),
+               },
+       },
+};
+
 static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "always-on",
@@ -3832,7 +3977,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX A",
                .domains = TGL_AUX_A_IO_POWER_DOMAINS,
-               .ops = &icl_combo_phy_aux_power_well_ops,
+               .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3842,7 +3987,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX B",
                .domains = TGL_AUX_B_IO_POWER_DOMAINS,
-               .ops = &icl_combo_phy_aux_power_well_ops,
+               .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -3852,7 +3997,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
        {
                .name = "AUX C",
                .domains = TGL_AUX_C_IO_POWER_DOMAINS,
-               .ops = &icl_combo_phy_aux_power_well_ops,
+               .ops = &hsw_power_well_ops,
                .id = DISP_PW_ID_NONE,
                {
                        .hsw.regs = &icl_aux_power_well_regs,
@@ -4162,6 +4307,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
         */
        if (IS_GEN(dev_priv, 12)) {
                err = set_power_wells(power_domains, tgl_power_wells);
+       } else if (IS_ELKHARTLAKE(dev_priv)) {
+               err = set_power_wells(power_domains, ehl_power_wells);
        } else if (IS_GEN(dev_priv, 11)) {
                err = set_power_wells(power_domains, icl_power_wells);
        } else if (IS_CANNONLAKE(dev_priv)) {
index 050655a1a3d8d9c2920cf557f3a81388c2a60870..b05b2191b919b1ff886f7335ddf423ffbb3751c7 100644 (file)
@@ -2414,9 +2414,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
        intel_psr_compute_config(intel_dp, pipe_config);
 
-       intel_hdcp_transcoder_config(intel_connector,
-                                    pipe_config->cpu_transcoder);
-
        return 0;
 }
 
index 3111ecaeabd0ef4717a01fe76e7488ad2b3fea6c..20616639b8ab10180f345cc2f247a67047b8e38d 100644 (file)
@@ -1284,7 +1284,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
                return 0;
 
        /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
-       if (IS_GEMINILAKE(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return 0;
 
        if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
index 84b164f31895b79ebb59ab1000ec79af699e7acd..6cb02c912accf079529cba521718c7c8492ba76c 100644 (file)
@@ -229,11 +229,11 @@ static void frontbuffer_release(struct kref *ref)
                vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
        spin_unlock(&obj->vma.lock);
 
-       obj->frontbuffer = NULL;
+       RCU_INIT_POINTER(obj->frontbuffer, NULL);
        spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
 
        i915_gem_object_put(obj);
-       kfree(front);
+       kfree_rcu(front, rcu);
 }
 
 struct intel_frontbuffer *
@@ -242,11 +242,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_frontbuffer *front;
 
-       spin_lock(&i915->fb_tracking.lock);
-       front = obj->frontbuffer;
-       if (front)
-               kref_get(&front->ref);
-       spin_unlock(&i915->fb_tracking.lock);
+       front = __intel_frontbuffer_get(obj);
        if (front)
                return front;
 
@@ -262,13 +258,13 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
                         i915_active_may_sleep(frontbuffer_retire));
 
        spin_lock(&i915->fb_tracking.lock);
-       if (obj->frontbuffer) {
+       if (rcu_access_pointer(obj->frontbuffer)) {
                kfree(front);
-               front = obj->frontbuffer;
+               front = rcu_dereference_protected(obj->frontbuffer, true);
                kref_get(&front->ref);
        } else {
                i915_gem_object_get(obj);
-               obj->frontbuffer = front;
+               rcu_assign_pointer(obj->frontbuffer, front);
        }
        spin_unlock(&i915->fb_tracking.lock);
 
index adc64d61a4a5c424275d1b2eb71f0101531624d2..6d41f539442508b373bed76d584ca08657cddcec 100644 (file)
 #include <linux/atomic.h>
 #include <linux/kref.h>
 
+#include "gem/i915_gem_object_types.h"
 #include "i915_active.h"
 
 struct drm_i915_private;
-struct drm_i915_gem_object;
 
 enum fb_op_origin {
        ORIGIN_GTT,
@@ -45,6 +45,7 @@ struct intel_frontbuffer {
        atomic_t bits;
        struct i915_active write;
        struct drm_i915_gem_object *obj;
+       struct rcu_head rcu;
 };
 
 void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
@@ -54,6 +55,35 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
 void intel_frontbuffer_flip(struct drm_i915_private *i915,
                            unsigned frontbuffer_bits);
 
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+       struct intel_frontbuffer *front;
+
+       if (likely(!rcu_access_pointer(obj->frontbuffer)))
+               return NULL;
+
+       rcu_read_lock();
+       do {
+               front = rcu_dereference(obj->frontbuffer);
+               if (!front)
+                       break;
+
+               if (unlikely(!kref_get_unless_zero(&front->ref)))
+                       continue;
+
+               if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+                       break;
+
+               intel_frontbuffer_put(front);
+       } while (1);
+       rcu_read_unlock();
+
+       return front;
+}
+
 struct intel_frontbuffer *
 intel_frontbuffer_get(struct drm_i915_gem_object *obj);
 
@@ -119,6 +149,4 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
                             struct intel_frontbuffer *new,
                             unsigned int frontbuffer_bits);
 
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
 #endif /* __INTEL_FRONTBUFFER_H__ */
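
The __intel_frontbuffer_get() loop above is an instance of the usual
lockless lookup of an RCU-managed, refcounted object, paired with the
kfree_rcu() in frontbuffer_release() earlier in this series. A generic
sketch of the pattern, with hypothetical widget/holder types standing
in for the i915 structures:

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct widget {
	struct kref ref;
	struct rcu_head rcu;
};

struct holder {
	struct widget __rcu *w;
};

static void widget_release(struct kref *ref)
{
	struct widget *w = container_of(ref, struct widget, ref);

	/* Defer the free until concurrent RCU readers are done. */
	kfree_rcu(w, rcu);
}

static void widget_put(struct widget *w)
{
	kref_put(&w->ref, widget_release);
}

static struct widget *widget_get(struct holder *h)
{
	struct widget *w;

	rcu_read_lock();
	do {
		w = rcu_dereference(h->w);
		if (!w)
			break;

		/* The object may already be on its way out; refuse to
		 * resurrect a zero refcount and retry the load instead.
		 */
		if (unlikely(!kref_get_unless_zero(&w->ref)))
			continue;

		/* Re-check that the pointer was not swapped while we
		 * were taking the reference.
		 */
		if (likely(w == rcu_access_pointer(h->w)))
			break;

		widget_put(w);
	} while (1);
	rcu_read_unlock();

	return w;
}
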
index f1f41ca8402bf79585047d2c269c6bbe40675e4b..a448815d8fc2e70bc18689bb169074f70343bac5 100644 (file)
@@ -1821,23 +1821,6 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
        }
 }
 
-void intel_hdcp_transcoder_config(struct intel_connector *connector,
-                                 enum transcoder cpu_transcoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-
-       if (!hdcp->shim)
-               return;
-
-       if (INTEL_GEN(dev_priv) >= 12) {
-               mutex_lock(&hdcp->mutex);
-               hdcp->cpu_transcoder = cpu_transcoder;
-               hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
-               mutex_unlock(&hdcp->mutex);
-       }
-}
-
 static inline int initialize_hdcp_port_data(struct intel_connector *connector,
                                            const struct intel_hdcp_shim *shim)
 {
@@ -1959,8 +1942,10 @@ int intel_hdcp_init(struct intel_connector *connector,
        return 0;
 }
 
-int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
+int intel_hdcp_enable(struct intel_connector *connector,
+                     enum transcoder cpu_transcoder, u8 content_type)
 {
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
        unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
        int ret = -EINVAL;
@@ -1972,6 +1957,11 @@ int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
        WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
        hdcp->content_type = content_type;
 
+       if (INTEL_GEN(dev_priv) >= 12) {
+               hdcp->cpu_transcoder = cpu_transcoder;
+               hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+       }
+
        /*
         * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
         * is capable of HDCP2.2, it is preferred to use HDCP2.2.
index 41c1053d9e38b1e6a3f24fa1b21a3c10b07fb40a..f3c3272e712a18a6fdb6e8cc255ca90d936a3073 100644 (file)
@@ -21,11 +21,10 @@ enum transcoder;
 void intel_hdcp_atomic_check(struct drm_connector *connector,
                             struct drm_connector_state *old_state,
                             struct drm_connector_state *new_state);
-void intel_hdcp_transcoder_config(struct intel_connector *connector,
-                                 enum transcoder cpu_transcoder);
 int intel_hdcp_init(struct intel_connector *connector,
                    const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
+int intel_hdcp_enable(struct intel_connector *connector,
+                     enum transcoder cpu_transcoder, u8 content_type);
 int intel_hdcp_disable(struct intel_connector *connector);
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
 bool intel_hdcp_capable(struct intel_connector *connector);
index f6f5312205c49ede75fed758cab856b696882bab..f56fffc474faf98868420d81ac415552973c491a 100644 (file)
@@ -2489,9 +2489,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
                return -EINVAL;
        }
 
-       intel_hdcp_transcoder_config(intel_hdmi->attached_connector,
-                                    pipe_config->cpu_transcoder);
-
        return 0;
 }
 
index 848ce07a8ec2e505a4d561d2d27cf96785e359e6..8a98a1aa7adcd455a04a9f809869408aa0b19103 100644 (file)
@@ -279,12 +279,21 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
                                       struct i915_vma *vma)
 {
        enum pipe pipe = overlay->crtc->pipe;
+       struct intel_frontbuffer *from = NULL, *to = NULL;
 
        WARN_ON(overlay->old_vma);
 
-       intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
-                               vma ? vma->obj->frontbuffer : NULL,
-                               INTEL_FRONTBUFFER_OVERLAY(pipe));
+       if (overlay->vma)
+               from = intel_frontbuffer_get(overlay->vma->obj);
+       if (vma)
+               to = intel_frontbuffer_get(vma->obj);
+
+       intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+       if (to)
+               intel_frontbuffer_put(to);
+       if (from)
+               intel_frontbuffer_put(from);
 
        intel_frontbuffer_flip_prepare(overlay->i915,
                                       INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -766,7 +775,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                ret = PTR_ERR(vma);
                goto out_pin_section;
        }
-       intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
+       i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
 
        if (!overlay->active) {
                u32 oconfig;
index 3d4f5775a4baa3cc491a7aa98a577113ceb7dee4..25235ef630c10829b1041077f71af241d37a8f70 100644 (file)
@@ -9,16 +9,16 @@
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
 {
-       if (id == (u8)I915_ENGINE_CLASS_INVALID)
+       if (id == (u16)I915_ENGINE_CLASS_INVALID)
                return 0xffff0000u;
 
        GEM_BUG_ON(id >= 16);
        return 0x10000u << id;
 }
 
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
 {
        /*
         * The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
         * last_read - hence we always set both read and write busy for
         * last_write.
         */
-       if (id == (u8)I915_ENGINE_CLASS_INVALID)
+       if (id == (u16)I915_ENGINE_CLASS_INVALID)
                return 0xffffffffu;
 
        return (id + 1) | __busy_read_flag(id);
 }
 
 static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
 {
        const struct i915_request *rq;
 
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
                return 0;
 
        /* Beware type-expansion follies! */
-       BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+       BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
        return flag(rq->engine->uabi_class);
 }
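
The u8 to u16 widening above keeps the busy report in step with the
intel_engine_cs uabi_class/uabi_instance fields, which the
intel_engine_types.h hunk below also widens to u16, so the
(u16)I915_ENGINE_CLASS_INVALID sentinel (-1) still compares equal after
truncation. A worked example of the reported encoding, in a
hypothetical userspace-style harness (the CLASS_* values mirror the
uAPI ones):

#include <stdint.h>
#include <stdio.h>

#define CLASS_RENDER 0	/* I915_ENGINE_CLASS_RENDER */
#define CLASS_VIDEO  2	/* I915_ENGINE_CLASS_VIDEO */

static uint32_t busy_read_flag(uint16_t engine_class)
{
	return 0x10000u << engine_class;	/* readers live in the top 16 bits */
}

static uint32_t busy_write_id(uint16_t engine_class)
{
	/* The writer id (class + 1) sits in the low 16 bits, and the
	 * writer is always also reported as a reader. */
	return (engine_class + 1) | busy_read_flag(engine_class);
}

int main(void)
{
	/* A writer on render plus a concurrent reader on a video engine: */
	uint32_t busy = busy_write_id(CLASS_RENDER) | busy_read_flag(CLASS_VIDEO);

	printf("%#x\n", busy);	/* prints 0x50001 */
	return 0;
}
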
 
index b9f504ba3b32056e97fca71b652d34780f2d6593..18ee708585a94073cd51fa54a78f3aac93d7e149 100644 (file)
@@ -20,7 +20,8 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
 {
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 }
 
 static int clflush_work(struct dma_fence_work *base)
index 337ba17b1e0eeadec897887df205a7516255dad3..42385277c684325cc89a74c0237616b4b10f2d73 100644 (file)
@@ -2167,8 +2167,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        ext_data.fpriv = file->driver_priv;
        if (client_is_banned(ext_data.fpriv)) {
                DRM_DEBUG("client %s[%d] banned from creating ctx\n",
-                         current->comm,
-                         pid_nr(get_task_pid(current, PIDTYPE_PID)));
+                         current->comm, task_pid_nr(current));
                return -EIO;
        }
 
index 9937b4c341f1a37c8e22fee8ad87e9ac7dfcf218..f86400a191b040813956fd6e00af31a42742962b 100644 (file)
@@ -664,7 +664,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        i915_gem_object_unlock(obj);
 
        if (write_domain)
-               intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+               i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
 out_unpin:
        i915_gem_object_unpin_pages(obj);
@@ -784,7 +784,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
        }
 
 out:
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */
        return 0;
index f0998f1225af15a16bd8384dcf4df903c958c638..bc3a672261635eb7540a60641bbfa24164ccceab 100644 (file)
@@ -2694,6 +2694,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        err = eb_submit(&eb);
 err_request:
        add_to_client(eb.request, file);
+       i915_request_get(eb.request);
        i915_request_add(eb.request);
 
        if (fences)
@@ -2709,6 +2710,7 @@ err_request:
                        fput(out_fence->file);
                }
        }
+       i915_request_put(eb.request);
 
 err_batch_unpin:
        if (eb.batch_flags & I915_DISPATCH_SECURE)
index a50296cce0d88efb56a07b349ff56b88f7b689ba..a596548c07bf818e397a953210934da5c19b78d3 100644 (file)
@@ -280,7 +280,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                for_each_ggtt_vma(vma, obj)
                        intel_gt_flush_ggtt_writes(vma->vm->gt);
 
-               intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+               i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
                for_each_ggtt_vma(vma, obj) {
                        if (vma->iomap)
@@ -308,6 +308,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
        obj->write_domain = 0;
 }
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                        enum fb_op_origin origin)
+{
+       struct intel_frontbuffer *front;
+
+       front = __intel_frontbuffer_get(obj);
+       if (front) {
+               intel_frontbuffer_flush(front, origin);
+               intel_frontbuffer_put(front);
+       }
+}
+
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                             enum fb_op_origin origin)
+{
+       struct intel_frontbuffer *front;
+
+       front = __intel_frontbuffer_get(obj);
+       if (front) {
+               intel_frontbuffer_invalidate(front, origin);
+               intel_frontbuffer_put(front);
+       }
+}
+
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
        INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
index 458cd51331f1826d0a479c0356b1c85c0ca959d0..4b93591fd5c7a4f586f5d6e3d6606db3c89c66a7 100644 (file)
@@ -13,8 +13,8 @@
 
 #include <drm/i915_drm.h>
 
+#include "display/intel_frontbuffer.h"
 #include "i915_gem_object_types.h"
-
 #include "i915_gem_gtt.h"
 
 void i915_gem_init__objects(struct drm_i915_private *i915);
@@ -463,4 +463,25 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                        enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                             enum fb_op_origin origin);
+
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                 enum fb_op_origin origin)
+{
+       if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+               __i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                      enum fb_op_origin origin)
+{
+       if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+               __i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
+
 #endif
index 96008374a4120480baee67ad656d7eb9472c1e07..e3f3944fbd90d1d849f37aee4d109cf95cc73e17 100644 (file)
@@ -150,7 +150,7 @@ struct drm_i915_gem_object {
         */
        u16 write_domain;
 
-       struct intel_frontbuffer *frontbuffer;
+       struct intel_frontbuffer __rcu *frontbuffer;
 
        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
index 4c72d74d65764f6f726eca9db8e6e4aa9e9e5c43..0dbb44d30885350b4c2596e5e4d9615576e0ce8f 100644 (file)
@@ -402,7 +402,7 @@ struct get_pages_work {
 
 static struct sg_table *
 __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
-                              struct page **pvec, int num_pages)
+                              struct page **pvec, unsigned long num_pages)
 {
        unsigned int max_segment = i915_sg_segment_size();
        struct sg_table *st;
@@ -448,9 +448,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
-       const int npages = obj->base.size >> PAGE_SHIFT;
+       const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+       unsigned long pinned;
        struct page **pvec;
-       int pinned, ret;
+       int ret;
 
        ret = -ENOMEM;
        pinned = 0;
@@ -553,7 +554,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 
 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
-       const int num_pages = obj->base.size >> PAGE_SHIFT;
+       const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
        struct mm_struct *mm = obj->userptr.mm->mm;
        struct page **pvec;
        struct sg_table *pages;
index ef7bc41ffffad537b981181129bb9772d3dc36fb..5b7ff3ccfa8ea8329657cdaea3555615e77de852 100644 (file)
@@ -123,6 +123,10 @@ static int __context_pin_state(struct i915_vma *vma)
        if (err)
                return err;
 
+       err = i915_active_acquire(&vma->active);
+       if (err)
+               goto err_unpin;
+
        /*
         * And mark it as a globally pinned object to let the shrinker know
         * it cannot reclaim the object until we release it.
@@ -131,14 +135,44 @@ static int __context_pin_state(struct i915_vma *vma)
        vma->obj->mm.dirty = true;
 
        return 0;
+
+err_unpin:
+       i915_vma_unpin(vma);
+       return err;
 }
 
 static void __context_unpin_state(struct i915_vma *vma)
 {
        i915_vma_make_shrinkable(vma);
+       i915_active_release(&vma->active);
        __i915_vma_unpin(vma);
 }
 
+static int __ring_active(struct intel_ring *ring)
+{
+       int err;
+
+       err = i915_active_acquire(&ring->vma->active);
+       if (err)
+               return err;
+
+       err = intel_ring_pin(ring);
+       if (err)
+               goto err_active;
+
+       return 0;
+
+err_active:
+       i915_active_release(&ring->vma->active);
+       return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+       intel_ring_unpin(ring);
+       i915_active_release(&ring->vma->active);
+}
+
 __i915_active_call
 static void __intel_context_retire(struct i915_active *active)
 {
@@ -151,7 +185,7 @@ static void __intel_context_retire(struct i915_active *active)
                __context_unpin_state(ce->state);
 
        intel_timeline_unpin(ce->timeline);
-       intel_ring_unpin(ce->ring);
+       __ring_retire(ce->ring);
 
        intel_context_put(ce);
 }
@@ -163,7 +197,7 @@ static int __intel_context_active(struct i915_active *active)
 
        intel_context_get(ce);
 
-       err = intel_ring_pin(ce->ring);
+       err = __ring_active(ce->ring);
        if (err)
                goto err_put;
 
@@ -183,7 +217,7 @@ static int __intel_context_active(struct i915_active *active)
 err_timeline:
        intel_timeline_unpin(ce->timeline);
 err_ring:
-       intel_ring_unpin(ce->ring);
+       __ring_retire(ce->ring);
 err_put:
        intel_context_put(ce);
        return err;
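
Both helpers above follow the usual acquire-in-order, release-in-reverse
discipline, with each error path undoing exactly the steps that
succeeded. A condensed sketch, with hypothetical acquire_a()/acquire_b()
standing in for i915_active_acquire() and intel_ring_pin():

struct thing;

int acquire_a(struct thing *t);
void release_a(struct thing *t);
int acquire_b(struct thing *t);
void release_b(struct thing *t);

static int thing_activate(struct thing *t)
{
	int err;

	err = acquire_a(t);
	if (err)
		return err;

	err = acquire_b(t);
	if (err)
		goto err_a;	/* unwind only what succeeded */

	return 0;

err_a:
	release_a(t);
	return err;
}

static void thing_retire(struct thing *t)
{
	/* mirror image: release in the reverse order of acquisition */
	release_b(t);
	release_a(t);
}
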
index 17f1f1441efc9dba383e51959aa936a4177b8bfa..2b446474e0103e98cdb41ace0138426e93d4467b 100644 (file)
@@ -274,8 +274,8 @@ struct intel_engine_cs {
        u8 class;
        u8 instance;
 
-       u8 uabi_class;
-       u8 uabi_instance;
+       u16 uabi_class;
+       u16 uabi_instance;
 
        u32 uabi_capabilities;
        u32 context_size;
index a459a42ad5c22a3e8e5b9491fd5c8df8fec07045..7e64b7d7d33015526324d1bceed8104c453a3363 100644 (file)
@@ -94,8 +94,9 @@ static int __gt_park(struct intel_wakeref *wf)
                intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
        }
 
+       /* Defer dropping the display power well for 100ms; it's slow! */
        GEM_BUG_ON(!wakeref);
-       intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+       intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
        i915_globals_park();
 
index 9fdefbdc35467399afc5c0709c7b5ba6fd9987b4..d925a1035c9d2f0ab0dd242a5218d79b61579a66 100644 (file)
@@ -845,12 +845,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
        }
 }
 
-static void unwind_wa_tail(struct i915_request *rq)
-{
-       rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
-       assert_ring_tail_valid(rq->ring, rq->tail);
-}
-
 static struct i915_request *
 __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
@@ -863,12 +857,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
        list_for_each_entry_safe_reverse(rq, rn,
                                         &engine->active.requests,
                                         sched.link) {
-
                if (i915_request_completed(rq))
                        continue; /* XXX */
 
                __i915_request_unsubmit(rq);
-               unwind_wa_tail(rq);
 
                /*
                 * Push the request back into the queue for later resubmission.
@@ -1161,13 +1153,29 @@ execlists_schedule_out(struct i915_request *rq)
        i915_request_put(rq);
 }
 
-static u64 execlists_update_context(const struct i915_request *rq)
+static u64 execlists_update_context(struct i915_request *rq)
 {
        struct intel_context *ce = rq->hw_context;
-       u64 desc;
+       u64 desc = ce->lrc_desc;
+       u32 tail;
 
-       ce->lrc_reg_state[CTX_RING_TAIL] =
-               intel_ring_set_tail(rq->ring, rq->tail);
+       /*
+        * WaIdleLiteRestore:bdw,skl
+        *
+        * We should never submit the context with the same RING_TAIL twice
+        * just in case we submit an empty ring, which confuses the HW.
+        *
+        * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+        * the normal request to be able to always advance the RING_TAIL on
+        * subsequent resubmissions (for lite restore). Should that fail us
+        * and we try to submit the same tail again, force a context
+        * reload.
+        */
+       tail = intel_ring_set_tail(rq->ring, rq->tail);
+       if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
+               desc |= CTX_DESC_FORCE_RESTORE;
+       ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+       rq->tail = rq->wa_tail;
 
        /*
         * Make sure the context image is complete before we submit it to HW.
@@ -1186,13 +1194,11 @@ static u64 execlists_update_context(const struct i915_request *rq)
         */
        mb();
 
-       desc = ce->lrc_desc;
-       ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
-
        /* Wa_1607138340:tgl */
        if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
                desc |= CTX_DESC_FORCE_RESTORE;
 
+       ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
        return desc;
 }
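
A condensed sketch of the guard introduced above, with a hypothetical
sketch_ctx type (FORCE_RESTORE stands in for CTX_DESC_FORCE_RESTORE):
the RING_TAIL last written into the context image is remembered, and
resubmitting the identical tail demands a full context reload instead
of presenting the HW with an apparently empty ring.

#define FORCE_RESTORE	BIT(2)	/* illustrative bit, not the real encoding */

struct sketch_ctx {
	u64 desc;
	u32 reg_tail;	/* last value written to CTX_RING_TAIL */
};

static u64 update_desc(struct sketch_ctx *ce, u32 new_tail)
{
	u64 desc = ce->desc;

	if (unlikely(ce->reg_tail == new_tail))
		desc |= FORCE_RESTORE;	/* same tail twice: reload, don't lite-restore */
	ce->reg_tail = new_tail;

	return desc;
}
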
 
@@ -1703,16 +1709,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                                return;
                        }
-
-                       /*
-                        * WaIdleLiteRestore:bdw,skl
-                        * Apply the wa NOOPs to prevent
-                        * ring:HEAD == rq:TAIL as we resubmit the
-                        * request. See gen8_emit_fini_breadcrumb() for
-                        * where we prepare the padding after the
-                        * end of the request.
-                        */
-                       last->tail = last->wa_tail;
                }
        }
 
@@ -2668,6 +2664,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
        batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
+       /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+       batch = gen8_emit_pipe_control(batch,
+                                      PIPE_CONTROL_FLUSH_L3 |
+                                      PIPE_CONTROL_STORE_DATA_INDEX |
+                                      PIPE_CONTROL_CS_STALL |
+                                      PIPE_CONTROL_QW_WRITE,
+                                      LRC_PPHWSP_SCRATCH_ADDR);
+
        batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
 
        /* WaMediaPoolStateCmdInWABB:bxt,glk */
@@ -4120,17 +4124,18 @@ static void virtual_context_destroy(struct kref *kref)
        for (n = 0; n < ve->num_siblings; n++) {
                struct intel_engine_cs *sibling = ve->siblings[n];
                struct rb_node *node = &ve->nodes[sibling->id].rb;
+               unsigned long flags;
 
                if (RB_EMPTY_NODE(node))
                        continue;
 
-               spin_lock_irq(&sibling->active.lock);
+               spin_lock_irqsave(&sibling->active.lock, flags);
 
                /* Detachment is lazily performed in the execlists tasklet */
                if (!RB_EMPTY_NODE(node))
                        rb_erase_cached(node, &sibling->execlists.virtual);
 
-               spin_unlock_irq(&sibling->active.lock);
+               spin_unlock_irqrestore(&sibling->active.lock, flags);
        }
        GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
 
@@ -4419,9 +4424,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
        ve->base.gt = siblings[0]->gt;
        ve->base.uncore = siblings[0]->uncore;
        ve->base.id = -1;
+
        ve->base.class = OTHER_CLASS;
        ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
        ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+       ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
 
        /*
         * The decision on whether to submit a request using semaphores
index a47d5a7c32c9633f5e1a839b1ee68a5107579055..93026217c121cc58022c5b4cf35af68ac3e318a1 100644 (file)
@@ -1413,14 +1413,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
        int len;
        u32 *cs;
 
-       flags |= MI_MM_SPACE_GTT;
-       if (IS_HASWELL(i915))
-               /* These flags are for resource streamer on HSW+ */
-               flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
-       else
-               /* We need to save the extended state for powersaving modes */
-               flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
        len = 4;
        if (IS_GEN(i915, 7))
                len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
@@ -1589,22 +1581,21 @@ static int switch_context(struct i915_request *rq)
        }
 
        if (ce->state) {
-               u32 hw_flags;
+               u32 flags;
 
                GEM_BUG_ON(rq->engine->id != RCS0);
 
-               /*
-                * The kernel context(s) is treated as pure scratch and is not
-                * expected to retain any state (as we sacrifice it during
-                * suspend and on resume it may be corrupted). This is ok,
-                * as nothing actually executes using the kernel context; it
-                * is purely used for flushing user contexts.
-                */
-               hw_flags = 0;
-               if (i915_gem_context_is_kernel(rq->gem_context))
-                       hw_flags = MI_RESTORE_INHIBIT;
+               /* For resource streamer on HSW+ and power context elsewhere */
+               BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+               BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
+
+               flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+               if (!i915_gem_context_is_kernel(rq->gem_context))
+                       flags |= MI_RESTORE_EXT_STATE_EN;
+               else
+                       flags |= MI_RESTORE_INHIBIT;
 
-               ret = mi_set_context(rq, hw_flags);
+               ret = mi_set_context(rq, flags);
                if (ret)
                        return ret;
        }
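
The rewrite above leans on the fact that the HSW resource-streamer
flags share their encoding with the generic extended-state flags, and
pins that assumption down with BUILD_BUG_ON() so a future header change
cannot silently break it. A sketch of the idiom with illustrative
values (the real encodings live in the MI_* definitions):

#define FLAG_PLATFORM_A		(1 << 3)	/* illustrative, not the real bits */
#define FLAG_PLATFORM_B		(1 << 3)

static inline u32 common_flags(void)
{
	/* compile-time proof that one value serves both platforms */
	BUILD_BUG_ON(FLAG_PLATFORM_A != FLAG_PLATFORM_B);
	return FLAG_PLATFORM_A;
}
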
index e451298d11c32adc8fedad9e86341ee308ccf8d8..2477a1e5a1669cf220ccb85b062885714c100d3d 100644 (file)
 
 #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
 
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+                               unsigned long size,
+                               dma_addr_t dma_addr)
+{
+       int ret = 0;
+
+       if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+               ret = -EINVAL;
+
+       return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+                                  dma_addr_t dma_addr)
+{
+       intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
 static int vgpu_gem_get_pages(
                struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct intel_vgpu *vgpu;
        struct sg_table *st;
        struct scatterlist *sg;
-       int i, ret;
+       int i, j, ret;
        gen8_pte_t __iomem *gtt_entries;
        struct intel_vgpu_fb_info *fb_info;
        u32 page_num;
@@ -51,6 +70,10 @@ static int vgpu_gem_get_pages(
        if (WARN_ON(!fb_info))
                return -ENODEV;
 
+       vgpu = fb_info->obj->vgpu;
+       if (WARN_ON(!vgpu))
+               return -ENODEV;
+
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (unlikely(!st))
                return -ENOMEM;
@@ -64,21 +87,53 @@ static int vgpu_gem_get_pages(
        gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                (fb_info->start >> PAGE_SHIFT);
        for_each_sg(st->sgl, sg, page_num, i) {
+               dma_addr_t dma_addr =
+                       GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+               if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
                sg->offset = 0;
                sg->length = PAGE_SIZE;
-               sg_dma_address(sg) =
-                       GEN8_DECODE_PTE(readq(&gtt_entries[i]));
                sg_dma_len(sg) = PAGE_SIZE;
+               sg_dma_address(sg) = dma_addr;
        }
 
        __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+       if (ret) {
+               dma_addr_t dma_addr;
+
+               for_each_sg(st->sgl, sg, i, j) {
+                       dma_addr = sg_dma_address(sg);
+                       if (dma_addr)
+                               vgpu_unpin_dma_address(vgpu, dma_addr);
+               }
+               sg_free_table(st);
+               kfree(st);
+       }
+
+       return ret;
 
-       return 0;
 }
 
 static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
                struct sg_table *pages)
 {
+       struct scatterlist *sg;
+
+       if (obj->base.dma_buf) {
+               struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+               struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+               struct intel_vgpu *vgpu = obj->vgpu;
+               int i;
+
+               for_each_sg(pages->sgl, sg, fb_info->size, i)
+                       vgpu_unpin_dma_address(vgpu,
+                                              sg_dma_address(sg));
+       }
+
        sg_free_table(pages);
        kfree(pages);
 }
@@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
        drm_gem_private_object_init(dev, &obj->base,
                roundup(info->size, PAGE_SIZE));
        i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+       i915_gem_object_set_readonly(obj);
 
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
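
The error path added to vgpu_gem_get_pages() above uses the common
partial-unwind idiom: when pinning fails at entry i, only the i entries
already pinned are released, by bounding the cleanup walk with the
failure index rather than the full count. A generic sketch with
hypothetical item/pin_one()/unpin_one() names:

struct item {
	void *cookie;	/* stand-in payload */
};

int pin_one(struct item *it);
void unpin_one(struct item *it);

static int pin_all(struct item *items, int n)
{
	int i, j, ret = 0;

	for (i = 0; i < n; i++) {
		ret = pin_one(&items[i]);
		if (ret)
			goto unwind;
	}

	return 0;

unwind:
	for (j = 0; j < i; j++)	/* bounded by i, not n */
		unpin_one(&items[j]);

	return ret;
}
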
index bb9fe6bf5275d2eb60f1dd132b333e3ca39260df..1043e6d564df396b3ef9192ded47343b145bed2c 100644 (file)
@@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                        gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
                        engine_mask |= BIT(VCS1);
                }
+               if (data & GEN9_GRDOM_GUC) {
+                       gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
+                       vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+               }
                engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
        }
 
@@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int guc_status_read(struct intel_vgpu *vgpu,
+                          unsigned int offset, void *p_data,
+                          unsigned int bytes)
+{
+       /* report MIA_IN_RESET to the guest once, then clear it */
+       read_vreg(vgpu, offset, p_data, bytes);
+       vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
+       return 0;
+}
+
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -2672,6 +2686,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
        MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
        MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+       MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
+
        return 0;
 }
 
index 4862fb12778e096fd35ee5e71b8b27dc87a1ba9e..b19a3b1ea4c11cac83df40f5065789104d4993b4 100644 (file)
@@ -62,6 +62,8 @@ struct intel_gvt_mpt {
                                  unsigned long size, dma_addr_t *dma_addr);
        void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
 
+       int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
        int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
                              unsigned long mfn, unsigned int nr, bool map);
        int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
index 04a5a0d9082393de81200181074b9683ed6bce81..3259a1fa69e105a2c91bca299dcec8d063bdfdc0 100644 (file)
@@ -1916,6 +1916,28 @@ err_unlock:
        return ret;
 }
 
+static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+       struct kvmgt_guest_info *info;
+       struct gvt_dma *entry;
+       int ret = 0;
+
+       if (!handle_valid(handle))
+               return -ENODEV;
+
+       info = (struct kvmgt_guest_info *)handle;
+
+       mutex_lock(&info->vgpu->vdev.cache_lock);
+       entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+       if (entry)
+               kref_get(&entry->ref);
+       else
+               ret = -ENOMEM;
+       mutex_unlock(&info->vgpu->vdev.cache_lock);
+
+       return ret;
+}
+
 static void __gvt_dma_release(struct kref *ref)
 {
        struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
@@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
        .gfn_to_mfn = kvmgt_gfn_to_pfn,
        .dma_map_guest_page = kvmgt_dma_map_guest_page,
        .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+       .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
        .set_opregion = kvmgt_set_opregion,
        .set_edid = kvmgt_set_edid,
        .get_vfio_device = kvmgt_get_vfio_device,
index 0f944012812304c052ad035917ffa442bd381a24..9ad224df9c68bb2aedbb58fbc0c4d4387a95eada 100644 (file)
@@ -254,6 +254,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
        intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
 }
 
+/**
+ * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
+ * @vgpu: a vGPU
+ * @dma_addr: guest dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+static inline int
+intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
+                                       dma_addr_t dma_addr)
+{
+       return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
+}
+
 /**
  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
  * @vgpu: a vGPU
index d5a6e4e3d0fd7a10f6685d5e8b0365c915eb12fa..85bd9bf4f6eee58b3c00e567a7c5286a017dccb9 100644 (file)
@@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-       mutex_lock(&vgpu->gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
        vgpu->active = true;
-       mutex_unlock(&vgpu->gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
index e29bc137e7bad722f6b6b7d9bfe7a763ceefec4c..21aa08f558110e842267acc24b87a2da0ecae6cd 100644 (file)
@@ -1660,8 +1660,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
        (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
 
 /* WaRsDisableCoarsePowerGating:skl,cnl */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
-       (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)                   \
+       (IS_CANNONLAKE(dev_priv) ||                                     \
+        IS_SKL_GT3(dev_priv) ||                                        \
+        IS_SKL_GT4(dev_priv))
 
 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
index b9eb6b3149b711cea3a4a1673c5ae6bcf7c7bc23..905890e3ac24139a470dfc2ea47529b4a08317eb 100644 (file)
@@ -45,6 +45,7 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
@@ -160,7 +161,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
         * We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;
@@ -168,7 +169,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        drm_clflush_virt_range(vaddr, args->size);
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
 }
 
@@ -588,7 +589,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                goto out_unpin;
        }
 
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
@@ -630,7 +631,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                user_data += page_length;
                offset += page_length;
        }
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
        i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
@@ -720,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                offset = 0;
        }
 
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        i915_gem_object_unlock_fence(obj, fence);
 
        return ret;
@@ -1053,6 +1054,18 @@ out:
        return err;
 }
 
+static int __intel_context_flush_retire(struct intel_context *ce)
+{
+       struct intel_timeline *tl;
+
+       tl = intel_context_timeline_lock(ce);
+       if (IS_ERR(tl))
+               return PTR_ERR(tl);
+
+       intel_context_timeline_unlock(tl);
+       return 0;
+}
+
 static int __intel_engines_record_defaults(struct intel_gt *gt)
 {
        struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -1121,13 +1134,20 @@ err_rq:
                if (!rq)
                        continue;
 
-               /* We want to be able to unbind the state from the GGTT */
-               GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
-
+               GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT,
+                                    &rq->hw_context->flags));
                state = rq->hw_context->state;
                if (!state)
                        continue;
 
+               /* Serialise with retirement on another CPU */
+               err = __intel_context_flush_retire(rq->hw_context);
+               if (err)
+                       goto out;
+
+               /* We want to be able to unbind the state from the GGTT */
+               GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+
                /*
                 * As we will hold a reference to the logical state, it will
                 * not be torn down with the context, and importantly the
index 6239a9adbf14e4cf0e1973039d58303af1737101..d6ce57d309589408039e5ab9d08aceb95fe791e6 100644 (file)
@@ -1177,6 +1177,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
        pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
        vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
        do {
+               GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
                vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
 
                iter->dma += I915_GTT_PAGE_SIZE;
@@ -1660,6 +1661,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 
        vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
        do {
+               GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
                vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
 
                iter.dma += I915_GTT_PAGE_SIZE;
@@ -2847,7 +2849,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
         * readback check when writing GTT PTE entries.
         */
        if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
-               ggtt->gsm = ioremap_nocache(phys_addr, size);
+               ggtt->gsm = ioremap(phys_addr, size);
        else
                ggtt->gsm = ioremap_wc(phys_addr, size);
        if (!ggtt->gsm) {
@@ -3304,7 +3306,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
 
 static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 {
-       struct i915_vma *vma, *vn;
+       struct i915_vma *vma;
        bool flush = false;
        int open;
 
@@ -3319,15 +3321,12 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
        open = atomic_xchg(&ggtt->vm.open, 0);
 
        /* clflush objects bound into the GGTT and rebind them. */
-       list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+       list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
                        continue;
 
-               if (!__i915_vma_unbind(vma))
-                       continue;
-
                clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
                WARN_ON(i915_vma_bind(vma,
                                      obj ? obj->cache_level : 0,
index 65d7c2e599de03718c3466f165b75183b2d27ad1..2ae14bc149317afb65d2b8d04bd6fefd9a1f33e5 100644 (file)
@@ -2078,20 +2078,12 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
        u32 *reg_state = ce->lrc_reg_state;
        int i;
 
-       if (IS_GEN(stream->perf->i915, 12)) {
-               u32 format = stream->oa_buffer.format;
+       reg_state[ctx_oactxctrl + 1] =
+               (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+               (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+               GEN8_OA_COUNTER_RESUME;
 
-               reg_state[ctx_oactxctrl + 1] =
-                       (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
-                       (stream->oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
-       } else {
-               reg_state[ctx_oactxctrl + 1] =
-                       (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
-                       (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
-                       GEN8_OA_COUNTER_RESUME;
-       }
-
-       for (i = 0; !!ctx_flexeu0 && i < ARRAY_SIZE(flex_regs); i++)
+       for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
                reg_state[ctx_flexeu0 + i * 2 + 1] =
                        oa_config_flex_reg(stream->oa_config, flex_regs[i]);
 
@@ -2224,34 +2216,51 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
        return err;
 }
 
-static int gen12_emit_oar_config(struct intel_context *ce, bool enable)
+static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
 {
-       struct i915_request *rq;
-       u32 *cs;
-       int err = 0;
-
-       rq = i915_request_create(ce);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
-       cs = intel_ring_begin(rq, 4);
-       if (IS_ERR(cs)) {
-               err = PTR_ERR(cs);
-               goto out;
-       }
-
-       *cs++ = MI_LOAD_REGISTER_IMM(1);
-       *cs++ = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(ce->engine->mmio_base));
-       *cs++ = _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
-                             enable ? GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0);
-       *cs++ = MI_NOOP;
+       int err;
+       struct intel_context *ce = stream->pinned_ctx;
+       u32 format = stream->oa_buffer.format;
+       struct flex regs_context[] = {
+               {
+                       GEN8_OACTXCONTROL,
+                       stream->perf->ctx_oactxctrl_offset + 1,
+                       enable ? GEN8_OA_COUNTER_RESUME : 0,
+               },
+       };
+       /*
+        * Offsets in regs_lri are not used since this configuration is only
+        * applied using LRI. Initialize the correct offsets for posterity.
+        */
+#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
+       struct flex regs_lri[] = {
+               {
+                       GEN12_OAR_OACONTROL,
+                       GEN12_OAR_OACONTROL_OFFSET + 1,
+                       (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+                       (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+               },
+               {
+                       RING_CONTEXT_CONTROL(ce->engine->mmio_base),
+                       CTX_CONTEXT_CONTROL,
+                       _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
+                                     enable ?
+                                     GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
+                                     0)
+               },
+       };
 
-       intel_ring_advance(rq, cs);
+       /* Modify the context image of the pinned context with regs_context */
+       err = intel_context_lock_pinned(ce);
+       if (err)
+               return err;
 
-out:
-       i915_request_add(rq);
+       err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
+       intel_context_unlock_pinned(ce);
+       if (err)
+               return err;
 
-       return err;
+       /* Apply regs_lri using LRI with pinned context */
+       return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
 }
 
 /*
@@ -2277,53 +2286,16 @@ out:
  *   per-context OA state.
  *
  * Note: it's only the RCS/Render context that has any OA state.
+ * Note: the first flex register passed must always be R_PWR_CLK_STATE
  */
-static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
-                                     const struct i915_oa_config *oa_config)
+static int oa_configure_all_contexts(struct i915_perf_stream *stream,
+                                    struct flex *regs,
+                                    size_t num_regs)
 {
        struct drm_i915_private *i915 = stream->perf->i915;
-       /* The MMIO offsets for Flex EU registers aren't contiguous */
-       const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
-#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
-       struct flex regs[] = {
-               {
-                       GEN8_R_PWR_CLK_STATE,
-                       CTX_R_PWR_CLK_STATE,
-               },
-               {
-                       IS_GEN(i915, 12) ?
-                       GEN12_OAR_OACONTROL : GEN8_OACTXCONTROL,
-                       stream->perf->ctx_oactxctrl_offset + 1,
-               },
-               { EU_PERF_CNTL0, ctx_flexeuN(0) },
-               { EU_PERF_CNTL1, ctx_flexeuN(1) },
-               { EU_PERF_CNTL2, ctx_flexeuN(2) },
-               { EU_PERF_CNTL3, ctx_flexeuN(3) },
-               { EU_PERF_CNTL4, ctx_flexeuN(4) },
-               { EU_PERF_CNTL5, ctx_flexeuN(5) },
-               { EU_PERF_CNTL6, ctx_flexeuN(6) },
-       };
-#undef ctx_flexeuN
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx, *cn;
-       size_t array_size = IS_GEN(i915, 12) ? 2 : ARRAY_SIZE(regs);
-       int i, err;
-
-       if (IS_GEN(i915, 12)) {
-               u32 format = stream->oa_buffer.format;
-
-               regs[1].value =
-                       (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
-                       (oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
-       } else {
-               regs[1].value =
-                       (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
-                       (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
-                       GEN8_OA_COUNTER_RESUME;
-       }
-
-       for (i = 2; !!ctx_flexeu0 && i < array_size; i++)
-               regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+       int err;
 
        lockdep_assert_held(&stream->perf->lock);
 
@@ -2353,7 +2325,7 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
 
                spin_unlock(&i915->gem.contexts.lock);
 
-               err = gen8_configure_context(ctx, regs, array_size);
+               err = gen8_configure_context(ctx, regs, num_regs);
                if (err) {
                        i915_gem_context_put(ctx);
                        return err;
@@ -2378,7 +2350,7 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
 
                regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
 
-               err = gen8_modify_self(ce, regs, array_size);
+               err = gen8_modify_self(ce, regs, num_regs);
                if (err)
                        return err;
        }
@@ -2386,6 +2358,56 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
        return 0;
 }
 
+static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
+                                       const struct i915_oa_config *oa_config)
+{
+       struct flex regs[] = {
+               {
+                       GEN8_R_PWR_CLK_STATE,
+                       CTX_R_PWR_CLK_STATE,
+               },
+       };
+
+       return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+}
+
+static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
+                                     const struct i915_oa_config *oa_config)
+{
+       /* The MMIO offsets for Flex EU registers aren't contiguous */
+       const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
+       struct flex regs[] = {
+               {
+                       GEN8_R_PWR_CLK_STATE,
+                       CTX_R_PWR_CLK_STATE,
+               },
+               {
+                       GEN8_OACTXCONTROL,
+                       stream->perf->ctx_oactxctrl_offset + 1,
+               },
+               { EU_PERF_CNTL0, ctx_flexeuN(0) },
+               { EU_PERF_CNTL1, ctx_flexeuN(1) },
+               { EU_PERF_CNTL2, ctx_flexeuN(2) },
+               { EU_PERF_CNTL3, ctx_flexeuN(3) },
+               { EU_PERF_CNTL4, ctx_flexeuN(4) },
+               { EU_PERF_CNTL5, ctx_flexeuN(5) },
+               { EU_PERF_CNTL6, ctx_flexeuN(6) },
+       };
+#undef ctx_flexeuN
+       int i;
+
+       regs[1].value =
+               (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+               (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+               GEN8_OA_COUNTER_RESUME;
+
+       for (i = 2; i < ARRAY_SIZE(regs); i++)
+               regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+
+       return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+}
+
 static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 {
        struct intel_uncore *uncore = stream->uncore;
@@ -2464,7 +2486,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
         * to make sure all slices/subslices are ON before writing to NOA
         * registers.
         */
-       ret = lrc_configure_all_contexts(stream, oa_config);
+       ret = gen12_configure_all_contexts(stream, oa_config);
        if (ret)
                return ret;
 
@@ -2474,8 +2496,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
         * requested this.
         */
        if (stream->ctx) {
-               ret = gen12_emit_oar_config(stream->pinned_ctx,
-                                           oa_config != NULL);
+               ret = gen12_configure_oar_context(stream, true);
                if (ret)
                        return ret;
        }
@@ -2509,11 +2530,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
        struct intel_uncore *uncore = stream->uncore;
 
        /* Reset all contexts' slices/subslices configurations. */
-       lrc_configure_all_contexts(stream, NULL);
+       gen12_configure_all_contexts(stream, NULL);
 
        /* disable the context save/restore or OAR counters */
        if (stream->ctx)
-               gen12_emit_oar_config(stream->pinned_ctx, false);
+               gen12_configure_oar_context(stream, false);
 
        /* Make sure we disable noa to save power. */
        intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2713,7 +2734,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
                return -EINVAL;
        }
 
-       if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
+       if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
+           (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
                DRM_DEBUG("Only OA report sampling supported\n");
                return -EINVAL;
        }
@@ -2745,7 +2767,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 
        format_size = perf->oa_formats[props->oa_format].size;
 
-       stream->sample_flags |= SAMPLE_OA_REPORT;
+       stream->sample_flags = props->sample_flags;
        stream->sample_size += format_size;
 
        stream->oa_buffer.format_size = format_size;
@@ -2854,7 +2876,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
                return;
 
        stream = engine->i915->perf.exclusive_stream;
-       if (stream)
+       /*
+        * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
+        * is already doing that, so nothing to be done for gen12 here.
+        */
+       if (stream && INTEL_GEN(stream->perf->i915) < 12)
                gen8_update_reg_state_unlocked(ce, stream);
 }
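
The hunks above rework the OA code around tables of struct flex entries: an MMIO register, the dword offset of its slot in the saved context image, and the value to write. gen8_modify_context() patches the saved image of a pinned context, while gen8_modify_self() applies the same table live via MI_LOAD_REGISTER_IMM, and the first entry is expected to be R_PWR_CLK_STATE so its value can be filled in per context. A minimal sketch of the image-patching half, with all names hypothetical:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical mirror of the driver's flex entries. */
    struct flex_entry {
            uint32_t mmio_reg;      /* register, for the LRI path */
            uint32_t dword_offset;  /* slot in the saved context image */
            uint32_t value;
    };

    /* Patch a saved register-state image in place; the live path would
     * instead emit MI_LOAD_REGISTER_IMM(mmio_reg, value) on the context. */
    static void patch_context_image(uint32_t *reg_state,
                                    const struct flex_entry *tbl, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    reg_state[tbl[i].dword_offset] = tbl[i].value;
    }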
 
index 2814218c5ba18773f053d05e2733f5e67b38de17..d6d2e6fb867432a035b9df0e60a63823ef94e72f 100644 (file)
@@ -144,61 +144,40 @@ static inline s64 ktime_since(const ktime_t kt)
        return ktime_to_ns(ktime_sub(ktime_get(), kt));
 }
 
-static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
-{
-       u64 val;
-
-       /*
-        * We think we are runtime suspended.
-        *
-        * Report the delta from when the device was suspended to now,
-        * on top of the last known real value, as the approximated RC6
-        * counter value.
-        */
-       val = ktime_since(pmu->sleep_last);
-       val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
-       pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-
-       return val;
-}
-
-static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
-{
-       /*
-        * If we are coming back from being runtime suspended we must
-        * be careful not to report a larger value than returned
-        * previously.
-        */
-       if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
-               pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
-               pmu->sample[__I915_SAMPLE_RC6].cur = val;
-       } else {
-               val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
-       }
-
-       return val;
-}
-
 static u64 get_rc6(struct intel_gt *gt)
 {
        struct drm_i915_private *i915 = gt->i915;
        struct i915_pmu *pmu = &i915->pmu;
        unsigned long flags;
+       bool awake = false;
        u64 val;
 
-       val = 0;
        if (intel_gt_pm_get_if_awake(gt)) {
                val = __get_rc6(gt);
                intel_gt_pm_put_async(gt);
+               awake = true;
        }
 
        spin_lock_irqsave(&pmu->lock, flags);
 
-       if (val)
-               val = __pmu_update_rc6(pmu, val);
+       if (awake) {
+               pmu->sample[__I915_SAMPLE_RC6].cur = val;
+       } else {
+               /*
+                * We think we are runtime suspended.
+                *
+                * Report the delta from when the device was suspended to now,
+                * on top of the last known real value, as the approximated RC6
+                * counter value.
+                */
+               val = ktime_since(pmu->sleep_last);
+               val += pmu->sample[__I915_SAMPLE_RC6].cur;
+       }
+
+       if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
+               val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
        else
-               val = __pmu_estimate_rc6(pmu);
+               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
 
        spin_unlock_irqrestore(&pmu->lock, flags);
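
The rework above replaces the __pmu_estimate_rc6()/__pmu_update_rc6() pair with a single __I915_SAMPLE_RC6_LAST_REPORTED high-water mark, so the RC6 value reported to perf can never run backwards when switching between real readings and sleep-time estimates. A minimal sketch of that clamp, names hypothetical:

    #include <stdint.h>

    /* Never report a value smaller than the last one userspace saw;
     * otherwise advance the high-water mark. */
    static uint64_t clamp_monotonic(uint64_t *last_reported, uint64_t val)
    {
            if (val < *last_reported)
                    return *last_reported;
            *last_reported = val;
            return val;
    }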
 
@@ -210,20 +189,11 @@ static void park_rc6(struct drm_i915_private *i915)
        struct i915_pmu *pmu = &i915->pmu;
 
        if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-               __pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+               pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
 
        pmu->sleep_last = ktime_get();
 }
 
-static void unpark_rc6(struct drm_i915_private *i915)
-{
-       struct i915_pmu *pmu = &i915->pmu;
-
-       /* Estimate how long we slept and accumulate that into rc6 counters */
-       if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-               __pmu_estimate_rc6(pmu);
-}
-
 #else
 
 static u64 get_rc6(struct intel_gt *gt)
@@ -232,7 +202,6 @@ static u64 get_rc6(struct intel_gt *gt)
 }
 
 static void park_rc6(struct drm_i915_private *i915) {}
-static void unpark_rc6(struct drm_i915_private *i915) {}
 
 #endif
 
@@ -281,8 +250,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
         */
        __i915_pmu_maybe_start_timer(pmu);
 
-       unpark_rc6(i915);
-
        spin_unlock_irq(&pmu->lock);
 }
 
@@ -1107,12 +1074,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = i915_sample;
 
-       if (!is_igp(i915))
+       if (!is_igp(i915)) {
                pmu->name = kasprintf(GFP_KERNEL,
-                                     "i915-%s",
+                                     "i915_%s",
                                      dev_name(i915->drm.dev));
-       else
+               if (pmu->name) {
+                       /* tools/perf reserves colons as special characters. */
+                       strreplace((char *)pmu->name, ':', '_');
+               }
+       } else {
                pmu->name = "i915";
+       }
        if (!pmu->name)
                goto err;
 
index bf52e3983631b7278d4930b5c752190e01954e56..6c1647c5daf255ff4d27fb538dd2ec358ed782e2 100644 (file)
@@ -18,7 +18,7 @@ enum {
        __I915_SAMPLE_FREQ_ACT = 0,
        __I915_SAMPLE_FREQ_REQ,
        __I915_SAMPLE_RC6,
-       __I915_SAMPLE_RC6_ESTIMATED,
+       __I915_SAMPLE_RC6_LAST_REPORTED,
        __I915_NUM_PMU_SAMPLERS
 };
 
index 73079b503724ddc92481056231d1fa0e217a969f..094011b8f64d6ad6339a0168a781b0e6d26ecdc9 100644 (file)
@@ -4177,7 +4177,13 @@ enum {
 #define  CPSSUNIT_CLKGATE_DIS          REG_BIT(9)
 
 #define UNSLICE_UNIT_LEVEL_CLKGATE     _MMIO(0x9434)
-#define  VFUNIT_CLKGATE_DIS            (1 << 20)
+#define   VFUNIT_CLKGATE_DIS           REG_BIT(20)
+#define   HSUNIT_CLKGATE_DIS           REG_BIT(8)
+#define   VSUNIT_CLKGATE_DIS           REG_BIT(3)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2    _MMIO(0x94e4)
+#define   VSUNIT_CLKGATE_DIS_TGL       REG_BIT(19)
+#define   PSDUNIT_CLKGATE_DIS          REG_BIT(5)
 
 #define INF_UNIT_LEVEL_CLKGATE         _MMIO(0x9560)
 #define   CGPSF_CLKGATE_DIS            (1 << 3)
@@ -9405,11 +9411,9 @@ enum skl_power_gate {
 #define _ICL_AUX_REG_IDX(pw_idx)       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
 #define _ICL_AUX_ANAOVRD1_A            0x162398
 #define _ICL_AUX_ANAOVRD1_B            0x6C398
-#define _TGL_AUX_ANAOVRD1_C            0x160398
 #define ICL_AUX_ANAOVRD1(pw_idx)       _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
                                                    _ICL_AUX_ANAOVRD1_A, \
-                                                   _ICL_AUX_ANAOVRD1_B, \
-                                                   _TGL_AUX_ANAOVRD1_C))
+                                                   _ICL_AUX_ANAOVRD1_B))
 #define   ICL_AUX_ANAOVRD1_LDO_BYPASS  (1 << 7)
 #define   ICL_AUX_ANAOVRD1_ENABLE      (1 << 0)
 
@@ -11994,7 +11998,7 @@ enum skl_power_gate {
 /* This register controls the Display State Buffer (DSB) engines. */
 #define _DSBSL_INSTANCE_BASE           0x70B00
 #define DSBSL_INSTANCE(pipe, id)       (_DSBSL_INSTANCE_BASE + \
-                                        (pipe) * 0x1000 + (id) * 100)
+                                        (pipe) * 0x1000 + (id) * 0x100)
 #define DSB_HEAD(pipe, id)             _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
 #define DSB_TAIL(pipe, id)             _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
 #define DSB_CTRL(pipe, id)             _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
index bbd71af00a913325a7e0990c4e3dfed097eaeb11..765bec89fc0decbf7779c7475247610fc9dbe1b8 100644 (file)
@@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq)
 }
 
 static int
-__i915_request_await_execution(struct i915_request *rq,
-                              struct i915_request *signal,
-                              void (*hook)(struct i915_request *rq,
-                                           struct dma_fence *signal),
-                              gfp_t gfp)
+__await_execution(struct i915_request *rq,
+                 struct i915_request *signal,
+                 void (*hook)(struct i915_request *rq,
+                              struct dma_fence *signal),
+                 gfp_t gfp)
 {
        struct execute_cb *cb;
 
@@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq,
        }
        spin_unlock_irq(&signal->lock);
 
+       /* Copy across semaphore status as we need the same behaviour */
+       rq->sched.flags |= signal->sched.flags;
        return 0;
 }
 
@@ -811,31 +813,21 @@ already_busywaiting(struct i915_request *rq)
 }
 
 static int
-emit_semaphore_wait(struct i915_request *to,
-                   struct i915_request *from,
-                   gfp_t gfp)
+__emit_semaphore_wait(struct i915_request *to,
+                     struct i915_request *from,
+                     u32 seqno)
 {
        const int has_token = INTEL_GEN(to->i915) >= 12;
        u32 hwsp_offset;
-       int len;
+       int len, err;
        u32 *cs;
 
        GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
 
-       /* Just emit the first semaphore we see as request space is limited. */
-       if (already_busywaiting(to) & from->engine->mask)
-               goto await_fence;
-
-       if (i915_request_await_start(to, from) < 0)
-               goto await_fence;
-
-       /* Only submit our spinner after the signaler is running! */
-       if (__i915_request_await_execution(to, from, NULL, gfp))
-               goto await_fence;
-
        /* We need to pin the signaler's HWSP until we are finished reading. */
-       if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
-               goto await_fence;
+       err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
+       if (err)
+               return err;
 
        len = 4;
        if (has_token)
@@ -858,7 +850,7 @@ emit_semaphore_wait(struct i915_request *to,
                 MI_SEMAPHORE_POLL |
                 MI_SEMAPHORE_SAD_GTE_SDD) +
                has_token;
-       *cs++ = from->fence.seqno;
+       *cs++ = seqno;
        *cs++ = hwsp_offset;
        *cs++ = 0;
        if (has_token) {
@@ -867,6 +859,28 @@ emit_semaphore_wait(struct i915_request *to,
        }
 
        intel_ring_advance(to, cs);
+       return 0;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+                   struct i915_request *from,
+                   gfp_t gfp)
+{
+       /* Just emit the first semaphore we see as request space is limited. */
+       if (already_busywaiting(to) & from->engine->mask)
+               goto await_fence;
+
+       if (i915_request_await_start(to, from) < 0)
+               goto await_fence;
+
+       /* Only submit our spinner after the signaler is running! */
+       if (__await_execution(to, from, NULL, gfp))
+               goto await_fence;
+
+       if (__emit_semaphore_wait(to, from, from->fence.seqno))
+               goto await_fence;
+
        to->sched.semaphores |= from->engine->mask;
        to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
        return 0;
@@ -980,6 +994,57 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
        return 0;
 }
 
+static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
+                                         struct dma_fence *fence)
+{
+       return __intel_timeline_sync_is_later(tl,
+                                             fence->context,
+                                             fence->seqno - 1);
+}
+
+static int intel_timeline_sync_set_start(struct intel_timeline *tl,
+                                        const struct dma_fence *fence)
+{
+       return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+}
+
+static int
+__i915_request_await_execution(struct i915_request *to,
+                              struct i915_request *from,
+                              void (*hook)(struct i915_request *rq,
+                                           struct dma_fence *signal))
+{
+       int err;
+
+       /* Submit both requests at the same time */
+       err = __await_execution(to, from, hook, I915_FENCE_GFP);
+       if (err)
+               return err;
+
+       /* Squash repeated dependencies to the same timelines */
+       if (intel_timeline_sync_has_start(i915_request_timeline(to),
+                                         &from->fence))
+               return 0;
+
+       /* Ensure both start together [after all semaphores in signal] */
+       if (intel_engine_has_semaphores(to->engine))
+               err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+       else
+               err = i915_request_await_start(to, from);
+       if (err < 0)
+               return err;
+
+       /* Couple the dependency tree for PI on this exposed to->fence */
+       if (to->engine->schedule) {
+               err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+               if (err < 0)
+                       return err;
+       }
+
+       return intel_timeline_sync_set_start(i915_request_timeline(to),
+                                            &from->fence);
+}
+
 int
 i915_request_await_execution(struct i915_request *rq,
                             struct dma_fence *fence,
@@ -1013,8 +1078,7 @@ i915_request_await_execution(struct i915_request *rq,
                if (dma_fence_is_i915(fence))
                        ret = __i915_request_await_execution(rq,
                                                             to_request(fence),
-                                                            hook,
-                                                            I915_FENCE_GFP);
+                                                            hook);
                else
                        ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
                                                            I915_FENCE_TIMEOUT,
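
The refactor above splits the semaphore emission into __emit_semaphore_wait() so that __i915_request_await_execution() can wait on from->fence.seqno - 1: on a single timeline the breadcrumb for request N - 1 is written before request N runs, so seqno N - 1 having signaled means request N has at least started. The sync_has_start()/sync_set_start() helpers use the same seqno - 1 convention to squash repeated waits on one timeline. A hedged sketch of the convention (names hypothetical, using the wrap-safe comparison style i915 favours):

    #include <stdbool.h>
    #include <stdint.h>

    /* Has the request that owns fence_seqno started? Equivalent to asking
     * whether seqno - 1 has completed on the same timeline. */
    static bool request_has_started(uint32_t hwsp_seqno, uint32_t fence_seqno)
    {
            return (int32_t)(hwsp_seqno - (fence_seqno - 1)) >= 0;
    }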
index 010d67f48ad9448f96887a22c9bb81710d976304..247a9671bca53715c0650ecbe45a61d151c84fd6 100644 (file)
@@ -474,7 +474,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-               GEM_BUG_ON(!node_signaled(dep->signaler));
                GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
                list_del(&dep->wait_link);
index 07552cd544f21d29d1fce2051829d1c55fe3afee..8538ee7a521de21b6006304e8577865a58498efc 100644 (file)
@@ -78,12 +78,11 @@ static const struct dma_fence_ops fence_ops = {
 void dma_fence_work_init(struct dma_fence_work *f,
                         const struct dma_fence_work_ops *ops)
 {
+       f->ops = ops;
        spin_lock_init(&f->lock);
        dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
        i915_sw_fence_init(&f->chain, fence_notify);
        INIT_WORK(&f->work, fence_work);
-
-       f->ops = ops;
 }
 
 int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
index e5512f26e20a69f22ae0f8797d736d41fdfe1550..01c822256b395d5edd7b0ee6e2cf59cab5ad6e46 100644 (file)
@@ -1104,8 +1104,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                return err;
 
        if (flags & EXEC_OBJECT_WRITE) {
-               if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
-                       i915_active_add_request(&obj->frontbuffer->write, rq);
+               struct intel_frontbuffer *front;
+
+               front = __intel_frontbuffer_get(obj);
+               if (unlikely(front)) {
+                       if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+                               i915_active_add_request(&front->write, rq);
+                       intel_frontbuffer_put(front);
+               }
 
                dma_resv_add_excl_fence(vma->resv, &rq->fence);
                obj->write_domain = I915_GEM_DOMAIN_RENDER;
index 809bff955b5acecf0c28a4f71ccf50a1d414c7e9..86379eddc908301741c0c83bc4f882865adb074d 100644 (file)
@@ -4291,8 +4291,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
                                &crtc_state->wm.skl.optimal.planes[plane_id];
 
                        if (plane_id == PLANE_CURSOR) {
-                               if (WARN_ON(wm->wm[level].min_ddb_alloc >
-                                           total[PLANE_CURSOR])) {
+                               if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+                                       WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
                                        blocks = U32_MAX;
                                        break;
                                }
@@ -6565,6 +6565,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
        /* WaEnable32PlaneMode:icl */
        I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
                   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
+
+       /*
+        * Wa_1408615072:icl,ehl  (vsunit)
+        * Wa_1407596294:icl,ehl  (hsunit)
+        */
+       intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
+                        0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+       /* Wa_1407352427:icl,ehl */
+       intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+                        0, PSDUNIT_CLKGATE_DIS);
 }
 
 static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
index 35cc69a3a1b99abcb7e7fa181bac46abec8fcd74..05364eca20f7537d6ae6f651ba3c065b4305af72 100644 (file)
@@ -25,6 +25,7 @@
 #ifndef __I915_SELFTESTS_RANDOM_H__
 #define __I915_SELFTESTS_RANDOM_H__
 
+#include <linux/math64.h>
 #include <linux/random.h>
 
 #include "../i915_selftest.h"
index d6214d3c8b337b5b6ab4c1f19fb17fbca003f41c..ef4c630afe3fca0b3e8158f044103dc762782104 100644 (file)
@@ -935,11 +935,13 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
        for_each_available_child_of_node(dev->of_node, child) {
                panel = of_drm_find_panel(child);
                if (IS_ERR(panel)) {
-                       dev_err(dev, "failed to find panel try bridge (%lu)\n",
+                       dev_err(dev, "failed to find panel try bridge (%ld)\n",
                                PTR_ERR(panel));
+                       panel = NULL;
+
                        bridge = of_drm_find_bridge(child);
                        if (IS_ERR(bridge)) {
-                               dev_err(dev, "failed to find bridge (%lu)\n",
+                               dev_err(dev, "failed to find bridge (%ld)\n",
                                        PTR_ERR(bridge));
                                return PTR_ERR(bridge);
                        }
index f80a8ba759770f82fa435cc97f5b63649477c52e..3305a94fc9305e1ead84aa1a8d359940e76a0151 100644 (file)
@@ -215,11 +215,12 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_ddp_comp *comp;
        int i, count = 0;
+       unsigned int local_index = plane - mtk_crtc->planes;
 
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                comp = mtk_crtc->ddp_comp[i];
-               if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) {
-                       *local_layer = plane->index - count;
+               if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
+                       *local_layer = local_index - count;
                        return comp;
                }
                count += mtk_ddp_comp_layer_nr(comp);
@@ -310,7 +311,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 
                plane_state = to_mtk_plane_state(plane->state);
                comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
-               mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
+               if (comp)
+                       mtk_ddp_comp_layer_config(comp, local_layer,
+                                                 plane_state);
        }
 
        return 0;
@@ -386,8 +389,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
                        comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
                                                          &local_layer);
 
-                       mtk_ddp_comp_layer_config(comp, local_layer,
-                                                 plane_state);
+                       if (comp)
+                               mtk_ddp_comp_layer_config(comp, local_layer,
+                                                         plane_state);
                        plane_state->pending.config = false;
                }
                mtk_crtc->pending_planes = false;
@@ -401,7 +405,9 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
        struct mtk_ddp_comp *comp;
 
        comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
-       return mtk_ddp_comp_layer_check(comp, local_layer, state);
+       if (comp)
+               return mtk_ddp_comp_layer_check(comp, local_layer, state);
+       return 0;
 }
 
 static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
index e9931bbbe846478efce7b294e001d1b916ed4493..d77c9f484ce3d04b9d8e0088cd2c01802b8c0881 100644 (file)
@@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
 static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
 {
        u32 timcon0, timcon1, timcon2, timcon3;
-       u32 ui, cycle_time;
+       u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
        struct mtk_phy_timing *timing = &dsi->phy_timing;
 
-       ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
-       cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+       timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+       timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+       timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+                            timing->da_hs_prepare;
+       timing->da_hs_trail = timing->da_hs_prepare + 1;
 
-       timing->lpx = NS_TO_CYCLE(60, cycle_time);
-       timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
-       timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
-       timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+       timing->ta_go = 4 * timing->lpx - 2;
+       timing->ta_sure = timing->lpx + 2;
+       timing->ta_get = 4 * timing->lpx;
+       timing->da_hs_exit = 2 * timing->lpx + 1;
 
-       timing->ta_go = 4 * timing->lpx;
-       timing->ta_sure = 3 * timing->lpx / 2;
-       timing->ta_get = 5 * timing->lpx;
-       timing->da_hs_exit = 2 * timing->lpx;
-
-       timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
-       timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
-
-       timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
-       timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
-       timing->clk_hs_exit = 2 * timing->lpx;
+       timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+       timing->clk_hs_post = timing->clk_hs_prepare + 8;
+       timing->clk_hs_trail = timing->clk_hs_prepare;
+       timing->clk_hs_zero = timing->clk_hs_trail * 4;
+       timing->clk_hs_exit = 2 * timing->clk_hs_trail;
 
        timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
                  timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
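
The rewritten mtk_dsi_phy_timconfig() above derives every D-PHY timing from the link rate in MHz: a requirement of T nanoseconds becomes roughly T * data_rate_mhz / 8000 byte-clock cycles, since the byte clock runs at data_rate / 8. A hedged helper showing the conversion (name hypothetical):

    #include <stdint.h>

    /* ns -> byte-clock cycles at the given link rate. E.g. the 60 ns LPX
     * minimum at 1000 Mbps gives 60 * 1000 / 8000 = 7 cycles; the driver
     * above then adds 1 as a margin. */
    static uint32_t ns_to_byteclk_cycles(uint32_t ns, uint32_t data_rate_mhz)
    {
            return (ns * data_rate_mhz) / 8000;
    }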
@@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
                        dsi_tmp_buf_bpp - 10);
 
        data_phy_cycles = timing->lpx + timing->da_hs_prepare +
-                                 timing->da_hs_zero + timing->da_hs_exit + 2;
+                         timing->da_hs_zero + timing->da_hs_exit + 3;
 
        if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
-               if (vm->hfront_porch * dsi_tmp_buf_bpp >
+               if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
                    data_phy_cycles * dsi->lanes + 18) {
-                       horizontal_frontporch_byte = vm->hfront_porch *
-                                                    dsi_tmp_buf_bpp -
-                                                    data_phy_cycles *
-                                                    dsi->lanes - 18;
+                       horizontal_frontporch_byte =
+                               vm->hfront_porch * dsi_tmp_buf_bpp -
+                               (data_phy_cycles * dsi->lanes + 18) *
+                               vm->hfront_porch /
+                               (vm->hfront_porch + vm->hback_porch);
+
+                       horizontal_backporch_byte =
+                               horizontal_backporch_byte -
+                               (data_phy_cycles * dsi->lanes + 18) *
+                               vm->hback_porch /
+                               (vm->hfront_porch + vm->hback_porch);
                } else {
                        DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
                        horizontal_frontporch_byte = vm->hfront_porch *
                                                     dsi_tmp_buf_bpp;
                }
        } else {
-               if (vm->hfront_porch * dsi_tmp_buf_bpp >
+               if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
                    data_phy_cycles * dsi->lanes + 12) {
-                       horizontal_frontporch_byte = vm->hfront_porch *
-                                                    dsi_tmp_buf_bpp -
-                                                    data_phy_cycles *
-                                                    dsi->lanes - 12;
+                       horizontal_frontporch_byte =
+                               vm->hfront_porch * dsi_tmp_buf_bpp -
+                               (data_phy_cycles * dsi->lanes + 12) *
+                               vm->hfront_porch /
+                               (vm->hfront_porch + vm->hback_porch);
+                       horizontal_backporch_byte = horizontal_backporch_byte -
+                               (data_phy_cycles * dsi->lanes + 12) *
+                               vm->hback_porch /
+                               (vm->hfront_porch + vm->hback_porch);
                } else {
                        DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
                        horizontal_frontporch_byte = vm->hfront_porch *
index 9ab27aecfcf313b04d7158d6dc6de230b9a7b91c..1bd6b6d15ffb304d38c9cb3bfb718e2964e7a75f 100644 (file)
@@ -64,6 +64,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
        },
 };
 
+static const struct meson_cvbs_mode *
+meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
+{
+       int i;
+
+       for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+               struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+               if (drm_mode_match(req_mode, &meson_mode->mode,
+                                  DRM_MODE_MATCH_TIMINGS |
+                                  DRM_MODE_MATCH_CLOCK |
+                                  DRM_MODE_MATCH_FLAGS |
+                                  DRM_MODE_MATCH_3D_FLAGS))
+                       return meson_mode;
+       }
+
+       return NULL;
+}
+
 /* Connector */
 
 static void meson_cvbs_connector_destroy(struct drm_connector *connector)
@@ -136,14 +155,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
                                        struct drm_crtc_state *crtc_state,
                                        struct drm_connector_state *conn_state)
 {
-       int i;
-
-       for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
-               struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
-
-               if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
-                       return 0;
-       }
+       if (meson_cvbs_get_mode(&crtc_state->mode))
+               return 0;
 
        return -EINVAL;
 }
@@ -191,24 +204,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
 {
+       const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
        struct meson_venc_cvbs *meson_venc_cvbs =
                                        encoder_to_meson_venc_cvbs(encoder);
        struct meson_drm *priv = meson_venc_cvbs->priv;
-       int i;
 
-       for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
-               struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+       if (meson_mode) {
+               meson_venci_cvbs_mode_set(priv, meson_mode->enci);
 
-               if (drm_mode_equal(mode, &meson_mode->mode)) {
-                       meson_venci_cvbs_mode_set(priv,
-                                                 meson_mode->enci);
-
-                       /* Setup 27MHz vclk2 for ENCI and VDAC */
-                       meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
-                                        MESON_VCLK_CVBS, MESON_VCLK_CVBS,
-                                        MESON_VCLK_CVBS, true);
-                       break;
-               }
+               /* Setup 27MHz vclk2 for ENCI and VDAC */
+               meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
+                                MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
        }
 }
 
index d43951caeea02b43cd740ee95c491b63d09c4e37..b113876c24283259475e59b8363cb5aef50fad03 100644 (file)
@@ -30,9 +30,8 @@ module_param_named(modeset, mgag200_modeset, int, 0400);
 static struct drm_driver driver;
 
 static const struct pci_device_id pciidlist[] = {
-       { PCI_VENDOR_ID_MATROX, 0x522, PCI_VENDOR_ID_SUN, 0x4852, 0, 0,
+       { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
-       { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
        { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
        { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
        { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
index c84f0a8b3f2cebf76f99a70c01e4377fde610d0e..ac678ace09a36aabc8826ff2d2a8c43170937e30 100644 (file)
@@ -138,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 
        size = resource_size(res);
 
-       ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+       ptr = devm_ioremap(&pdev->dev, res->start, size);
        if (!ptr) {
                DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
                return ERR_PTR(-ENOMEM);
index 43df86c38f58bcf061491cb718f66e215db40a7c..24f7700768dab9a7d715ff1b2de867eef4f4f9b7 100644 (file)
@@ -114,6 +114,7 @@ struct nv50_head_atom {
                u8 nhsync:1;
                u8 nvsync:1;
                u8 depth:4;
+               u8 bpc;
        } or;
 
        /* Currently only used for MST */
index 549486f1d93760a51c28bf10948e62a86613c428..63425e24601896cb31238ddbcba0b9b7fae03e41 100644 (file)
@@ -326,9 +326,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
                         * same size as the native one (e.g. different
                         * refresh rate)
                         */
-                       if (adjusted_mode->hdisplay == native_mode->hdisplay &&
-                           adjusted_mode->vdisplay == native_mode->vdisplay &&
-                           adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
+                       if (mode->hdisplay == native_mode->hdisplay &&
+                           mode->vdisplay == native_mode->vdisplay &&
+                           mode->type & DRM_MODE_TYPE_DRIVER)
                                break;
                        mode = native_mode;
                        asyc->scaler.full = true;
@@ -353,10 +353,20 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
                       struct drm_crtc_state *crtc_state,
                       struct drm_connector_state *conn_state)
 {
-       struct nouveau_connector *nv_connector =
-               nouveau_connector(conn_state->connector);
-       return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
-                                          nv_connector->native_mode);
+       struct drm_connector *connector = conn_state->connector;
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
+       struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+       int ret;
+
+       ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+                                         nv_connector->native_mode);
+       if (ret)
+               return ret;
+
+       if (crtc_state->mode_changed || crtc_state->connectors_changed)
+               asyh->or.bpc = connector->display_info.bpc;
+
+       return 0;
 }
 
 /******************************************************************************
@@ -770,32 +780,54 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
        struct nv50_mstm *mstm = mstc->mstm;
        struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
        int slots;
+       int ret;
+
+       ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+                                         mstc->native);
+       if (ret)
+               return ret;
+
+       if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+               return 0;
+
+       /*
+        * When restoring duplicated states, we need to make sure that the bw
+        * remains the same and avoid recalculating it, as the connector's bpc
+        * may have changed after the state was duplicated
+        */
+       if (!state->duplicated) {
+               const int clock = crtc_state->adjusted_mode.clock;
 
-       if (crtc_state->mode_changed || crtc_state->connectors_changed) {
                /*
-                * When restoring duplicated states, we need to make sure that
-                * the bw remains the same and avoid recalculating it, as the
-                * connector's bpc may have changed after the state was
-                * duplicated
+                * XXX: Since we don't use HDR in userspace quite yet, limit
+                * the bpc to 8 to save bandwidth on the topology. In the
+                * future, we'll want to properly fix this by dynamically
+                * selecting the highest possible bpc that would fit in the
+                * topology
                 */
-               if (!state->duplicated) {
-                       const int bpp = connector->display_info.bpc * 3;
-                       const int clock = crtc_state->adjusted_mode.clock;
+               asyh->or.bpc = min(connector->display_info.bpc, 8U);
+               asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
+       }
 
-                       asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
-               }
+       slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
+                                             asyh->dp.pbn);
+       if (slots < 0)
+               return slots;
 
-               slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
-                                                     mstc->port,
-                                                     asyh->dp.pbn);
-               if (slots < 0)
-                       return slots;
+       asyh->dp.tu = slots;
 
-               asyh->dp.tu = slots;
-       }
+       return 0;
+}
 
-       return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
-                                          mstc->native);
+static u8
+nv50_dp_bpc_to_depth(unsigned int bpc)
+{
+       switch (bpc) {
+       case  6: return 0x2;
+       case  8: return 0x5;
+       case 10: /* fall-through */
+       default: return 0x6;
+       }
 }
 
 static void
@@ -808,7 +840,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
        struct nv50_mstm *mstm = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
-       u8 proto, depth;
+       u8 proto;
        bool r;
 
        drm_connector_list_iter_begin(encoder->dev, &conn_iter);
@@ -837,14 +869,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
        else
                proto = 0x9;
 
-       switch (mstc->connector.display_info.bpc) {
-       case  6: depth = 0x2; break;
-       case  8: depth = 0x5; break;
-       case 10:
-       default: depth = 0x6; break;
-       }
-
-       mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth);
+       mstm->outp->update(mstm->outp, head->base.index, armh, proto,
+                          nv50_dp_bpc_to_depth(armh->or.bpc));
 
        msto->head = head;
        msto->mstc = mstc;
@@ -1498,20 +1524,14 @@ nv50_sor_enable(struct drm_encoder *encoder)
                                        lvds.lvds.script |= 0x0200;
                        }
 
-                       if (nv_connector->base.display_info.bpc == 8)
+                       if (asyh->or.bpc == 8)
                                lvds.lvds.script |= 0x0200;
                }
 
                nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
                break;
        case DCB_OUTPUT_DP:
-               if (nv_connector->base.display_info.bpc == 6)
-                       depth = 0x2;
-               else
-               if (nv_connector->base.display_info.bpc == 8)
-                       depth = 0x5;
-               else
-                       depth = 0x6;
+               depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
 
                if (nv_encoder->link & 1)
                        proto = 0x8;
@@ -1662,7 +1682,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
        nv50_outp_acquire(nv_encoder);
 
        nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       switch (nv_connector->base.display_info.bpc) {
+       switch (asyh->or.bpc) {
        case 10: asyh->or.depth = 0x6; break;
        case  8: asyh->or.depth = 0x5; break;
        case  6: asyh->or.depth = 0x2; break;
index 71c23bf1fe25915534fc0b61c46a760f500ce921..c9692df2b76cca51604c0fb008643ff26a972be0 100644 (file)
@@ -81,18 +81,17 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
                              struct nv50_head_atom *asyh,
                              struct nouveau_conn_atom *asyc)
 {
-       struct drm_connector *connector = asyc->state.connector;
        u32 mode = 0x00;
 
        if (asyc->dither.mode == DITHERING_MODE_AUTO) {
-               if (asyh->base.depth > connector->display_info.bpc * 3)
+               if (asyh->base.depth > asyh->or.bpc * 3)
                        mode = DITHERING_MODE_DYNAMIC2X2;
        } else {
                mode = asyc->dither.mode;
        }
 
        if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
-               if (connector->display_info.bpc >= 8)
+               if (asyh->or.bpc >= 8)
                        mode |= DITHERING_DEPTH_8BPC;
        } else {
                mode |= asyc->dither.depth;
index 5b413588b82303f805d9093a254655dad91f4d74..9a9a7f5003d3f370fe5f23926a836fd7ad0a6f37 100644 (file)
@@ -245,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
 void
 nouveau_conn_reset(struct drm_connector *connector)
 {
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_conn_atom *asyc;
 
-       if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
-               return;
+       if (drm_drv_uses_atomic_modeset(connector->dev)) {
+               if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
+                       return;
+
+               if (connector->state)
+                       nouveau_conn_atomic_destroy_state(connector,
+                                                         connector->state);
+
+               __drm_atomic_helper_connector_reset(connector, &asyc->state);
+       } else {
+               asyc = &nv_connector->properties_state;
+       }
 
-       if (connector->state)
-               nouveau_conn_atomic_destroy_state(connector, connector->state);
-       __drm_atomic_helper_connector_reset(connector, &asyc->state);
        asyc->dither.mode = DITHERING_MODE_AUTO;
        asyc->dither.depth = DITHERING_DEPTH_AUTO;
        asyc->scaler.mode = DRM_MODE_SCALE_NONE;
@@ -276,8 +284,14 @@ void
 nouveau_conn_attach_properties(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
        struct nouveau_display *disp = nouveau_display(dev);
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
+       struct nouveau_conn_atom *armc;
+
+       if (drm_drv_uses_atomic_modeset(connector->dev))
+               armc = nouveau_conn_atom(connector->state);
+       else
+               armc = &nv_connector->properties_state;
 
        /* Init DVI-I specific properties. */
        if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
@@ -748,9 +762,9 @@ static int
 nouveau_connector_set_property(struct drm_connector *connector,
                               struct drm_property *property, uint64_t value)
 {
-       struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+       struct nouveau_conn_atom *asyc = &nv_connector->properties_state;
        struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
        int ret;
 
index f43a8d63aef86e07c7078501ff4bad0e33ba937b..de84fb4708c7ab22f4e5abbf1aa5918500b3bc63 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <nvif/notify.h>
 
+#include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_dp_helper.h>
@@ -44,6 +45,60 @@ struct dcb_output;
 struct nouveau_backlight;
 #endif
 
+#define nouveau_conn_atom(p)                                                   \
+       container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+       struct drm_connector_state state;
+
+       struct {
+               /* The enum values specifically defined here match nv50/gf119
+                * hw values, and the code relies on this.
+                */
+               enum {
+                       DITHERING_MODE_OFF = 0x00,
+                       DITHERING_MODE_ON = 0x01,
+                       DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+                       DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+                       DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+                       DITHERING_MODE_AUTO
+               } mode;
+               enum {
+                       DITHERING_DEPTH_6BPC = 0x00,
+                       DITHERING_DEPTH_8BPC = 0x02,
+                       DITHERING_DEPTH_AUTO
+               } depth;
+       } dither;
+
+       struct {
+               int mode;       /* DRM_MODE_SCALE_* */
+               struct {
+                       enum {
+                               UNDERSCAN_OFF,
+                               UNDERSCAN_ON,
+                               UNDERSCAN_AUTO,
+                       } mode;
+                       u32 hborder;
+                       u32 vborder;
+               } underscan;
+               bool full;
+       } scaler;
+
+       struct {
+               int color_vibrance;
+               int vibrant_hue;
+       } procamp;
+
+       union {
+               struct {
+                       bool dither:1;
+                       bool scaler:1;
+                       bool procamp:1;
+               };
+               u8 mask;
+       } set;
+};
+
 struct nouveau_connector {
        struct drm_connector base;
        enum dcb_connector_type type;
@@ -63,6 +118,12 @@ struct nouveau_connector {
 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
        struct nouveau_backlight *backlight;
 #endif
+       /*
+        * Our connector property code expects a nouveau_conn_atom struct
+        * even on pre-nv50 where we do not support atomic. This embedded
+        * version gets used in the non-atomic modeset case.
+        */
+       struct nouveau_conn_atom properties_state;
 };
 
 static inline struct nouveau_connector *nouveau_connector(
@@ -121,61 +182,6 @@ extern int nouveau_ignorelid;
 extern int nouveau_duallink;
 extern int nouveau_hdmimhz;
 
-#include <drm/drm_crtc.h>
-#define nouveau_conn_atom(p)                                                   \
-       container_of((p), struct nouveau_conn_atom, state)
-
-struct nouveau_conn_atom {
-       struct drm_connector_state state;
-
-       struct {
-               /* The enum values specifically defined here match nv50/gf119
-                * hw values, and the code relies on this.
-                */
-               enum {
-                       DITHERING_MODE_OFF = 0x00,
-                       DITHERING_MODE_ON = 0x01,
-                       DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
-                       DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
-                       DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
-                       DITHERING_MODE_AUTO
-               } mode;
-               enum {
-                       DITHERING_DEPTH_6BPC = 0x00,
-                       DITHERING_DEPTH_8BPC = 0x02,
-                       DITHERING_DEPTH_AUTO
-               } depth;
-       } dither;
-
-       struct {
-               int mode;       /* DRM_MODE_SCALE_* */
-               struct {
-                       enum {
-                               UNDERSCAN_OFF,
-                               UNDERSCAN_ON,
-                               UNDERSCAN_AUTO,
-                       } mode;
-                       u32 hborder;
-                       u32 vborder;
-               } underscan;
-               bool full;
-       } scaler;
-
-       struct {
-               int color_vibrance;
-               int vibrant_hue;
-       } procamp;
-
-       union {
-               struct {
-                       bool dither:1;
-                       bool scaler:1;
-                       bool procamp:1;
-               };
-               u8 mask;
-       } set;
-};
-
 void nouveau_conn_attach_properties(struct drm_connector *);
 void nouveau_conn_reset(struct drm_connector *);
 struct drm_connector_state *
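
The move above lets struct nouveau_connector embed a nouveau_conn_atom, giving the pre-nv50 non-atomic path somewhere to keep property state. A minimal sketch of how a property helper can then pick its state source; nouveau_conn_get_atom() is a hypothetical name, not part of the patch:

	static struct nouveau_conn_atom *
	nouveau_conn_get_atom(struct drm_connector *connector)
	{
		struct nouveau_connector *nv_conn = nouveau_connector(connector);

		/* Atomic path: the state object hangs off the DRM connector. */
		if (drm_drv_uses_atomic_modeset(connector->dev))
			return nouveau_conn_atom(connector->state);

		/* Pre-nv50 non-atomic path: use the embedded copy. */
		return &nv_conn->properties_state;
	}
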
index 4c4e8a30a1ac3cb7e30662fd4adee69ff0074d5a..536ba93b0f463510607781b4d865ff444d35a0fa 100644 (file)
@@ -18,15 +18,18 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
 static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
                                   u32 flags)
 {
-       struct panfrost_device *pfdev = dev_get_drvdata(dev);
+       struct dev_pm_opp *opp;
        int err;
 
+       opp = devfreq_recommended_opp(dev, freq, flags);
+       if (IS_ERR(opp))
+               return PTR_ERR(opp);
+       dev_pm_opp_put(opp);
+
        err = dev_pm_opp_set_rate(dev, *freq);
        if (err)
                return err;
 
-       *freq = clk_get_rate(pfdev->clock);
-
        return 0;
 }
 
@@ -60,20 +63,10 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
        return 0;
 }
 
-static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
-{
-       struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
-
-       *freq = clk_get_rate(pfdev->clock);
-
-       return 0;
-}
-
 static struct devfreq_dev_profile panfrost_devfreq_profile = {
        .polling_ms = 50, /* ~3 frames */
        .target = panfrost_devfreq_target,
        .get_dev_status = panfrost_devfreq_get_dev_status,
-       .get_cur_freq = panfrost_devfreq_get_cur_freq,
 };
 
 int panfrost_devfreq_init(struct panfrost_device *pfdev)
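
The rewritten target callback follows the standard OPP pattern: clamp the requested frequency to a supported operating point, then let dev_pm_opp_set_rate() program the clock. Because the OPP core then tracks the resulting rate itself, the driver's get_cur_freq callback became redundant and is removed. A self-contained sketch of the same pattern for a generic OPP-backed devfreq driver (names illustrative):

	static int example_devfreq_target(struct device *dev, unsigned long *freq,
					  u32 flags)
	{
		struct dev_pm_opp *opp;

		/* Round *freq to the nearest supported operating point. */
		opp = devfreq_recommended_opp(dev, freq, flags);
		if (IS_ERR(opp))
			return PTR_ERR(opp);
		dev_pm_opp_put(opp);

		/* The OPP core programs the clk and remembers the rate. */
		return dev_pm_opp_set_rate(dev, *freq);
	}
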
index 9458dc6c750cf36397afcb692f65e5e702f07096..88b431a267af368ac60c2f83b049925f37fc7ad1 100644 (file)
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
                struct drm_file *file)
 {
+       struct panfrost_file_priv *priv = file->driver_priv;
        struct panfrost_gem_object *bo;
        struct drm_panfrost_create_bo *args = data;
+       struct panfrost_gem_mapping *mapping;
 
        if (!args->size || args->pad ||
            (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
-       args->offset = bo->node.start << PAGE_SHIFT;
+       mapping = panfrost_gem_mapping_get(bo, priv);
+       if (!mapping) {
+               drm_gem_object_put_unlocked(&bo->base.base);
+               return -EINVAL;
+       }
+
+       args->offset = mapping->mmnode.start << PAGE_SHIFT;
+       panfrost_gem_mapping_put(mapping);
 
        return 0;
 }
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
                  struct drm_panfrost_submit *args,
                  struct panfrost_job *job)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_object *bo;
+       unsigned int i;
+       int ret;
+
        job->bo_count = args->bo_handle_count;
 
        if (!job->bo_count)
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
        if (!job->implicit_fences)
                return -ENOMEM;
 
-       return drm_gem_objects_lookup(file_priv,
-                                     (void __user *)(uintptr_t)args->bo_handles,
-                                     job->bo_count, &job->bos);
+       ret = drm_gem_objects_lookup(file_priv,
+                                    (void __user *)(uintptr_t)args->bo_handles,
+                                    job->bo_count, &job->bos);
+       if (ret)
+               return ret;
+
+       job->mappings = kvmalloc_array(job->bo_count,
+                                      sizeof(struct panfrost_gem_mapping *),
+                                      GFP_KERNEL | __GFP_ZERO);
+       if (!job->mappings)
+               return -ENOMEM;
+
+       for (i = 0; i < job->bo_count; i++) {
+               struct panfrost_gem_mapping *mapping;
+
+               bo = to_panfrost_bo(job->bos[i]);
+               mapping = panfrost_gem_mapping_get(bo, priv);
+               if (!mapping) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               job->mappings[i] = mapping;
+       }
+
+       return ret;
 }
 
 /**
@@ -303,21 +340,26 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
        }
 
        /* Don't allow mmapping of heap objects as pages are not pinned. */
-       if (to_panfrost_bo(gem_obj)->is_heap)
-               return -EINVAL;
+       if (to_panfrost_bo(gem_obj)->is_heap) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret == 0)
                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
-       drm_gem_object_put_unlocked(gem_obj);
 
+out:
+       drm_gem_object_put_unlocked(gem_obj);
        return ret;
 }
 
 static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_get_bo_offset *args = data;
+       struct panfrost_gem_mapping *mapping;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;
 
@@ -328,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
        }
        bo = to_panfrost_bo(gem_obj);
 
-       args->offset = bo->node.start << PAGE_SHIFT;
-
+       mapping = panfrost_gem_mapping_get(bo, priv);
        drm_gem_object_put_unlocked(gem_obj);
+
+       if (!mapping)
+               return -EINVAL;
+
+       args->offset = mapping->mmnode.start << PAGE_SHIFT;
+       panfrost_gem_mapping_put(mapping);
        return 0;
 }
 
 static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_madvise *args = data;
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_gem_object *gem_obj;
+       struct panfrost_gem_object *bo;
+       int ret = 0;
 
        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
@@ -347,23 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                return -ENOENT;
        }
 
-       args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+       bo = to_panfrost_bo(gem_obj);
 
-       if (args->retained) {
-               struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
+       mutex_lock(&pfdev->shrinker_lock);
+       mutex_lock(&bo->mappings.lock);
+       if (args->madv == PANFROST_MADV_DONTNEED) {
+               struct panfrost_gem_mapping *first;
+
+               first = list_first_entry(&bo->mappings.list,
+                                        struct panfrost_gem_mapping,
+                                        node);
+
+               /*
+                * If we want to mark the BO purgeable, there must be only one
+                * user: the caller FD.
+                * We could do something smarter and mark the BO purgeable only
+                * when all its users have marked it purgeable, but globally
+                * visible/shared BOs are likely to never be marked purgeable
+                * anyway, so let's not bother.
+                */
+               if (!list_is_singular(&bo->mappings.list) ||
+                   WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+                       ret = -EINVAL;
+                       goto out_unlock_mappings;
+               }
+       }
 
-               mutex_lock(&pfdev->shrinker_lock);
+       args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
 
+       if (args->retained) {
                if (args->madv == PANFROST_MADV_DONTNEED)
-                       list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
+                       list_add_tail(&bo->base.madv_list,
+                                     &pfdev->shrinker_list);
                else if (args->madv == PANFROST_MADV_WILLNEED)
                        list_del_init(&bo->base.madv_list);
-
-               mutex_unlock(&pfdev->shrinker_lock);
        }
 
+out_unlock_mappings:
+       mutex_unlock(&bo->mappings.lock);
+       mutex_unlock(&pfdev->shrinker_lock);
+
        drm_gem_object_put_unlocked(gem_obj);
-       return 0;
+       return ret;
 }
 
 int panfrost_unstable_ioctl_check(void)
@@ -443,7 +518,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
 {
        struct panfrost_file_priv *panfrost_priv = file->driver_priv;
 
-       panfrost_perfcnt_close(panfrost_priv);
+       panfrost_perfcnt_close(file);
        panfrost_job_close(panfrost_priv);
 
        panfrost_mmu_pgtable_free(panfrost_priv);
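
All three ioctls above converge on one pattern: a BO no longer carries a single global GPU VA, so any user-visible offset has to be read through the per-file mapping, bracketed by a reference get/put. Condensed shape of that pattern, as used in the hunks above:

	mapping = panfrost_gem_mapping_get(bo, priv);   /* takes a kref */
	if (!mapping)
		return -EINVAL;          /* BO not mapped in this file's AS */

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);              /* drops the kref */
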
index deca0c30bbd436b7c1579e8d03f916c69999fa9c..17b654e1eb942fcff8c52d8cddc9bb46ce3a4188 100644 (file)
@@ -19,6 +19,22 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_device *pfdev = obj->dev->dev_private;
 
+       /*
+        * Make sure the BO is no longer inserted in the shrinker list before
+        * taking care of the destruction itself. If we don't do that we have a
+        * race condition between this function and what's done in
+        * panfrost_gem_shrinker_scan().
+        */
+       mutex_lock(&pfdev->shrinker_lock);
+       list_del_init(&bo->base.madv_list);
+       mutex_unlock(&pfdev->shrinker_lock);
+
+       /*
+        * If we still have mappings attached to the BO, there's a problem in
+        * our refcounting.
+        */
+       WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
        if (bo->sgts) {
                int i;
                int n_sgt = bo->base.base.size / SZ_2M;
@@ -33,15 +49,73 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
                kfree(bo->sgts);
        }
 
-       mutex_lock(&pfdev->shrinker_lock);
-       if (!list_empty(&bo->base.madv_list))
-               list_del(&bo->base.madv_list);
-       mutex_unlock(&pfdev->shrinker_lock);
-
        drm_gem_shmem_free_object(obj);
 }
 
-static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+                        struct panfrost_file_priv *priv)
+{
+       struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(iter, &bo->mappings.list, node) {
+               if (iter->mmu == &priv->mmu) {
+                       kref_get(&iter->refcount);
+                       mapping = iter;
+                       break;
+               }
+       }
+       mutex_unlock(&bo->mappings.lock);
+
+       return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+       struct panfrost_file_priv *priv;
+
+       if (mapping->active)
+               panfrost_mmu_unmap(mapping);
+
+       priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+       spin_lock(&priv->mm_lock);
+       if (drm_mm_node_allocated(&mapping->mmnode))
+               drm_mm_remove_node(&mapping->mmnode);
+       spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+       struct panfrost_gem_mapping *mapping;
+
+       mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+       panfrost_gem_teardown_mapping(mapping);
+       drm_gem_object_put_unlocked(&mapping->obj->base.base);
+       kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+       if (!mapping)
+               return;
+
+       kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+       struct panfrost_gem_mapping *mapping;
+
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(mapping, &bo->mappings.list, node)
+               panfrost_gem_teardown_mapping(mapping);
+       mutex_unlock(&bo->mappings.lock);
+}
+
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
        int ret;
        size_t size = obj->size;
@@ -49,6 +123,16 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
        struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_mapping *mapping;
+
+       mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+       if (!mapping)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&mapping->node);
+       kref_init(&mapping->refcount);
+       drm_gem_object_get(obj);
+       mapping->obj = bo;
 
        /*
         * Executable buffers cannot cross a 16MB boundary as the program
@@ -61,37 +145,48 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p
        else
                align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
-       bo->mmu = &priv->mmu;
+       mapping->mmu = &priv->mmu;
        spin_lock(&priv->mm_lock);
-       ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+       ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
                                         size >> PAGE_SHIFT, align, color, 0);
        spin_unlock(&priv->mm_lock);
        if (ret)
-               return ret;
+               goto err;
 
        if (!bo->is_heap) {
-               ret = panfrost_mmu_map(bo);
-               if (ret) {
-                       spin_lock(&priv->mm_lock);
-                       drm_mm_remove_node(&bo->node);
-                       spin_unlock(&priv->mm_lock);
-               }
+               ret = panfrost_mmu_map(mapping);
+               if (ret)
+                       goto err;
        }
+
+       mutex_lock(&bo->mappings.lock);
+       WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+       list_add_tail(&mapping->node, &bo->mappings.list);
+       mutex_unlock(&bo->mappings.lock);
+
+err:
+       if (ret)
+               panfrost_gem_mapping_put(mapping);
        return ret;
 }
 
-static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
+void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+       struct panfrost_gem_mapping *mapping = NULL, *iter;
 
-       if (bo->is_mapped)
-               panfrost_mmu_unmap(bo);
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(iter, &bo->mappings.list, node) {
+               if (iter->mmu == &priv->mmu) {
+                       mapping = iter;
+                       list_del(&iter->node);
+                       break;
+               }
+       }
+       mutex_unlock(&bo->mappings.lock);
 
-       spin_lock(&priv->mm_lock);
-       if (drm_mm_node_allocated(&bo->node))
-               drm_mm_remove_node(&bo->node);
-       spin_unlock(&priv->mm_lock);
+       panfrost_gem_mapping_put(mapping);
 }
 
 static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -131,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
        if (!obj)
                return NULL;
 
+       INIT_LIST_HEAD(&obj->mappings.list);
+       mutex_init(&obj->mappings.lock);
        obj->base.base.funcs = &panfrost_gem_funcs;
 
        return &obj->base.base;
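
The mapping lifetime above is the standard kref idiom. Lookups take their reference under bo->mappings.lock; since panfrost_gem_close() removes a mapping from the list under the same lock before dropping its reference, any mapping still found on the list is guaranteed a nonzero refcount when kref_get() runs. The final kref_put() then runs the release callback exactly once: MMU teardown, drop of the BO reference taken at open time, kfree(). Stripped to its generic shape:

	/* lookup: the lock pins list membership across find + get */
	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == &priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	/* release: teardown runs only when the last reference drops */
	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
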
index 50920819cc1698fce981f9e4fdd104ab631a9d3f..ca1bc9019600c839d384a6564a2dcf4d8e7be87a 100644 (file)
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
        struct drm_gem_shmem_object base;
        struct sg_table *sgts;
 
-       struct panfrost_mmu *mmu;
-       struct drm_mm_node node;
-       bool is_mapped          :1;
+       /*
+        * Use a list for now. If searching a mapping ever becomes the
+        * bottleneck, we should consider using an RB-tree, or even better,
+        * let the core store drm_gem_object_mapping entries (where we
+        * could place driver specific data) instead of drm_gem_object ones
+        * in its drm_file->object_idr table.
+        *
+        * struct drm_gem_object_mapping {
+        *      struct drm_gem_object *obj;
+        *      void *driver_priv;
+        * };
+        */
+       struct {
+               struct list_head list;
+               struct mutex lock;
+       } mappings;
+
        bool noexec             :1;
        bool is_heap            :1;
 };
 
+struct panfrost_gem_mapping {
+       struct list_head node;
+       struct kref refcount;
+       struct panfrost_gem_object *obj;
+       struct drm_mm_node mmnode;
+       struct panfrost_mmu *mmu;
+       bool active             :1;
+};
+
 static inline
 struct  panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 {
        return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
-static inline
-struct  panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
 {
-       return container_of(node, struct panfrost_gem_object, node);
+       return container_of(node, struct panfrost_gem_mapping, mmnode);
 }
 
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -45,6 +68,16 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
                                u32 flags,
                                uint32_t *handle);
 
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void panfrost_gem_close(struct drm_gem_object *obj,
+                       struct drm_file *file_priv);
+
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+                        struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
 void panfrost_gem_shrinker_init(struct drm_device *dev);
 void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
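
drm_mm_node_to_panfrost_mapping() above is plain container_of() arithmetic: given a pointer to the embedded drm_mm_node, subtract the member offset to recover the enclosing mapping. A standalone C illustration of the same trick:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mmnode { unsigned long start, size; };
	struct mapping { int id; struct mmnode node; };

	int main(void)
	{
		struct mapping m = { .id = 42 };
		struct mmnode *n = &m.node;   /* what drm_mm hands back */

		printf("id=%d\n", container_of(n, struct mapping, node)->id);
		return 0;
	}
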
 
index 458f0fa681113c062f14c175cba913a5a9e2dd90..f5dd7b29bc954909ad5d1508863c4b8a9e5db007 100644 (file)
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
 static bool panfrost_gem_purge(struct drm_gem_object *obj)
 {
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 
        if (!mutex_trylock(&shmem->pages_lock))
                return false;
 
-       panfrost_mmu_unmap(to_panfrost_bo(obj));
+       panfrost_gem_teardown_mappings(bo);
        drm_gem_shmem_purge_locked(obj);
 
        mutex_unlock(&shmem->pages_lock);
index d411eb6c8eb953451fa105ebe4b487373e91e01c..e364ee00f3d01df372d991307b166849bed674ab 100644 (file)
@@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref)
        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);
 
-       if (job->bos) {
+       if (job->mappings) {
                for (i = 0; i < job->bo_count; i++)
+                       panfrost_gem_mapping_put(job->mappings[i]);
+               kvfree(job->mappings);
+       }
+
+       if (job->bos) {
+               struct panfrost_gem_object *bo;
+
+               for (i = 0; i < job->bo_count; i++) {
+                       bo = to_panfrost_bo(job->bos[i]);
                        drm_gem_object_put_unlocked(job->bos[i]);
+               }
+
                kvfree(job->bos);
        }
 
index 62454128a792e916f95736a9458e5310fe17d622..bbd3ba97ff6779c64f02adf9115f2e94860ea51c 100644 (file)
@@ -32,6 +32,7 @@ struct panfrost_job {
 
        /* Exclusive fences we have taken from the BOs to wait for */
        struct dma_fence **implicit_fences;
+       struct panfrost_gem_mapping **mappings;
        struct drm_gem_object **bos;
        u32 bo_count;
 
index a3ed64a1f15ecd8fd686b99fce496ddb760ed938..763cfca886a73a7c90d8d6df9e0e4e36c7d2bf7d 100644 (file)
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
        return 0;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 {
+       struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
        struct sg_table *sgt;
        int prot = IOMMU_READ | IOMMU_WRITE;
 
-       if (WARN_ON(bo->is_mapped))
+       if (WARN_ON(mapping->active))
                return 0;
 
        if (bo->noexec)
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
        if (WARN_ON(IS_ERR(sgt)))
                return PTR_ERR(sgt);
 
-       mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
-       bo->is_mapped = true;
+       mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+                  prot, sgt);
+       mapping->active = true;
 
        return 0;
 }
 
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
 {
+       struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-       struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
-       u64 iova = bo->node.start << PAGE_SHIFT;
-       size_t len = bo->node.size << PAGE_SHIFT;
+       struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+       u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+       size_t len = mapping->mmnode.size << PAGE_SHIFT;
        size_t unmapped_len = 0;
 
-       if (WARN_ON(!bo->is_mapped))
+       if (WARN_ON(!mapping->active))
                return;
 
-       dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+       dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+               mapping->mmu->as, iova, len);
 
        while (unmapped_len < len) {
                size_t unmapped_page;
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
                unmapped_len += pgsize;
        }
 
-       panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
-       bo->is_mapped = false;
+       panfrost_mmu_flush_range(pfdev, mapping->mmu,
+                                mapping->mmnode.start << PAGE_SHIFT, len);
+       mapping->active = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
        free_io_pgtable_ops(mmu->pgtbl_ops);
 }
 
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
 {
-       struct panfrost_gem_object *bo = NULL;
+       struct panfrost_gem_mapping *mapping = NULL;
        struct panfrost_file_priv *priv;
        struct drm_mm_node *node;
        u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +423,9 @@ found_mmu:
        drm_mm_for_each_node(node, &priv->mm) {
                if (offset >= node->start &&
                    offset < (node->start + node->size)) {
-                       bo = drm_mm_node_to_panfrost_bo(node);
-                       drm_gem_object_get(&bo->base.base);
+                       mapping = drm_mm_node_to_panfrost_mapping(node);
+
+                       kref_get(&mapping->refcount);
                        break;
                }
        }
@@ -427,7 +433,7 @@ found_mmu:
        spin_unlock(&priv->mm_lock);
 out:
        spin_unlock(&pfdev->as_lock);
-       return bo;
+       return mapping;
 }
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                                       u64 addr)
 {
        int ret, i;
+       struct panfrost_gem_mapping *bomapping;
        struct panfrost_gem_object *bo;
        struct address_space *mapping;
        pgoff_t page_offset;
        struct sg_table *sgt;
        struct page **pages;
 
-       bo = addr_to_drm_mm_node(pfdev, as, addr);
-       if (!bo)
+       bomapping = addr_to_mapping(pfdev, as, addr);
+       if (!bomapping)
                return -ENOENT;
 
+       bo = bomapping->obj;
        if (!bo->is_heap) {
                dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
-                        bo->node.start << PAGE_SHIFT);
+                        bomapping->mmnode.start << PAGE_SHIFT);
                ret = -EINVAL;
                goto err_bo;
        }
-       WARN_ON(bo->mmu->as != as);
+       WARN_ON(bomapping->mmu->as != as);
 
        /* Assume 2MB alignment and size multiple */
        addr &= ~((u64)SZ_2M - 1);
        page_offset = addr >> PAGE_SHIFT;
-       page_offset -= bo->node.start;
+       page_offset -= bomapping->mmnode.start;
 
        mutex_lock(&bo->base.pages_lock);
 
@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                goto err_map;
        }
 
-       mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+       mmu_map_sg(pfdev, bomapping->mmu, addr,
+                  IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
 
-       bo->is_mapped = true;
+       bomapping->active = true;
 
        dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
-       drm_gem_object_put_unlocked(&bo->base.base);
+       panfrost_gem_mapping_put(bomapping);
 
        return 0;
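
The fault path above works entirely in page indices: drm_mm stores a mapping's GPU VA as mmnode.start pages, a faulting address is first rounded down to a 2 MB granule (heap BOs grow in 2 MB chunks), and page_offset is the distance from the mapping's start. A quick standalone check of that arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define SZ_2M (2UL * 1024 * 1024)

	int main(void)
	{
		uint64_t node_start = 0x80000;   /* page index from drm_mm */
		uint64_t fault = (node_start << PAGE_SHIFT) + 0x213456;

		fault &= ~((uint64_t)SZ_2M - 1); /* 2 MB align, as above */
		printf("page_offset=%llu\n",     /* prints 512 */
		       (unsigned long long)((fault >> PAGE_SHIFT) - node_start));
		return 0;
	}
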
 
index 7c5b6775ae230cc77f576c978667bd82d011f199..44fc2edf63ce66a13dc49126ae40f49f2b1506c8 100644 (file)
@@ -4,12 +4,12 @@
 #ifndef __PANFROST_MMU_H__
 #define __PANFROST_MMU_H__
 
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
 struct panfrost_file_priv;
 struct panfrost_mmu;
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
 
 int panfrost_mmu_init(struct panfrost_device *pfdev);
 void panfrost_mmu_fini(struct panfrost_device *pfdev);
index 2dba192bf198454982d54ab9403f253e0529c652..684820448be31c7d459a1f139b5003b8cacd1968 100644 (file)
@@ -25,7 +25,7 @@
 #define V4_SHADERS_PER_COREGROUP       4
 
 struct panfrost_perfcnt {
-       struct panfrost_gem_object *bo;
+       struct panfrost_gem_mapping *mapping;
        size_t bosize;
        void *buf;
        struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
        int ret;
 
        reinit_completion(&pfdev->perfcnt->dump_comp);
-       gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+       gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
        gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
        gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
        gpu_write(pfdev, GPU_INT_CLEAR,
@@ -67,9 +67,10 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
 }
 
 static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
-                                         struct panfrost_file_priv *user,
+                                         struct drm_file *file_priv,
                                          unsigned int counterset)
 {
+       struct panfrost_file_priv *user = file_priv->driver_priv;
        struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
        struct drm_gem_shmem_object *bo;
        u32 cfg;
@@ -88,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
-       perfcnt->bo = to_panfrost_bo(&bo->base);
-
        /* Map the perfcnt buf in the address space attached to file_priv. */
-       ret = panfrost_mmu_map(perfcnt->bo);
+       ret = panfrost_gem_open(&bo->base, file_priv);
        if (ret)
                goto err_put_bo;
 
+       perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+                                                   user);
+       if (!perfcnt->mapping) {
+               ret = -EINVAL;
+               goto err_close_bo;
+       }
+
        perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
        if (IS_ERR(perfcnt->buf)) {
                ret = PTR_ERR(perfcnt->buf);
-               goto err_put_bo;
+               goto err_put_mapping;
        }
 
        /*
@@ -153,18 +159,26 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
                gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
 
+       /* The BO ref is retained by the mapping. */
+       drm_gem_object_put_unlocked(&bo->base);
+
        return 0;
 
 err_vunmap:
-       drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+       drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+       panfrost_gem_mapping_put(perfcnt->mapping);
+err_close_bo:
+       panfrost_gem_close(&bo->base, file_priv);
 err_put_bo:
        drm_gem_object_put_unlocked(&bo->base);
        return ret;
 }
 
 static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
-                                          struct panfrost_file_priv *user)
+                                          struct drm_file *file_priv)
 {
+       struct panfrost_file_priv *user = file_priv->driver_priv;
        struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 
        if (user != perfcnt->user)
@@ -178,10 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
                  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
 
        perfcnt->user = NULL;
-       drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+       drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
        perfcnt->buf = NULL;
-       drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
-       perfcnt->bo = NULL;
+       panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+       panfrost_gem_mapping_put(perfcnt->mapping);
+       perfcnt->mapping = NULL;
        pm_runtime_mark_last_busy(pfdev->dev);
        pm_runtime_put_autosuspend(pfdev->dev);
 
@@ -191,7 +206,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
 int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
 {
-       struct panfrost_file_priv *pfile = file_priv->driver_priv;
        struct panfrost_device *pfdev = dev->dev_private;
        struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
        struct drm_panfrost_perfcnt_enable *req = data;
@@ -207,10 +221,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
 
        mutex_lock(&perfcnt->lock);
        if (req->enable)
-               ret = panfrost_perfcnt_enable_locked(pfdev, pfile,
+               ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
                                                     req->counterset);
        else
-               ret = panfrost_perfcnt_disable_locked(pfdev, pfile);
+               ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
        mutex_unlock(&perfcnt->lock);
 
        return ret;
@@ -248,15 +262,16 @@ out:
        return ret;
 }
 
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile)
+void panfrost_perfcnt_close(struct drm_file *file_priv)
 {
+       struct panfrost_file_priv *pfile = file_priv->driver_priv;
        struct panfrost_device *pfdev = pfile->pfdev;
        struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 
        pm_runtime_get_sync(pfdev->dev);
        mutex_lock(&perfcnt->lock);
        if (perfcnt->user == pfile)
-               panfrost_perfcnt_disable_locked(pfdev, pfile);
+               panfrost_perfcnt_disable_locked(pfdev, file_priv);
        mutex_unlock(&perfcnt->lock);
        pm_runtime_mark_last_busy(pfdev->dev);
        pm_runtime_put_autosuspend(pfdev->dev);
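
The enable path now routes the perfcnt buffer through the regular per-file mapping machinery instead of calling panfrost_mmu_map() on the bare BO, and once the mapping holds its own reference the local BO reference can be dropped. Condensed from the hunk above, error unwinding elided; the disable path undoes the same steps in reverse (vunmap, gem_close, mapping_put):

	bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize);
	panfrost_gem_open(&bo->base, file_priv);      /* map in caller's AS */
	perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
						    user);
	perfcnt->buf = drm_gem_shmem_vmap(&bo->base); /* CPU-side view */
	drm_gem_object_put_unlocked(&bo->base);       /* mapping keeps a ref */
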
index 13b8fdaa1b432175f0176cfc000876f3e897b7e9..8bbcf5f5fb3391ce625eedbcb4c1b32e8bc9ebb6 100644 (file)
@@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev);
 void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev);
 int panfrost_perfcnt_init(struct panfrost_device *pfdev);
 void panfrost_perfcnt_fini(struct panfrost_device *pfdev);
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile);
+void panfrost_perfcnt_close(struct drm_file *file_priv);
 int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
index 098bc9f40b983300a5bfbf28851cee016e929732..15bf8a207cb0f648abbb7d2540cf54e682e9734d 100644 (file)
@@ -443,7 +443,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
                                           mem->bus.size);
                else
                        mem->bus.addr =
-                               ioremap_nocache(mem->bus.base + mem->bus.offset,
+                               ioremap(mem->bus.base + mem->bus.offset,
                                                mem->bus.size);
                if (!mem->bus.addr)
                        return -ENOMEM;
index 83c4586665b4f3258f58fea49f162baf4ec642e9..81ac9b658a70a469661becf4272ba7ae1cc9ea3c 100644 (file)
@@ -95,7 +95,7 @@ struct cdn_dp_device {
        struct cdn_dp_port *port[MAX_PHY];
        u8 ports;
        u8 max_lanes;
-       u8 max_rate;
+       unsigned int max_rate;
        u8 lanes;
        int active_port;
 
index 68289b0b063a7a06c06e8d54e603fd4047fc05e9..68261c7f8c5fa6c4ed3d4d3d2e63eba919a9ba4e 100644 (file)
@@ -534,7 +534,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
                DRM_ERROR("Invalid dvo resource\n");
                return -ENOMEM;
        }
-       dvo->regs = devm_ioremap_nocache(dev, res->start,
+       dvo->regs = devm_ioremap(dev, res->start,
                        resource_size(res));
        if (!dvo->regs)
                return -ENOMEM;
index 8f7bf33815fd18f6e0b3f11eb078b98282a57e8c..2bb32009d117a050d2d1dda121aec678f0ea31ec 100644 (file)
@@ -759,14 +759,14 @@ static int sti_hda_probe(struct platform_device *pdev)
                DRM_ERROR("Invalid hda resource\n");
                return -ENOMEM;
        }
-       hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       hda->regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!hda->regs)
                return -ENOMEM;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                        "video-dacs-ctrl");
        if (res) {
-               hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+               hda->video_dacs_ctrl = devm_ioremap(dev, res->start,
                                resource_size(res));
                if (!hda->video_dacs_ctrl)
                        return -ENOMEM;
index 814560ead4e184063a9fc7b33ac4bdb7e9795b08..64ed102033c8b32d4af161f2ea9b7eea36c195c4 100644 (file)
@@ -1393,7 +1393,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
                ret = -ENOMEM;
                goto release_adapter;
        }
-       hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!hdmi->regs) {
                ret = -ENOMEM;
                goto release_adapter;
index 5767e93dd1cdc034c520adac8b774f745c1f185a..c36a8da373cb5d81b0269d31a1295f07b2b83bbb 100644 (file)
@@ -860,7 +860,7 @@ static int sti_tvout_probe(struct platform_device *pdev)
                DRM_ERROR("Invalid glue resource\n");
                return -ENOMEM;
        }
-       tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!tvout->regs)
                return -ENOMEM;
 
index 0b17ac8a3faaa840095a8665083143060c15b4c4..5e5f82b6a5d940809631a62a8493268d96eee683 100644 (file)
@@ -393,7 +393,7 @@ static int vtg_probe(struct platform_device *pdev)
                DRM_ERROR("Get memory resource failed\n");
                return -ENOMEM;
        }
-       vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!vtg->regs) {
                DRM_ERROR("failed to remap I/O memory\n");
                return -ENOMEM;
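
These hunks are all the same mechanical conversion: ioremap_nocache() had become identical to ioremap() on every architecture (both return an uncached mapping), so the alias is being retired tree-wide. The devm_ variants additionally tie the unmap to device teardown, which is why none of these probe paths need an explicit iounmap(). Minimal shape of the pattern:

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOMEM;

	/* Released automatically when the device is unbound. */
	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;
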
index a7c4654445c75b652a8b3d8462f2d52023994ed9..68d4644ac2dcc32a2d4118d824f13eba5d284921 100644 (file)
@@ -685,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
        struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
 
        cec_unregister_adapter(hdmi->cec_adap);
-       drm_connector_cleanup(&hdmi->connector);
-       drm_encoder_cleanup(&hdmi->encoder);
        i2c_del_adapter(hdmi->i2c);
        i2c_put_adapter(hdmi->ddc_i2c);
        clk_disable_unprepare(hdmi->mod_clk);
index 42651d737c55b3bb1285a0008608e66861588c1d..c81cdce6ed559b0e0b67b30d2d13b5ba5d020c22 100644 (file)
@@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 
        WARN_ON(!tcon->quirks->has_channel_0);
 
-       tcon->dclk_min_div = 1;
+       tcon->dclk_min_div = tcon->quirks->dclk_min_div;
        tcon->dclk_max_div = 127;
        sun4i_tcon0_mode_set_common(tcon, mode);
 
@@ -1426,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
 static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
        .has_channel_0          = true,
        .has_channel_1          = true,
+       .dclk_min_div           = 4,
        .set_mux                = sun4i_a10_tcon_set_mux,
 };
 
 static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
        .has_channel_0          = true,
        .has_channel_1          = true,
+       .dclk_min_div           = 4,
        .set_mux                = sun5i_a13_tcon_set_mux,
 };
 
@@ -1440,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
        .has_channel_1          = true,
        .has_lvds_alt           = true,
        .needs_de_be_mux        = true,
+       .dclk_min_div           = 1,
        .set_mux                = sun6i_tcon_set_mux,
 };
 
@@ -1447,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
        .has_channel_0          = true,
        .has_channel_1          = true,
        .needs_de_be_mux        = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
        .has_channel_0          = true,
        .has_channel_1          = true,
+       .dclk_min_div           = 4,
        /* Same display pipeline structure as A10 */
        .set_mux                = sun4i_a10_tcon_set_mux,
 };
@@ -1459,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
 static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
        .has_channel_0          = true,
        .has_lvds_alt           = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
        .supports_lvds          = true,
        .has_channel_0          = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1477,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
 
 static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
        .has_channel_0          = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
-       .has_channel_0  = true,
-       .needs_edp_reset = true,
+       .has_channel_0          = true,
+       .needs_edp_reset        = true,
+       .dclk_min_div           = 1,
 };
 
 static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
index f9f1fe80b206c36772021d88beb54b2f8551eea6..a62ec826ae71e0d429d334c97f0c9e4f0a3a2d8f 100644 (file)
@@ -224,6 +224,7 @@ struct sun4i_tcon_quirks {
        bool    needs_de_be_mux; /* sun6i needs mux to select backend */
        bool    needs_edp_reset; /* a80 edp reset needed for tcon0 access */
        bool    supports_lvds;   /* Does the TCON support an LVDS output? */
+       u8      dclk_min_div;   /* minimum divider for TCON0 DCLK */
 
        /* callback to handle tcon muxing options */
        int     (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
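
The minimum TCON0 dot-clock divider stops being a hardcoded 1 and becomes a per-SoC quirk: the tables set 4 on the older A10/A13/A20 generation and 1 elsewhere, and the mode-set code reads tcon->quirks->dclk_min_div instead. Generic shape of this compatible-keyed quirk-table pattern:

	struct example_quirks {
		u8 dclk_min_div;  /* smallest divider the dotclock supports */
	};

	static const struct example_quirks old_soc_quirks = { .dclk_min_div = 4 };
	static const struct example_quirks new_soc_quirks = { .dclk_min_div = 1 };

	/* selected via of_device_id->data at probe, then consumed as:
	 *	tcon->dclk_min_div = tcon->quirks->dclk_min_div;
	 */
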
index 2a9e675973757bc875b0135afb923777c48c0956..a3612369750fd73d530b7525a9ad1a57b32908e7 100644 (file)
@@ -256,7 +256,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
                goto init_failed;
        }
 
-       priv->mmio = ioremap_nocache(res->start, resource_size(res));
+       priv->mmio = ioremap(res->start, resource_size(res));
        if (!priv->mmio) {
                dev_err(dev, "failed to ioremap\n");
                ret = -ENOMEM;
index 6b0883a1776e1c8645024379242b3eabfd2b1f33..97fd1dafc3e850f46c258d3b0a6407aec7289346 100644 (file)
@@ -218,7 +218,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
-                       addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+                       addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
@@ -565,7 +565,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
-                       map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+                       map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
index 390524143139b6999a05f3413726780558114e2c..1635a9ff47943a06e9eea053571eb96f301678c9 100644 (file)
@@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+               virtio_gpu_array_lock_resv(objs);
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, 0,
                         plane->state->crtc_w,
index 8063b1d567b1dd2fda9a0e9ed150c9d258f3d810..e6e4c841fb06f4c0dd1c73bf086b466c609fd23f 100644 (file)
@@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field,
                      struct hid_usage *usage, __s32 value)
 {
        if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 &&
-           (usage->hid & HID_USAGE) != 0x00 && !usage->type) {
+           (usage->hid & HID_USAGE) != 0x00 &&
+           (usage->hid & HID_USAGE) != 0xff && !usage->type) {
                hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n",
                         usage->hid & HID_USAGE);
        }
index e0b241bd3070c5a15e9ac90ecc81573aad6452ec..851fe54ea59e7c2636ab204372c49b749a136014 100644 (file)
@@ -288,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
        offset = report->size;
        report->size += parser->global.report_size * parser->global.report_count;
 
+       /* Total size check: Allow for possible report index byte */
+       if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+               hid_err(parser->device, "report is too long\n");
+               return -1;
+       }
+
        if (!parser->local.usage_index) /* Ignore padding fields */
                return 0;
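
report->size is accumulated in bits, while HID_MAX_BUFFER_SIZE counts bytes and one byte must stay free for a possible report ID prefix, hence the bound of (HID_MAX_BUFFER_SIZE - 1) << 3 bits. A standalone check of the arithmetic (the constant here is illustrative; see hid.h for the real value):

	#include <stdio.h>

	#define HID_MAX_BUFFER_SIZE 4096   /* illustrative; defined in hid.h */

	int main(void)
	{
		unsigned int max_bits = (HID_MAX_BUFFER_SIZE - 1) << 3;

		/* 4096-byte buffer, 1 byte for the report ID: 4095 * 8 bits */
		printf("max payload: %u bits (%u bytes)\n",
		       max_bits, max_bits >> 3);
		return 0;
	}
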
 
index 7e1689ef35f5de36ff681bc931539971e39a8b39..3a400ce603c4f39cd3e21e8210f6f06a7d4366bd 100644 (file)
 #define USB_VENDOR_ID_ITE               0x048d
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA   0x8386
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2  0x8350
+#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720   0x837a
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA900       0x8396
 #define USB_DEVICE_ID_ITE8595          0x8595
 
 #define USB_DEVICE_ID_LG_MULTITOUCH    0x0064
 #define USB_DEVICE_ID_LG_MELFAS_MT     0x6007
 #define I2C_DEVICE_ID_LG_8001          0x8001
+#define I2C_DEVICE_ID_LG_7010          0x7010
 
 #define USB_VENDOR_ID_LOGITECH         0x046d
 #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
 #define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012       0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
 
index 63855f275a38c3f39eaa917744909a2ec97b9c8f..dea9cc65bf8007bc101c26b308c28e32b7714abb 100644 (file)
@@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
        }
 
 mapped:
-       if (device->driver->input_mapped && device->driver->input_mapped(device,
-                               hidinput, field, usage, &bit, &max) < 0)
-               goto ignore;
+       if (device->driver->input_mapped &&
+           device->driver->input_mapped(device, hidinput, field, usage,
+                                        &bit, &max) < 0) {
+               /*
+                * The driver indicated that no further generic handling
+                * of the usage is desired.
+                */
+               return;
+       }
 
        set_bit(usage->type, input->evbit);
 
@@ -1215,9 +1221,11 @@ mapped:
                set_bit(MSC_SCAN, input->mscbit);
        }
 
-ignore:
        return;
 
+ignore:
+       usage->type = 0;
+       usage->code = 0;
 }
 
 static void hidinput_handle_scroll(struct hid_usage *usage,
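
The hunk separates two outcomes that used to share the ignore path: a negative return from the driver's input_mapped() now means "mapped, but skip the generic event-bit setup", so the function returns with the usage left intact, while the true ignore path additionally zeroes usage->type and usage->code so later event processing cannot act on stale values from an earlier mapping attempt. A sketch of the two exits, with hypothetical types:

	struct usage { unsigned type, code; };

	static void configure(struct usage *u, int driver_verdict, int mapped)
	{
		if (driver_verdict < 0)
			return;          /* driver handled it; keep u as-is */

		if (!mapped) {           /* the "goto ignore" path */
			u->type = 0;     /* scrub, so event code sees it as */
			u->code = 0;     /* unmapped rather than stale      */
			return;
		}

		/* ... generic event-bit setup would follow here ... */
	}
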
index a45f2352618d390575f34795219e1d211c938be0..c436e12feb23315f141362dccc38c2c549c8d9f9 100644 (file)
@@ -40,6 +40,9 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id ite_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
        { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+       /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
+                        USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
index cd9193078525be7a653fed0e09851880e2ad110c..70e1cb928bf038876cd603e99302ea7e5687c426 100644 (file)
@@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click,
 #define HIDPP_REPORT_LONG_LENGTH               20
 #define HIDPP_REPORT_VERY_LONG_MAX_LENGTH      64
 
+#define HIDPP_REPORT_SHORT_SUPPORTED           BIT(0)
+#define HIDPP_REPORT_LONG_SUPPORTED            BIT(1)
+#define HIDPP_REPORT_VERY_LONG_SUPPORTED       BIT(2)
+
 #define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS      0x03
 #define HIDPP_SUB_ID_ROLLER                    0x05
 #define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS          0x06
@@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
 #define HIDPP_CAPABILITY_HIDPP20_BATTERY       BIT(1)
 #define HIDPP_CAPABILITY_BATTERY_MILEAGE       BIT(2)
 #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS  BIT(3)
+#define HIDPP_CAPABILITY_BATTERY_VOLTAGE       BIT(4)
 
 /*
  * There are two hidpp protocols in use, the first version hidpp10 is known
@@ -135,12 +140,15 @@ struct hidpp_report {
 struct hidpp_battery {
        u8 feature_index;
        u8 solar_feature_index;
+       u8 voltage_feature_index;
        struct power_supply_desc desc;
        struct power_supply *ps;
        char name[64];
        int status;
        int capacity;
        int level;
+       int voltage;
+       int charge_type;
        bool online;
 };
 
@@ -183,9 +191,12 @@ struct hidpp_device {
 
        unsigned long quirks;
        unsigned long capabilities;
+       u8 supported_reports;
 
        struct hidpp_battery battery;
        struct hidpp_scroll_counter vertical_wheel_counter;
+
+       u8 wireless_feature_index;
 };
 
 /* HID++ 1.0 error codes */
@@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
        struct hidpp_report *message;
        int ret, max_count;
 
+       /* Send as long report if short reports are not supported. */
+       if (report_id == REPORT_ID_HIDPP_SHORT &&
+           !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED))
+               report_id = REPORT_ID_HIDPP_LONG;
+
        switch (report_id) {
        case REPORT_ID_HIDPP_SHORT:
                max_count = HIDPP_REPORT_SHORT_LENGTH - 4;
@@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question,
            (answer->fap.params[0] == question->fap.funcindex_clientid);
 }
 
-static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
+static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp,
+               struct hidpp_report *report)
 {
-       return (report->report_id == REPORT_ID_HIDPP_SHORT) &&
-               (report->rap.sub_id == 0x41);
+       return (hidpp->wireless_feature_index &&
+               (report->fap.feature_index == hidpp->wireless_feature_index)) ||
+               ((report->report_id == REPORT_ID_HIDPP_SHORT) &&
+               (report->rap.sub_id == 0x41));
 }
 
 /**
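
Devices that lack the legacy 0x41 short-report connect notification signal connection changes through the 0x1d4b Wireless Device Status feature instead, so the predicate is widened to also accept a report on that (per-device) feature index. A standalone restatement of the check:

	#include <stdio.h>

	#define REPORT_ID_HIDPP_SHORT 0x10

	static int is_connect_event(unsigned report_id, unsigned sub_id,
				    unsigned feature_index, unsigned wireless_index)
	{
		return (wireless_index && feature_index == wireless_index) ||
		       (report_id == REPORT_ID_HIDPP_SHORT && sub_id == 0x41);
	}

	int main(void)
	{
		printf("%d\n", is_connect_event(0x11, 0, 0x08, 0x08)); /* 1: feature path */
		printf("%d\n", is_connect_event(0x10, 0x41, 0, 0));    /* 1: legacy path  */
		return 0;
	}
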
@@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp,
        return 0;
 }
 
+/* -------------------------------------------------------------------------- */
+/* 0x1001: Battery voltage                                                    */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001
+
+#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00
+
+#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00
+
+static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+                                               int *level, int *charge_type)
+{
+       int status;
+
+       long charge_sts = (long)data[2];
+
+       *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+       switch (data[2] & 0xe0) {
+       case 0x00:
+               status = POWER_SUPPLY_STATUS_CHARGING;
+               break;
+       case 0x20:
+               status = POWER_SUPPLY_STATUS_FULL;
+               *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+               break;
+       case 0x40:
+               status = POWER_SUPPLY_STATUS_DISCHARGING;
+               break;
+       case 0xe0:
+               status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+               break;
+       default:
+               status = POWER_SUPPLY_STATUS_UNKNOWN;
+       }
+
+       *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+       if (test_bit(3, &charge_sts)) {
+               *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+       }
+       if (test_bit(4, &charge_sts)) {
+               *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+       }
+
+       if (test_bit(5, &charge_sts)) {
+               *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+       }
+
+       *voltage = get_unaligned_be16(data);
+
+       return status;
+}
+
+static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp,
+                                                u8 feature_index,
+                                                int *status, int *voltage,
+                                                int *level, int *charge_type)
+{
+       struct hidpp_report response;
+       int ret;
+       u8 *params = (u8 *)response.fap.params;
+
+       ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+                                         CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE,
+                                         NULL, 0, &response);
+
+       if (ret > 0) {
+               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+                       __func__, ret);
+               return -EPROTO;
+       }
+       if (ret)
+               return ret;
+
+       hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE;
+
+       *status = hidpp20_battery_map_status_voltage(params, voltage,
+                                                    level, charge_type);
+
+       return 0;
+}
+
+static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
+{
+       u8 feature_type;
+       int ret;
+       int status, voltage, level, charge_type;
+
+       if (hidpp->battery.voltage_feature_index == 0xff) {
+               ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE,
+                                            &hidpp->battery.voltage_feature_index,
+                                            &feature_type);
+               if (ret)
+                       return ret;
+       }
+
+       ret = hidpp20_battery_get_battery_voltage(hidpp,
+                                                 hidpp->battery.voltage_feature_index,
+                                                 &status, &voltage, &level, &charge_type);
+
+       if (ret)
+               return ret;
+
+       hidpp->battery.status = status;
+       hidpp->battery.voltage = voltage;
+       hidpp->battery.level = level;
+       hidpp->battery.charge_type = charge_type;
+       hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+       return 0;
+}
+
+static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
+                                           u8 *data, int size)
+{
+       struct hidpp_report *report = (struct hidpp_report *)data;
+       int status, voltage, level, charge_type;
+
+       if (report->fap.feature_index != hidpp->battery.voltage_feature_index ||
+               report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST)
+               return 0;
+
+       status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage,
+                                                   &level, &charge_type);
+
+       hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+       if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) {
+               hidpp->battery.voltage = voltage;
+               hidpp->battery.status = status;
+               hidpp->battery.level = level;
+               hidpp->battery.charge_type = charge_type;
+               if (hidpp->battery.ps)
+                       power_supply_changed(hidpp->battery.ps);
+       }
+       return 0;
+}
+
 static enum power_supply_property hidpp_battery_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
        POWER_SUPPLY_PROP_STATUS,
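
In hidpp20_battery_map_status_voltage() above, the first two bytes carry the voltage in mV (big-endian), and the status byte data[2] packs the charging state into its top bits with modifier flags below: the 0xe0 field selects charging/full/discharging/not-charging, bit 3 flags fast charge, bit 4 trickle charge, and bit 5 a critical level. A standalone decode of an example byte:

	#include <stdio.h>

	int main(void)
	{
		unsigned char b = 0x08;   /* example: charging, fast charge */

		switch (b & 0xe0) {       /* top bits: charging state */
		case 0x00: puts("charging"); break;
		case 0x20: puts("full"); break;
		case 0x40: puts("discharging"); break;
		case 0xe0: puts("not charging"); break;
		default:   puts("unknown");
		}
		if (b & 0x08) puts("fast charge");      /* bit 3 */
		if (b & 0x10) puts("trickle charge");   /* bit 4 */
		if (b & 0x20) puts("level critical");   /* bit 5 */
		return 0;
	}
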
@@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = {
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
        0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */
        0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */
+       0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */
 };
 
 static int hidpp_battery_get_property(struct power_supply *psy,
@@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy,
                case POWER_SUPPLY_PROP_SERIAL_NUMBER:
                        val->strval = hidpp->hid_dev->uniq;
                        break;
+               case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+                       /* hardware reports voltage in mV; sysfs expects uV */
+                       val->intval = hidpp->battery.voltage * 1000;
+                       break;
+               case POWER_SUPPLY_PROP_CHARGE_TYPE:
+                       val->intval = hidpp->battery.charge_type;
+                       break;
                default:
                        ret = -EINVAL;
                        break;
@@ -1276,6 +1441,24 @@ static int hidpp_battery_get_property(struct power_supply *psy,
        return ret;
 }
 
+/* -------------------------------------------------------------------------- */
+/* 0x1d4b: Wireless device status                                             */
+/* -------------------------------------------------------------------------- */
+#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS                      0x1d4b
+
+static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
+{
+       u8 feature_type;
+       int ret;
+
+       ret = hidpp_root_get_feature(hidpp,
+                                    HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+                                    &hidpp->wireless_feature_index,
+                                    &feature_type);
+
+       return ret;
+}
+
 /* -------------------------------------------------------------------------- */
 /* 0x2120: Hi-resolution scrolling                                            */
 /* -------------------------------------------------------------------------- */
@@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
                }
        }
 
-       if (unlikely(hidpp_report_is_connect_event(report))) {
+       if (unlikely(hidpp_report_is_connect_event(hidpp, report))) {
                atomic_set(&hidpp->connected,
                                !(report->rap.params[0] & (1 << 6)));
                if (schedule_work(&hidpp->work) == 0)
@@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
                ret = hidpp_solar_battery_event(hidpp, data, size);
                if (ret != 0)
                        return ret;
+               ret = hidpp20_battery_voltage_event(hidpp, data, size);
+               if (ret != 0)
+                       return ret;
        }
 
        if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
 
        hidpp->battery.feature_index = 0xff;
        hidpp->battery.solar_feature_index = 0xff;
+       hidpp->battery.voltage_feature_index = 0xff;
 
        if (hidpp->protocol_major >= 2) {
                if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750)
                        ret = hidpp_solar_request_battery_event(hidpp);
-               else
-                       ret = hidpp20_query_battery_info(hidpp);
+               else {
+                       ret = hidpp20_query_battery_voltage_info(hidpp);
+                       if (ret)
+                               ret = hidpp20_query_battery_info(hidpp);
+               }
 
                if (ret)
                        return ret;
@@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
        if (!battery_props)
                return -ENOMEM;
 
-       num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
+       num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
 
        if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
                battery_props[num_battery_props++] =
@@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
                battery_props[num_battery_props++] =
                                POWER_SUPPLY_PROP_CAPACITY_LEVEL;
 
+       if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+               battery_props[num_battery_props++] =
+                       POWER_SUPPLY_PROP_VOLTAGE_NOW;
+
        battery = &hidpp->battery;
 
        n = atomic_inc_return(&battery_no) - 1;
@@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
                else
                        hidpp10_query_battery_status(hidpp);
        } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
-               hidpp20_query_battery_info(hidpp);
+               if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+                       hidpp20_query_battery_voltage_info(hidpp);
+               else
+                       hidpp20_query_battery_info(hidpp);
        }
        if (hidpp->battery.ps)
                power_supply_changed(hidpp->battery.ps);
@@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
        return report->field[0]->report_count + 1;
 }
 
-static bool hidpp_validate_device(struct hid_device *hdev)
+static u8 hidpp_validate_device(struct hid_device *hdev)
 {
        struct hidpp_device *hidpp = hid_get_drvdata(hdev);
-       int id, report_length, supported_reports = 0;
+       int id, report_length;
+       u8 supported_reports = 0;
 
        id = REPORT_ID_HIDPP_SHORT;
        report_length = hidpp_get_report_length(hdev, id);
@@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
                if (report_length < HIDPP_REPORT_SHORT_LENGTH)
                        goto bad_device;
 
-               supported_reports++;
+               supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED;
        }
 
        id = REPORT_ID_HIDPP_LONG;
@@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
                if (report_length < HIDPP_REPORT_LONG_LENGTH)
                        goto bad_device;
 
-               supported_reports++;
+               supported_reports |= HIDPP_REPORT_LONG_SUPPORTED;
        }
 
        id = REPORT_ID_HIDPP_VERY_LONG;
@@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev)
                    report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
                        goto bad_device;
 
-               supported_reports++;
+               supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED;
                hidpp->very_long_report_length = report_length;
        }
 
@@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        /*
         * Make sure the device is HID++ capable, otherwise treat as generic HID
         */
-       if (!hidpp_validate_device(hdev)) {
+       hidpp->supported_reports = hidpp_validate_device(hdev);
+
+       if (!hidpp->supported_reports) {
                hid_set_drvdata(hdev, NULL);
                devm_kfree(&hdev->dev, hidpp);
                return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
@@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (ret < 0) {
                dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
                        __func__, ret);
-               hid_hw_stop(hdev);
                goto hid_hw_open_fail;
        }
 
@@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                hidpp_overwrite_name(hdev);
        }
 
+       if (connected && hidpp->protocol_major >= 2) {
+               ret = hidpp_set_wireless_feature_index(hidpp);
+               if (ret == -ENOENT)
+                       hidpp->wireless_feature_index = 0;
+               else if (ret)
+                       goto hid_hw_init_fail;
+       }
+
        if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
                ret = wtp_get_config(hidpp);
                if (ret)
@@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = {
        { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
        { /* Mouse Logitech MX Master 2S */
          LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech MX Master 3 */
+         LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
        { /* Mouse Logitech Performance MX */
          LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
        { /* Keyboard logitech K400 */
@@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = {
        { /* MX5500 keyboard over Bluetooth */
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
          .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+       { /* MX Master mouse over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
+         .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
+         .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* MX Master 3 mouse over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
+         .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
        {}
 };
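
With the voltage feature wired up, the power-supply core exposes the reading through a voltage_now sysfs attribute in microvolts. For reference, a minimal userspace sketch of consuming it; the supply name "hidpp_battery_0" is an assumption, since the index depends on enumeration order:

#include <stdio.h>

/* Sketch only: the supply path below is hypothetical and depends on
 * device enumeration order. voltage_now is reported in microvolts. */
int main(void)
{
	FILE *f = fopen("/sys/class/power_supply/hidpp_battery_0/voltage_now", "r");
	long uv;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &uv) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("battery voltage: %.3f V\n", uv / 1000000.0);
	return 0;
}
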
 
index 3cfeb1629f79fec23e0ceadc32be2081c29b1f87..362805ddf377736927fa95493186698a8d5fad0f 100644 (file)
@@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
                tool = MT_TOOL_DIAL;
        else if (unlikely(!confidence_state)) {
                tool = MT_TOOL_PALM;
-               if (!active &&
+               if (!active && mt &&
                    input_mt_is_active(&mt->slots[slotnum])) {
                        /*
                         * The non-confidence was reported for
@@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_LG,
                HID_USB_DEVICE(USB_VENDOR_ID_LG,
                        USB_DEVICE_ID_LG_MELFAS_MT) },
+       { .driver_data = MT_CLS_LG,
+               HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+                       USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
 
        /* MosArt panels */
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
index d1b39c29e3535a0aa40cf34dc52101c1ace87046..0e7b2d998395a91f5d5f9109443501707fe964fc 100644 (file)
@@ -174,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
 
        { 0 }
 };
index 8dae0f9b819e011d6695462fea7e88e85cd16669..6286204d4c56029656f6cf68758e75781d9ff0ff 100644 (file)
@@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev,
 
        if (steam->quirks & STEAM_QUIRK_WIRELESS) {
                hid_info(hdev, "Steam wireless receiver connected");
+               /* If using a wireless adaptor, ask for connection status */
+               steam->connected = false;
                steam_request_conn_status(steam);
        } else {
+               /* A wired connection is always present */
+               steam->connected = true;
                ret = steam_register(steam);
                if (ret) {
                        hid_err(hdev,
index c3fc0ceb80963c331f1515bacdfa51883708d1ae..2eee5e31c2b7eaf3bb9618aaac7862e82e276cd4 100644 (file)
@@ -249,13 +249,14 @@ out:
 static __poll_t hidraw_poll(struct file *file, poll_table *wait)
 {
        struct hidraw_list *list = file->private_data;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */
 
        poll_wait(file, &list->hidraw->wait, wait);
        if (list->head != list->tail)
-               return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
+               mask |= EPOLLIN | EPOLLRDNORM;
        if (!list->hidraw->exist)
-               return EPOLLERR | EPOLLHUP;
-       return 0;
+               mask |= EPOLLERR | EPOLLHUP;
+       return mask;
 }
 
 static int hidraw_open(struct inode *inode, struct file *file)
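
The reworked hidraw_poll() accumulates readiness bits instead of returning early: the node is always writable, EPOLLIN | EPOLLRDNORM is added when reports are queued, and EPOLLERR | EPOLLHUP on disconnect. A minimal sketch of a userspace reader under these semantics, assuming a /dev/hidraw0 node:

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .events = POLLIN };
	char buf[64];

	pfd.fd = open("/dev/hidraw0", O_RDONLY);	/* assumed node */
	if (pfd.fd < 0)
		return 1;

	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & (POLLERR | POLLHUP))
			break;			/* device disconnected */
		if (pfd.revents & POLLIN)
			(void)read(pfd.fd, buf, sizeof(buf));	/* one report */
	}
	close(pfd.fd);
	return 0;
}
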
@@ -450,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
                                                -EFAULT : len;
                                        break;
                                }
+
+                               if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
+                                       int len = strlen(hid->uniq) + 1;
+                                       if (len > _IOC_SIZE(cmd))
+                                               len = _IOC_SIZE(cmd);
+                                       ret = copy_to_user(user_arg, hid->uniq, len) ?
+                                               -EFAULT : len;
+                                       break;
+                               }
                        }
 
                ret = -ENOTTY;
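
The new branch mirrors the existing HIDIOCGRAWPHYS handling for the device's unique identifier (typically a serial number or Bluetooth address). A hedged userspace sketch, assuming the HIDIOCGRAWUNIQ definition this series adds to <linux/hidraw.h>:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hidraw.h>

int main(void)
{
	char uniq[64] = "";
	int fd = open("/dev/hidraw0", O_RDONLY);	/* assumed node */

	if (fd < 0)
		return 1;
	/* Like HIDIOCGRAWNAME/PHYS, the macro takes the buffer length. */
	if (ioctl(fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq) >= 0)
		printf("uniq: %s\n", uniq);
	close(fd);
	return 0;
}
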
index a358e61fbc8272af922b27f789cc544b0bf56e00..009000c5d55cddfaad101f1b33d5b37497e0f176 100644 (file)
@@ -49,6 +49,8 @@
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 #define I2C_HID_QUIRK_RESET_ON_RESUME          BIT(5)
+#define I2C_HID_QUIRK_BAD_INPUT_SIZE           BIT(6)
+
 
 /* flags */
 #define I2C_HID_STARTED                0
@@ -175,6 +177,8 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_BOGUS_IRQ },
        { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
                 I2C_HID_QUIRK_RESET_ON_RESUME },
+       { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+               I2C_HID_QUIRK_BAD_INPUT_SIZE },
        { 0, 0 }
 };
 
@@ -496,9 +500,15 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
        }
 
        if ((ret_size > size) || (ret_size < 2)) {
-               dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
-                       __func__, size, ret_size);
-               return;
+               if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) {
+                       ihid->inbuf[0] = size & 0xff;
+                       ihid->inbuf[1] = size >> 8;
+                       ret_size = size;
+               } else {
+                       dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+                               __func__, size, ret_size);
+                       return;
+               }
        }
 
        i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
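
i2c-hid input reports begin with a 16-bit little-endian length prefix, which the quirk above rewrites in place when the Legion Y720 reports a bogus size. A standalone sketch of just that header fixup, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the quirk: overwrite the 2-byte
 * little-endian length prefix with the size the transfer used. */
static void fixup_input_size(uint8_t *inbuf, uint16_t size)
{
	inbuf[0] = size & 0xff;	/* low byte */
	inbuf[1] = size >> 8;	/* high byte */
}

int main(void)
{
	uint8_t report[32] = { 0xff, 0xff };	/* bogus length from device */

	fixup_input_size(report, sizeof(report));
	printf("patched length: %u\n", report[0] | (report[1] << 8));
	return 0;
}
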
index 6c1e6110867f0d71ad279d41bc61c3d81f384dcc..1fb294ca463e5b566b577b26eefe5e38c40496d0 100644 (file)
@@ -24,7 +24,9 @@
 #define ICL_MOBILE_DEVICE_ID   0x34FC
 #define SPT_H_DEVICE_ID                0xA135
 #define CML_LP_DEVICE_ID       0x02FC
+#define CMP_H_DEVICE_ID                0x06FC
 #define EHL_Ax_DEVICE_ID       0x4BB3
+#define TGL_LP_DEVICE_ID       0xA0FC
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 784dcc8c702280800704abb903fc3ec3a79e6687..f491d8b4e24c7b39087bfc0a71db3b18d7cc596c 100644 (file)
@@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index fa0cc089982739e9d70da131c7b6235571f0acfa..8fe3efcb832715c4a978625387dc0bb0ea5b00a2 100644 (file)
@@ -766,13 +766,14 @@ unlock:
 static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
 {
        struct uhid_device *uhid = file->private_data;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */
 
        poll_wait(file, &uhid->waitq, wait);
 
        if (uhid->head != uhid->tail)
-               return EPOLLIN | EPOLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
-       return 0;
+       return mask;
 }
 
 static const struct file_operations uhid_fops = {
index e421cdf2d1a4ec5554535e8b241592d6c8f3afa7..a970b809d778c3c9a74702abe70c9f1f976a06d9 100644 (file)
@@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file)
        return 0;
 }
 
+static int __hiddev_open(struct hiddev *hiddev, struct file *file)
+{
+       struct hiddev_list *list;
+       int error;
+
+       lockdep_assert_held(&hiddev->existancelock);
+
+       list = vzalloc(sizeof(*list));
+       if (!list)
+               return -ENOMEM;
+
+       mutex_init(&list->thread_lock);
+       list->hiddev = hiddev;
+
+       if (!hiddev->open++) {
+               error = hid_hw_power(hiddev->hid, PM_HINT_FULLON);
+               if (error < 0)
+                       goto err_drop_count;
+
+               error = hid_hw_open(hiddev->hid);
+               if (error < 0)
+                       goto err_normal_power;
+       }
+
+       spin_lock_irq(&hiddev->list_lock);
+       list_add_tail(&list->node, &hiddev->list);
+       spin_unlock_irq(&hiddev->list_lock);
+
+       file->private_data = list;
+
+       return 0;
+
+err_normal_power:
+       hid_hw_power(hiddev->hid, PM_HINT_NORMAL);
+err_drop_count:
+       hiddev->open--;
+       vfree(list);
+       return error;
+}
+
 /*
  * open file op
  */
 static int hiddev_open(struct inode *inode, struct file *file)
 {
-       struct hiddev_list *list;
        struct usb_interface *intf;
        struct hid_device *hid;
        struct hiddev *hiddev;
@@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
        intf = usbhid_find_interface(iminor(inode));
        if (!intf)
                return -ENODEV;
+
        hid = usb_get_intfdata(intf);
        hiddev = hid->hiddev;
 
-       if (!(list = vzalloc(sizeof(struct hiddev_list))))
-               return -ENOMEM;
-       mutex_init(&list->thread_lock);
-       list->hiddev = hiddev;
-       file->private_data = list;
-
-       /*
-        * no need for locking because the USB major number
-        * is shared which usbcore guards against disconnect
-        */
-       if (list->hiddev->exist) {
-               if (!list->hiddev->open++) {
-                       res = hid_hw_open(hiddev->hid);
-                       if (res < 0)
-                               goto bail;
-               }
-       } else {
-               res = -ENODEV;
-               goto bail;
-       }
-
-       spin_lock_irq(&list->hiddev->list_lock);
-       list_add_tail(&list->node, &hiddev->list);
-       spin_unlock_irq(&list->hiddev->list_lock);
-
        mutex_lock(&hiddev->existancelock);
-       /*
-        * recheck exist with existance lock held to
-        * avoid opening a disconnected device
-        */
-       if (!list->hiddev->exist) {
-               res = -ENODEV;
-               goto bail_unlock;
-       }
-       if (!list->hiddev->open++)
-               if (list->hiddev->exist) {
-                       struct hid_device *hid = hiddev->hid;
-                       res = hid_hw_power(hid, PM_HINT_FULLON);
-                       if (res < 0)
-                               goto bail_unlock;
-                       res = hid_hw_open(hid);
-                       if (res < 0)
-                               goto bail_normal_power;
-               }
-       mutex_unlock(&hiddev->existancelock);
-       return 0;
-bail_normal_power:
-       hid_hw_power(hid, PM_HINT_NORMAL);
-bail_unlock:
+       res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV;
        mutex_unlock(&hiddev->existancelock);
 
-       spin_lock_irq(&list->hiddev->list_lock);
-       list_del(&list->node);
-       spin_unlock_irq(&list->hiddev->list_lock);
-bail:
-       file->private_data = NULL;
-       vfree(list);
        return res;
 }
 
index ccb74529bc78243689d0720bbe2d81cfd8a52004..d99a9d407671c88ea96decdd0e561c6f8784685a 100644 (file)
@@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
                    (hdev->product == 0x34d || hdev->product == 0x34e ||  /* MobileStudio Pro */
                     hdev->product == 0x357 || hdev->product == 0x358 ||  /* Intuos Pro 2 */
                     hdev->product == 0x392 ||                            /* Intuos Pro 2 */
-                    hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */
+                    hdev->product == 0x398 || hdev->product == 0x399 ||  /* MobileStudio Pro */
+                    hdev->product == 0x3AA)) {                           /* MobileStudio Pro */
                        value = (field->logical_maximum - value);
 
                        if (hdev->product == 0x357 || hdev->product == 0x358 ||
                            hdev->product == 0x392)
                                value = wacom_offset_rotation(input, usage, value, 3, 16);
                        else if (hdev->product == 0x34d || hdev->product == 0x34e ||
-                                hdev->product == 0x398 || hdev->product == 0x399)
+                                hdev->product == 0x398 || hdev->product == 0x399 ||
+                                hdev->product == 0x3AA)
                                value = wacom_offset_rotation(input, usage, value, 1, 2);
                }
                else {
index 766bd84573461a58d5c4d6dc52fec876502c050b..296f9098c9e4624a38d0382c3266a94af26a02eb 100644 (file)
@@ -211,7 +211,7 @@ static struct timespec64 hv_get_adj_host_time(void)
        unsigned long flags;
 
        spin_lock_irqsave(&host_ts.lock, flags);
-       reftime = hyperv_cs->read(hyperv_cs);
+       reftime = hv_read_reference_counter();
        newtime = host_ts.host_time + (reftime - host_ts.ref_time);
        ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
        spin_unlock_irqrestore(&host_ts.lock, flags);
@@ -250,7 +250,7 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
         */
        spin_lock_irqsave(&host_ts.lock, flags);
 
-       cur_reftime = hyperv_cs->read(hyperv_cs);
+       cur_reftime = hv_read_reference_counter();
        host_ts.host_time = hosttime;
        host_ts.ref_time = cur_reftime;
 
@@ -315,7 +315,7 @@ static void timesync_onchannelcallback(void *context)
                                        sizeof(struct vmbuspipe_hdr) +
                                        sizeof(struct icmsg_hdr)];
                                adj_guesttime(timedatap->parenttime,
-                                             hyperv_cs->read(hyperv_cs),
+                                             hv_read_reference_counter(),
                                              timedatap->flags);
                        }
                }
@@ -524,7 +524,7 @@ static struct ptp_clock *hv_ptp_clock;
 static int hv_timesync_init(struct hv_util_service *srv)
 {
        /* TimeSync requires Hyper-V clocksource. */
-       if (!hyperv_cs)
+       if (!hv_read_reference_counter)
                return -ENODEV;
 
        spin_lock_init(&host_ts.lock);
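
hv_get_adj_host_time() works entirely in the reference counter's 100 ns units before scaling to nanoseconds; WLTIMEDELTA rebases the Windows epoch (1601) onto the Unix epoch. A minimal sketch of the same arithmetic, assuming the usual value of that constant:

#include <stdint.h>

/* 100 ns intervals between the Windows epoch (1601-01-01) and the
 * Unix epoch (1970-01-01); assumed to match the driver's WLTIMEDELTA. */
#define WLTIMEDELTA 116444736000000000ULL

uint64_t adj_host_time_ns(uint64_t host_time, uint64_t ref_time,
			  uint64_t cur_ref)
{
	uint64_t newtime = host_time + (cur_ref - ref_time);	/* 100 ns units */

	return (newtime - WLTIMEDELTA) * 100;	/* rebased, in nanoseconds */
}
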
index 23dfe848979a27a5536a80a84a47215b53ef5bd0..47ac20aee06fc581f0d5a341e7388afa42accba2 100644 (file)
@@ -164,6 +164,16 @@ config SENSORS_ADM1031
          This driver can also be built as a module. If so, the module
          will be called adm1031.
 
+config SENSORS_ADM1177
+       tristate "Analog Devices ADM1177 and compatibles"
+       depends on I2C
+       help
+         If you say yes here you get support for Analog Devices ADM1177
+         sensor chips.
+
+         This driver can also be built as a module.  If so, the module
+         will be called adm1177.
+
 config SENSORS_ADM9240
        tristate "Analog Devices ADM9240 and compatibles"
        depends on I2C
@@ -385,6 +395,16 @@ config SENSORS_ATXP1
          This driver can also be built as a module. If so, the module
          will be called atxp1.
 
+config SENSORS_DRIVETEMP
+       tristate "Hard disk drives with temperature sensors"
+       depends on SCSI && ATA
+       help
+         If you say yes here you get support for the temperature sensor on
+         hard disk drives.
+
+         This driver can also be built as a module. If so, the module
+         will be called drivetemp.
+
 config SENSORS_DS620
        tristate "Dallas Semiconductor DS620"
        depends on I2C
@@ -889,7 +909,7 @@ config SENSORS_MAX197
          will be called max197.
 
 config SENSORS_MAX31722
-tristate "MAX31722 temperature sensor"
+       tristate "MAX31722 temperature sensor"
        depends on SPI
        help
          Support for the Maxim Integrated MAX31722/MAX31723 digital
@@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor"
          This driver can also be built as a module. If so, the module
          will be called max31722.
 
+config SENSORS_MAX31730
+       tristate "MAX31730 temperature sensor"
+       depends on I2C
+       help
+         Support for the Maxim Integrated MAX31730 3-Channel Remote
+         Temperature Sensor.
+
+         This driver can also be built as a module. If so, the module
+         will be called max31730.
+
 config SENSORS_MAX6621
        tristate "Maxim MAX6621 sensor chip"
        depends on I2C
@@ -1905,7 +1935,7 @@ config SENSORS_W83627HF
          will be called w83627hf.
 
 config SENSORS_W83627EHF
-       tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F"
+       tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG"
        depends on !PPC
        select HWMON_VID
        help
@@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF
          the Core 2 Duo. And also the W83627UHG, which is a stripped down
          version of the W83627DHG (as far as hardware monitoring goes.)
 
-         This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
-         (also known as W83667HG-I), and NCT6776F.
+         This driver also supports Nuvoton W83667HG and W83667HG-B.
 
          This driver can also be built as a module. If so, the module
          will be called w83627ehf.
index 6db5db9cdc299fa0b6319dbecef46c79157e2f55..613f509879653da9de3a57c1f8792e18bbfd0c63 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
 obj-$(CONFIG_SENSORS_ADM1026)  += adm1026.o
 obj-$(CONFIG_SENSORS_ADM1029)  += adm1029.o
 obj-$(CONFIG_SENSORS_ADM1031)  += adm1031.o
+obj-$(CONFIG_SENSORS_ADM1177)  += adm1177.o
 obj-$(CONFIG_SENSORS_ADM9240)  += adm9240.o
 obj-$(CONFIG_SENSORS_ADS7828)  += ads7828.o
 obj-$(CONFIG_SENSORS_ADS7871)  += ads7871.o
@@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
 obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
 obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
 obj-$(CONFIG_SENSORS_DME1737)  += dme1737.o
+obj-$(CONFIG_SENSORS_DRIVETEMP)        += drivetemp.o
 obj-$(CONFIG_SENSORS_DS620)    += ds620.o
 obj-$(CONFIG_SENSORS_DS1621)   += ds1621.o
 obj-$(CONFIG_SENSORS_EMC1403)  += emc1403.o
@@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619)       += max1619.o
 obj-$(CONFIG_SENSORS_MAX1668)  += max1668.o
 obj-$(CONFIG_SENSORS_MAX197)   += max197.o
 obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
 obj-$(CONFIG_SENSORS_MAX6621)  += max6621.o
 obj-$(CONFIG_SENSORS_MAX6639)  += max6639.o
 obj-$(CONFIG_SENSORS_MAX6642)  += max6642.o
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
new file mode 100644 (file)
index 0000000..d314223
--- /dev/null
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin
+ *
+ * Copyright 2015-2019 Analog Devices Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+/* Command Byte Operations */
+#define ADM1177_CMD_V_CONT     BIT(0)
+#define ADM1177_CMD_I_CONT     BIT(2)
+#define ADM1177_CMD_VRANGE     BIT(4)
+
+/* Extended Register */
+#define ADM1177_REG_ALERT_TH   2
+
+#define ADM1177_BITS           12
+
+/**
+ * struct adm1177_state - driver instance specific data
+ * @client:            pointer to i2c client
+ * @reg:               regulator info for the power supply of the device
+ * @r_sense_uohm:      current sense resistor value
+ * @alert_threshold_ua: current limit for shutdown
+ * @vrange_high:       internal voltage divider setting (high range if set)
+ */
+struct adm1177_state {
+       struct i2c_client       *client;
+       struct regulator        *reg;
+       u32                     r_sense_uohm;
+       u32                     alert_threshold_ua;
+       bool                    vrange_high;
+};
+
+static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data)
+{
+       return i2c_master_recv(st->client, data, num);
+}
+
+static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd)
+{
+       return i2c_smbus_write_byte(st->client, cmd);
+}
+
+static int adm1177_write_alert_thr(struct adm1177_state *st,
+                                  u32 alert_threshold_ua)
+{
+       u64 val;
+       int ret;
+
+       val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm;
+       val = div_u64(val, 105840000U);
+       val = div_u64(val, 1000U);
+       if (val > 0xFF)
+               val = 0xFF;
+
+       ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH,
+                                       val);
+       if (ret)
+               return ret;
+
+       st->alert_threshold_ua = alert_threshold_ua;
+       return 0;
+}
+
+static int adm1177_read(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long *val)
+{
+       struct adm1177_state *st = dev_get_drvdata(dev);
+       u8 data[3];
+       long dummy;
+       int ret;
+
+       switch (type) {
+       case hwmon_curr:
+               switch (attr) {
+               case hwmon_curr_input:
+                       ret = adm1177_read_raw(st, 3, data);
+                       if (ret < 0)
+                               return ret;
+                       dummy = (data[1] << 4) | (data[2] & 0xF);
+                       /*
+                        * convert to milliamperes
+                        * ((105.84mV / 4096) x raw) / senseResistor(ohm)
+                        */
+                       *val = div_u64((105840000ull * dummy),
+                                      4096 * st->r_sense_uohm);
+                       return 0;
+               case hwmon_curr_max_alarm:
+                       *val = st->alert_threshold_ua;
+                       return 0;
+               default:
+                       return -EOPNOTSUPP;
+               }
+       case hwmon_in:
+               ret = adm1177_read_raw(st, 3, data);
+               if (ret < 0)
+                       return ret;
+               dummy = (data[0] << 4) | (data[2] >> 4);
+               /*
+                * convert to millivolts based on resistor division
+                * (V_fullscale / 4096) * raw
+                */
+               if (st->vrange_high)
+                       dummy *= 26350;
+               else
+                       dummy *= 6650;
+
+               *val = DIV_ROUND_CLOSEST(dummy, 4096);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int adm1177_write(struct device *dev, enum hwmon_sensor_types type,
+                        u32 attr, int channel, long val)
+{
+       struct adm1177_state *st = dev_get_drvdata(dev);
+
+       switch (type) {
+       case hwmon_curr:
+               switch (attr) {
+               case hwmon_curr_max_alarm:
+                       adm1177_write_alert_thr(st, val);
+                       return 0;
+               default:
+                       return -EOPNOTSUPP;
+               }
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static umode_t adm1177_is_visible(const void *data,
+                                 enum hwmon_sensor_types type,
+                                 u32 attr, int channel)
+{
+       const struct adm1177_state *st = data;
+
+       switch (type) {
+       case hwmon_in:
+               switch (attr) {
+               case hwmon_in_input:
+                       return 0444;
+               }
+               break;
+       case hwmon_curr:
+               switch (attr) {
+               case hwmon_curr_input:
+                       if (st->r_sense_uohm)
+                               return 0444;
+                       return 0;
+               case hwmon_curr_max_alarm:
+                       if (st->r_sense_uohm)
+                               return 0644;
+                       return 0;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static const struct hwmon_channel_info *adm1177_info[] = {
+       HWMON_CHANNEL_INFO(curr,
+                          HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+       HWMON_CHANNEL_INFO(in,
+                          HWMON_I_INPUT),
+       NULL
+};
+
+static const struct hwmon_ops adm1177_hwmon_ops = {
+       .is_visible = adm1177_is_visible,
+       .read = adm1177_read,
+       .write = adm1177_write,
+};
+
+static const struct hwmon_chip_info adm1177_chip_info = {
+       .ops = &adm1177_hwmon_ops,
+       .info = adm1177_info,
+};
+
+static void adm1177_remove(void *data)
+{
+       struct adm1177_state *st = data;
+
+       regulator_disable(st->reg);
+}
+
+static int adm1177_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct device *hwmon_dev;
+       struct adm1177_state *st;
+       u32 alert_threshold_ua;
+       int ret;
+
+       st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return -ENOMEM;
+
+       st->client = client;
+
+       st->reg = devm_regulator_get_optional(&client->dev, "vref");
+       if (IS_ERR(st->reg)) {
+               if (PTR_ERR(st->reg) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+
+               st->reg = NULL;
+       } else {
+               ret = regulator_enable(st->reg);
+               if (ret)
+                       return ret;
+               ret = devm_add_action_or_reset(&client->dev, adm1177_remove,
+                                              st);
+               if (ret)
+                       return ret;
+       }
+
+       if (device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+                                    &st->r_sense_uohm))
+               st->r_sense_uohm = 0;
+       if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp",
+                                    &alert_threshold_ua)) {
+               if (st->r_sense_uohm)
+                       /*
+                        * set maximum default value from datasheet based on
+                        * shunt-resistor
+                        */
+                       alert_threshold_ua = div_u64(105840000000,
+                                                    st->r_sense_uohm);
+               else
+                       alert_threshold_ua = 0;
+       }
+       st->vrange_high = device_property_read_bool(dev,
+                                                   "adi,vrange-high-enable");
+       if (alert_threshold_ua && st->r_sense_uohm)
+               adm1177_write_alert_thr(st, alert_threshold_ua);
+
+       ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT |
+                                   ADM1177_CMD_I_CONT |
+                                   (st->vrange_high ? 0 : ADM1177_CMD_VRANGE));
+       if (ret)
+               return ret;
+
+       hwmon_dev =
+               devm_hwmon_device_register_with_info(dev, client->name, st,
+                                                    &adm1177_chip_info, NULL);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id adm1177_id[] = {
+       {"adm1177", 0},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, adm1177_id);
+
+static const struct of_device_id adm1177_dt_ids[] = {
+       { .compatible = "adi,adm1177" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, adm1177_dt_ids);
+
+static struct i2c_driver adm1177_driver = {
+       .class = I2C_CLASS_HWMON,
+       .driver = {
+               .name = "adm1177",
+               .of_match_table = adm1177_dt_ids,
+       },
+       .probe = adm1177_probe,
+       .id_table = adm1177_id,
+};
+module_i2c_driver(adm1177_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver");
+MODULE_LICENSE("GPL v2");
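
To make the conversion in adm1177_read() concrete: with a hypothetical 50 mOhm (50000 uOhm) shunt, a half-scale 12-bit reading of 2048 works out to about 1058 mA. A standalone sketch of the same integer arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the current conversion:
 * I[mA] = (105.84 mV / 4096) * raw / R_sense, with R_sense in micro-ohms.
 * The 50000 uOhm shunt below is a hypothetical example value. */
int main(void)
{
	uint32_t r_sense_uohm = 50000;
	long raw = 2048;			/* half of the 12-bit range */
	long ma = (105840000ULL * raw) / (4096ULL * r_sense_uohm);

	printf("current: %ld mA\n", ma);	/* prints 1058 */
	return 0;
}
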
index 6c64d50c9aae8ec175d9477fa98af6c851849d2f..01c2eeb02aa965e176eed3c0cdb8a9e393c95c63 100644 (file)
@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
        long reg;
 
        if (bypass_attn & (1 << channel))
-               reg = (volt * 1024) / 2250;
+               reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
        else
-               reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+               reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+                                       (r[0] + r[1]) * 2250);
        return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
 
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
new file mode 100644 (file)
index 0000000..370d0c7
--- /dev/null
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hwmon client for disk and solid state drives with temperature sensors
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ *
+ * With input from:
+ *    Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors.
+ *    (C) 2018 Linus Walleij
+ *
+ *    hwmon: Driver for SCSI/ATA temperature sensors
+ *    by Constantin Baranov <const@mimas.ru>, submitted September 2009
+ *
+ * This driver supports reporting the temperature of SATA drives. It can be
+ * easily extended to report the temperature of SCSI drives.
+ *
+ * The primary means to read drive temperatures and temperature limits
+ * for ATA drives is the SCT Command Transport feature set as specified in
+ * ATA8-ACS.
+ * It can be used to read the current drive temperature, temperature limits,
+ * and historic minimum and maximum temperatures. The SCT Command Transport
+ * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set
+ * (ATA8-ACS)".
+ *
+ * If the SCT Command Transport feature set is not available, drive temperatures
+ * may be readable through SMART attributes. Since SMART attributes are not well
+ * defined, this method is only used as a fallback mechanism.
+ *
+ * There are three SMART attributes which may report drive temperatures.
+ * Those are defined as follows (from
+ * http://www.cropel.com/library/smart-attribute-list.aspx).
+ *
+ * 190 Temperature     Temperature, monitored by a sensor somewhere inside
+ *                     the drive. Raw value typically holds the actual
+ *                     temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 194 Temperature     Temperature, monitored by a sensor somewhere inside
+ *                     the drive. Raw value typicaly holds the actual
+ *                     the drive. Raw value typically holds the actual
+ *
+ * 231 Temperature     Temperature, monitored by a sensor somewhere inside
+ *                     the drive. Raw value typically holds the actual
+ *                     temperature (hexadecimal) in its rightmost two digits.
+ *
+ * Wikipedia defines attributes a bit differently.
+ *
+ * 190 Temperature     Value is equal to (100-temp. °C), allowing manufacturer
+ *     Difference or   to set a minimum threshold which corresponds to a
+ *     Airflow         maximum temperature. This also follows the convention of
+ *     Temperature     100 being a best-case value and lower values being
+ *                     undesirable. However, some older drives may instead
+ *                     report raw Temperature (identical to 0xC2) or
+ *                     Temperature minus 50 here.
+ * 194 Temperature or  Indicates the device temperature, if the appropriate
+ *     Temperature     sensor is fitted. Lowest byte of the raw value contains
+ *     Celsius         the exact temperature value (Celsius degrees).
+ * 231 Life Left       Indicates the approximate SSD life left, in terms of
+ *     (SSDs) or       program/erase cycles or available reserved blocks.
+ *     Temperature     A normalized value of 100 represents a new drive, with
+ *                     a threshold value at 10 indicating a need for
+ *                     replacement. A value of 0 may mean that the drive is
+ *                     operating in read-only mode to allow data recovery.
+ *                     Previously (pre-2010) occasionally used for Drive
+ *                     Temperature (more typically reported at 0xC2).
+ *
+ * The common denominator is that the first raw byte reports the temperature
+ * in degrees C on almost all drives. Some drives may report a fractional
+ * temperature in the second raw byte.
+ *
+ * Known exceptions (from libatasmart):
+ * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in tenths
+ *   of a degree C in the first two raw bytes.
+ * - A few Maxtor drives report an unknown or bad value in attribute 194.
+ * - Certain Apple SSD drives report an unknown value in attribute 190.
+ *   Only certain firmware versions are affected.
+ *
+ * Those exceptions affect older ATA drives and are currently ignored.
+ * Also, the second raw byte (possibly reporting the fractional temperature)
+ * is currently ignored.
+ *
+ * Many drives also report temperature limits in additional SMART data raw
+ * bytes. The format of those is not well defined and varies widely.
+ * The driver does not currently attempt to report those limits.
+ *
+ * According to data in smartmontools, attribute 231 is rarely used to report
+ * drive temperatures. At the same time, several drives report SSD life left
+ * in attribute 231, but do not support temperature sensors. For this reason,
+ * attribute 231 is currently ignored.
+ *
+ * Following the above definitions, temperatures are reported as follows.
+ * - If SCT Command Transport is supported, it is used to read the
+ *   temperature and, if available, temperature limits.
+ * - Otherwise, if SMART attribute 194 is supported, it is used to read
+ *   the temperature.
+ * - Otherwise, if SMART attribute 190 is supported, it is used to read
+ *   the temperature.
+ */
+
+#include <linux/ata.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_proto.h>
+
+struct drivetemp_data {
+       struct list_head list;          /* list of instantiated devices */
+       struct mutex lock;              /* protect data buffer accesses */
+       struct scsi_device *sdev;       /* SCSI device */
+       struct device *dev;             /* instantiating device */
+       struct device *hwdev;           /* hardware monitoring device */
+       u8 smartdata[ATA_SECT_SIZE];    /* local buffer */
+       int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val);
+       bool have_temp_lowest;          /* lowest temp in SCT status */
+       bool have_temp_highest;         /* highest temp in SCT status */
+       bool have_temp_min;             /* have min temp */
+       bool have_temp_max;             /* have max temp */
+       bool have_temp_lcrit;           /* have lower critical limit */
+       bool have_temp_crit;            /* have critical limit */
+       int temp_min;                   /* min temp */
+       int temp_max;                   /* max temp */
+       int temp_lcrit;                 /* lower critical limit */
+       int temp_crit;                  /* critical limit */
+};
+
+static LIST_HEAD(drivetemp_devlist);
+
+#define ATA_MAX_SMART_ATTRS    30
+#define SMART_TEMP_PROP_190    190
+#define SMART_TEMP_PROP_194    194
+
+#define SCT_STATUS_REQ_ADDR    0xe0
+#define  SCT_STATUS_VERSION_LOW                0       /* log byte offsets */
+#define  SCT_STATUS_VERSION_HIGH       1
+#define  SCT_STATUS_TEMP               200
+#define  SCT_STATUS_TEMP_LOWEST                201
+#define  SCT_STATUS_TEMP_HIGHEST       202
+#define SCT_READ_LOG_ADDR      0xe1
+#define  SMART_READ_LOG                        0xd5
+#define  SMART_WRITE_LOG               0xd6
+
+#define INVALID_TEMP           0x80
+
+#define temp_is_valid(temp)    ((temp) != INVALID_TEMP)
+#define temp_from_sct(temp)    (((s8)(temp)) * 1000)
+
+static inline bool ata_id_smart_supported(u16 *id)
+{
+       return id[ATA_ID_COMMAND_SET_1] & BIT(0);
+}
+
+static inline bool ata_id_smart_enabled(u16 *id)
+{
+       return id[ATA_ID_CFS_ENABLE_1] & BIT(0);
+}
+
+static int drivetemp_scsi_command(struct drivetemp_data *st,
+                                u8 ata_command, u8 feature,
+                                u8 lba_low, u8 lba_mid, u8 lba_high)
+{
+       u8 scsi_cmd[MAX_COMMAND_SIZE];
+       int data_dir;
+
+       memset(scsi_cmd, 0, sizeof(scsi_cmd));
+       scsi_cmd[0] = ATA_16;
+       if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) {
+               scsi_cmd[1] = (5 << 1); /* PIO Data-out */
+               /*
+                * No off.line or cc, write to dev, block count in sector count
+                * field.
+                */
+               scsi_cmd[2] = 0x06;
+               data_dir = DMA_TO_DEVICE;
+       } else {
+               scsi_cmd[1] = (4 << 1); /* PIO Data-in */
+               /*
+                * No off.line or cc, read from dev, block count in sector count
+                * field.
+                */
+               scsi_cmd[2] = 0x0e;
+               data_dir = DMA_FROM_DEVICE;
+       }
+       scsi_cmd[4] = feature;
+       scsi_cmd[6] = 1;        /* 1 sector */
+       scsi_cmd[8] = lba_low;
+       scsi_cmd[10] = lba_mid;
+       scsi_cmd[12] = lba_high;
+       scsi_cmd[14] = ata_command;
+
+       return scsi_execute_req(st->sdev, scsi_cmd, data_dir,
+                               st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5,
+                               NULL);
+}
+
+static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
+                                u8 select)
+{
+       return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select,
+                                    ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS);
+}
+
+static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr,
+                                 long *temp)
+{
+       u8 *buf = st->smartdata;
+       bool have_temp = false;
+       u8 temp_raw;
+       u8 csum;
+       int err;
+       int i;
+
+       err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0);
+       if (err)
+               return err;
+
+       /* Checksum the read value table */
+       csum = 0;
+       for (i = 0; i < ATA_SECT_SIZE; i++)
+               csum += buf[i];
+       if (csum) {
+               dev_dbg(&st->sdev->sdev_gendev,
+                       "checksum error reading SMART values\n");
+               return -EIO;
+       }
+
+       for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) {
+               u8 *attr = buf + i * 12;
+               int id = attr[2];
+
+               if (!id)
+                       continue;
+
+               if (id == SMART_TEMP_PROP_190) {
+                       temp_raw = attr[7];
+                       have_temp = true;
+               }
+               if (id == SMART_TEMP_PROP_194) {
+                       temp_raw = attr[7];
+                       have_temp = true;
+                       break;
+               }
+       }
+
+       if (have_temp) {
+               *temp = temp_raw * 1000;
+               return 0;
+       }
+
+       return -ENXIO;
+}
+
+static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
+{
+       u8 *buf = st->smartdata;
+       int err;
+
+       err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+       if (err)
+               return err;
+       switch (attr) {
+       case hwmon_temp_input:
+               *val = temp_from_sct(buf[SCT_STATUS_TEMP]);
+               break;
+       case hwmon_temp_lowest:
+               *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
+               break;
+       case hwmon_temp_highest:
+               *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static int drivetemp_identify_sata(struct drivetemp_data *st)
+{
+       struct scsi_device *sdev = st->sdev;
+       u8 *buf = st->smartdata;
+       struct scsi_vpd *vpd;
+       bool is_ata, is_sata;
+       bool have_sct_data_table;
+       bool have_sct_temp;
+       bool have_smart;
+       bool have_sct;
+       u16 *ata_id;
+       u16 version;
+       long temp;
+       int err;
+
+       /* SCSI-ATA Translation present? */
+       rcu_read_lock();
+       vpd = rcu_dereference(sdev->vpd_pg89);
+
+       /*
+        * Verify that ATA IDENTIFY DEVICE data is included in ATA Information
+        * VPD and that the drive implements the SATA protocol.
+        */
+       if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA ||
+           vpd->data[36] != 0x34) {
+               rcu_read_unlock();
+               return -ENODEV;
+       }
+       ata_id = (u16 *)&vpd->data[60];
+       is_ata = ata_id_is_ata(ata_id);
+       is_sata = ata_id_is_sata(ata_id);
+       have_sct = ata_id_sct_supported(ata_id);
+       have_sct_data_table = ata_id_sct_data_tables(ata_id);
+       have_smart = ata_id_smart_supported(ata_id) &&
+                               ata_id_smart_enabled(ata_id);
+
+       rcu_read_unlock();
+
+       /* bail out if this is not a SATA device */
+       if (!is_ata || !is_sata)
+               return -ENODEV;
+       if (!have_sct)
+               goto skip_sct;
+
+       err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+       if (err)
+               goto skip_sct;
+
+       version = (buf[SCT_STATUS_VERSION_HIGH] << 8) |
+                 buf[SCT_STATUS_VERSION_LOW];
+       if (version != 2 && version != 3)
+               goto skip_sct;
+
+       have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]);
+       if (!have_sct_temp)
+               goto skip_sct;
+
+       st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]);
+       st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
+
+       if (!have_sct_data_table)
+               goto skip_sct;
+
+       /* Request and read temperature history table */
+       memset(buf, '\0', sizeof(st->smartdata));
+       buf[0] = 5;     /* data table command */
+       buf[2] = 1;     /* read table */
+       buf[4] = 2;     /* temperature history table */
+
+       err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR);
+       if (err)
+               goto skip_sct_data;
+
+       err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR);
+       if (err)
+               goto skip_sct_data;
+
+       /*
+        * Temperature limits per AT Attachment 8 -
+        * ATA/ATAPI Command Set (ATA8-ACS)
+        */
+       st->have_temp_max = temp_is_valid(buf[6]);
+       st->have_temp_crit = temp_is_valid(buf[7]);
+       st->have_temp_min = temp_is_valid(buf[8]);
+       st->have_temp_lcrit = temp_is_valid(buf[9]);
+
+       st->temp_max = temp_from_sct(buf[6]);
+       st->temp_crit = temp_from_sct(buf[7]);
+       st->temp_min = temp_from_sct(buf[8]);
+       st->temp_lcrit = temp_from_sct(buf[9]);
+
+skip_sct_data:
+       if (have_sct_temp) {
+               st->get_temp = drivetemp_get_scttemp;
+               return 0;
+       }
+skip_sct:
+       if (!have_smart)
+               return -ENODEV;
+       st->get_temp = drivetemp_get_smarttemp;
+       return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp);
+}
+
+static int drivetemp_identify(struct drivetemp_data *st)
+{
+       struct scsi_device *sdev = st->sdev;
+
+       /* Bail out immediately if there is no inquiry data */
+       if (!sdev->inquiry || sdev->inquiry_len < 16)
+               return -ENODEV;
+
+       /* Disk device? */
+       if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC)
+               return -ENODEV;
+
+       return drivetemp_identify_sata(st);
+}
+
+static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type,
+                        u32 attr, int channel, long *val)
+{
+       struct drivetemp_data *st = dev_get_drvdata(dev);
+       int err = 0;
+
+       if (type != hwmon_temp)
+               return -EINVAL;
+
+       switch (attr) {
+       case hwmon_temp_input:
+       case hwmon_temp_lowest:
+       case hwmon_temp_highest:
+               mutex_lock(&st->lock);
+               err = st->get_temp(st, attr, val);
+               mutex_unlock(&st->lock);
+               break;
+       case hwmon_temp_lcrit:
+               *val = st->temp_lcrit;
+               break;
+       case hwmon_temp_min:
+               *val = st->temp_min;
+               break;
+       case hwmon_temp_max:
+               *val = st->temp_max;
+               break;
+       case hwmon_temp_crit:
+               *val = st->temp_crit;
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static umode_t drivetemp_is_visible(const void *data,
+                                  enum hwmon_sensor_types type,
+                                  u32 attr, int channel)
+{
+       const struct drivetemp_data *st = data;
+
+       switch (type) {
+       case hwmon_temp:
+               switch (attr) {
+               case hwmon_temp_input:
+                       return 0444;
+               case hwmon_temp_lowest:
+                       if (st->have_temp_lowest)
+                               return 0444;
+                       break;
+               case hwmon_temp_highest:
+                       if (st->have_temp_highest)
+                               return 0444;
+                       break;
+               case hwmon_temp_min:
+                       if (st->have_temp_min)
+                               return 0444;
+                       break;
+               case hwmon_temp_max:
+                       if (st->have_temp_max)
+                               return 0444;
+                       break;
+               case hwmon_temp_lcrit:
+                       if (st->have_temp_lcrit)
+                               return 0444;
+                       break;
+               case hwmon_temp_crit:
+                       if (st->have_temp_crit)
+                               return 0444;
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static const struct hwmon_channel_info *drivetemp_info[] = {
+       HWMON_CHANNEL_INFO(chip,
+                          HWMON_C_REGISTER_TZ),
+       HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+                          HWMON_T_LOWEST | HWMON_T_HIGHEST |
+                          HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_LCRIT | HWMON_T_CRIT),
+       NULL
+};
+
+static const struct hwmon_ops drivetemp_ops = {
+       .is_visible = drivetemp_is_visible,
+       .read = drivetemp_read,
+};
+
+static const struct hwmon_chip_info drivetemp_chip_info = {
+       .ops = &drivetemp_ops,
+       .info = drivetemp_info,
+};
+
+/*
+ * The device argument points to sdev->sdev_dev. Its parent is
+ * sdev->sdev_gendev, which we can use to get the scsi_device pointer.
+ */
+static int drivetemp_add(struct device *dev, struct class_interface *intf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev->parent);
+       struct drivetemp_data *st;
+       int err;
+
+       st = kzalloc(sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return -ENOMEM;
+
+       st->sdev = sdev;
+       st->dev = dev;
+       mutex_init(&st->lock);
+
+       if (drivetemp_identify(st)) {
+               err = -ENODEV;
+               goto abort;
+       }
+
+       st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp",
+                                                   st, &drivetemp_chip_info,
+                                                   NULL);
+       if (IS_ERR(st->hwdev)) {
+               err = PTR_ERR(st->hwdev);
+               goto abort;
+       }
+
+       list_add(&st->list, &drivetemp_devlist);
+       return 0;
+
+abort:
+       kfree(st);
+       return err;
+}
+
+static void drivetemp_remove(struct device *dev, struct class_interface *intf)
+{
+       struct drivetemp_data *st, *tmp;
+
+       list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) {
+               if (st->dev == dev) {
+                       list_del(&st->list);
+                       hwmon_device_unregister(st->hwdev);
+                       kfree(st);
+                       break;
+               }
+       }
+}
+
+static struct class_interface drivetemp_interface = {
+       .add_dev = drivetemp_add,
+       .remove_dev = drivetemp_remove,
+};
+
+static int __init drivetemp_init(void)
+{
+       return scsi_register_interface(&drivetemp_interface);
+}
+
+static void __exit drivetemp_exit(void)
+{
+       scsi_unregister_interface(&drivetemp_interface);
+}
+
+module_init(drivetemp_init);
+module_exit(drivetemp_exit);
+
+MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>");
+MODULE_DESCRIPTION("Hard drive temperature monitor");
+MODULE_LICENSE("GPL");
index 1f3b30b085b9bfcbd7cec9bea9e1f4175fb78498..6a30fb453f7adce591062868866e8dda6f158f8d 100644 (file)
@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
 
 #define to_hwmon_attr(d) \
        container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
 
 /*
  * Thermal zone information
@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
  * also provides the sensor index.
  */
 struct hwmon_thermal_data {
-       struct hwmon_device *hwdev;     /* Reference to hwmon device */
+       struct device *dev;             /* Reference to hwmon device */
        int index;                      /* sensor index */
 };
 
@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
        NULL
 };
 
+static void hwmon_free_attrs(struct attribute **attrs)
+{
+       int i;
+
+       for (i = 0; attrs[i]; i++) {
+               struct device_attribute *dattr = to_dev_attr(attrs[i]);
+               struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+               kfree(hattr);
+       }
+       kfree(attrs);
+}
+
 static void hwmon_dev_release(struct device *dev)
 {
-       kfree(to_hwmon_device(dev));
+       struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+       if (hwdev->group.attrs)
+               hwmon_free_attrs(hwdev->group.attrs);
+       kfree(hwdev->groups);
+       kfree(hwdev);
 }
 
 static struct class hwmon_class = {
@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
 static int hwmon_thermal_get_temp(void *data, int *temp)
 {
        struct hwmon_thermal_data *tdata = data;
-       struct hwmon_device *hwdev = tdata->hwdev;
+       struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
        int ret;
        long t;
 
-       ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+       ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
                                     tdata->index, &t);
        if (ret < 0)
                return ret;
@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
        .get_temp = hwmon_thermal_get_temp,
 };
 
-static int hwmon_thermal_add_sensor(struct device *dev,
-                                   struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
 {
        struct hwmon_thermal_data *tdata;
        struct thermal_zone_device *tzd;
@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
        if (!tdata)
                return -ENOMEM;
 
-       tdata->hwdev = hwdev;
+       tdata->dev = dev;
        tdata->index = index;
 
-       tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+       tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
                                                   &hwmon_thermal_ops);
        /*
         * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
        return 0;
 }
 #else
-static int hwmon_thermal_add_sensor(struct device *dev,
-                                   struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
 {
        return 0;
 }
@@ -171,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
 
 static int hwmon_attr_base(enum hwmon_sensor_types type)
 {
-       if (type == hwmon_in)
+       if (type == hwmon_in || type == hwmon_intrusion)
                return 0;
        return 1;
 }
@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
               (type == hwmon_fan && attr == hwmon_fan_label);
 }
 
-static struct attribute *hwmon_genattr(struct device *dev,
-                                      const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
                                       enum hwmon_sensor_types type,
                                       u32 attr,
                                       int index,
@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
        if ((mode & 0222) && !ops->write)
                return ERR_PTR(-EINVAL);
 
-       hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+       hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
        if (!hattr)
                return ERR_PTR(-ENOMEM);
 
@@ -327,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = {
 };
 
 static const char * const hwmon_temp_attr_templates[] = {
+       [hwmon_temp_enable] = "temp%d_enable",
        [hwmon_temp_input] = "temp%d_input",
        [hwmon_temp_type] = "temp%d_type",
        [hwmon_temp_lcrit] = "temp%d_lcrit",
@@ -354,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = {
 };
 
 static const char * const hwmon_in_attr_templates[] = {
+       [hwmon_in_enable] = "in%d_enable",
        [hwmon_in_input] = "in%d_input",
        [hwmon_in_min] = "in%d_min",
        [hwmon_in_max] = "in%d_max",
@@ -369,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = {
        [hwmon_in_max_alarm] = "in%d_max_alarm",
        [hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
        [hwmon_in_crit_alarm] = "in%d_crit_alarm",
-       [hwmon_in_enable] = "in%d_enable",
 };
 
 static const char * const hwmon_curr_attr_templates[] = {
+       [hwmon_curr_enable] = "curr%d_enable",
        [hwmon_curr_input] = "curr%d_input",
        [hwmon_curr_min] = "curr%d_min",
        [hwmon_curr_max] = "curr%d_max",
@@ -391,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = {
 };
 
 static const char * const hwmon_power_attr_templates[] = {
+       [hwmon_power_enable] = "power%d_enable",
        [hwmon_power_average] = "power%d_average",
        [hwmon_power_average_interval] = "power%d_average_interval",
        [hwmon_power_average_interval_max] = "power%d_interval_max",
@@ -422,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = {
 };
 
 static const char * const hwmon_energy_attr_templates[] = {
+       [hwmon_energy_enable] = "energy%d_enable",
        [hwmon_energy_input] = "energy%d_input",
        [hwmon_energy_label] = "energy%d_label",
 };
 
 static const char * const hwmon_humidity_attr_templates[] = {
+       [hwmon_humidity_enable] = "humidity%d_enable",
        [hwmon_humidity_input] = "humidity%d_input",
        [hwmon_humidity_label] = "humidity%d_label",
        [hwmon_humidity_min] = "humidity%d_min",
@@ -438,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = {
 };
 
 static const char * const hwmon_fan_attr_templates[] = {
+       [hwmon_fan_enable] = "fan%d_enable",
        [hwmon_fan_input] = "fan%d_input",
        [hwmon_fan_label] = "fan%d_label",
        [hwmon_fan_min] = "fan%d_min",
@@ -458,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = {
        [hwmon_pwm_freq] = "pwm%d_freq",
 };
 
+static const char * const hwmon_intrusion_attr_templates[] = {
+       [hwmon_intrusion_alarm] = "intrusion%d_alarm",
+       [hwmon_intrusion_beep]  = "intrusion%d_beep",
+};
+
 static const char * const *__templates[] = {
        [hwmon_chip] = hwmon_chip_attrs,
        [hwmon_temp] = hwmon_temp_attr_templates,
@@ -468,6 +495,7 @@ static const char * const *__templates[] = {
        [hwmon_humidity] = hwmon_humidity_attr_templates,
        [hwmon_fan] = hwmon_fan_attr_templates,
        [hwmon_pwm] = hwmon_pwm_attr_templates,
+       [hwmon_intrusion] = hwmon_intrusion_attr_templates,
 };
 
 static const int __templates_size[] = {
@@ -480,6 +508,7 @@ static const int __templates_size[] = {
        [hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
        [hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
        [hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
+       [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates),
 };
 
 static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
@@ -492,8 +521,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
        return n;
 }
 
-static int hwmon_genattrs(struct device *dev,
-                         const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
                          struct attribute **attrs,
                          const struct hwmon_ops *ops,
                          const struct hwmon_channel_info *info)
@@ -519,7 +547,7 @@ static int hwmon_genattrs(struct device *dev,
                        attr_mask &= ~BIT(attr);
                        if (attr >= template_size)
                                return -EINVAL;
-                       a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+                       a = hwmon_genattr(drvdata, info->type, attr, i,
                                          templates[attr], ops);
                        if (IS_ERR(a)) {
                                if (PTR_ERR(a) != -ENOENT)
@@ -533,8 +561,7 @@ static int hwmon_genattrs(struct device *dev,
 }
 
 static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
-                    const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
 {
        int ret, i, aindex = 0, nattrs = 0;
        struct attribute **attrs;
@@ -545,15 +572,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
        if (nattrs == 0)
                return ERR_PTR(-EINVAL);
 
-       attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+       attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
        if (!attrs)
                return ERR_PTR(-ENOMEM);
 
        for (i = 0; chip->info[i]; i++) {
-               ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+               ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
                                     chip->info[i]);
-               if (ret < 0)
+               if (ret < 0) {
+                       hwmon_free_attrs(attrs);
                        return ERR_PTR(ret);
+               }
                aindex += ret;
        }
 
@@ -595,14 +624,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                        for (i = 0; groups[i]; i++)
                                ngroups++;
 
-               hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
-                                            GFP_KERNEL);
+               hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
                if (!hwdev->groups) {
                        err = -ENOMEM;
                        goto free_hwmon;
                }
 
-               attrs = __hwmon_create_attrs(dev, drvdata, chip);
+               attrs = __hwmon_create_attrs(drvdata, chip);
                if (IS_ERR(attrs)) {
                        err = PTR_ERR(attrs);
                        goto free_hwmon;
@@ -647,8 +675,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                                                           hwmon_temp_input, j))
                                        continue;
                                if (info[i]->config[j] & HWMON_T_INPUT) {
-                                       err = hwmon_thermal_add_sensor(dev,
-                                                               hwdev, j);
+                                       err = hwmon_thermal_add_sensor(hdev, j);
                                        if (err) {
                                                device_unregister(hdev);
                                                /*
@@ -667,7 +694,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
        return hdev;
 
 free_hwmon:
-       kfree(hwdev);
+       hwmon_dev_release(hdev);
 ida_remove:
        ida_simple_remove(&hwmon_ida, id);
        return ERR_PTR(err);
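
A note on the lifetime change in this hunk set: attribute memory moves from
devm (parent-owned) allocation to plain allocation freed in
hwmon_dev_release(), because the hwmon class device can outlive its parent's
driver binding while userspace still holds sysfs files open. A standalone
sketch of that ownership pattern, with malloc/free standing in for
kzalloc/kfree and all names illustrative:

/*
 * Ownership sketch: everything reachable from the object is freed by
 * its release routine, never by the parent's managed resources.
 */
#include <stdlib.h>

struct demo_dev {
	void **attrs;		/* NULL-terminated array, owned by us */
};

static void demo_release(struct demo_dev *d)
{
	size_t i;

	for (i = 0; d->attrs && d->attrs[i]; i++)
		free(d->attrs[i]);	/* like hwmon_free_attrs() */
	free(d->attrs);
	free(d);
}

static struct demo_dev *demo_register(size_t nattrs)
{
	struct demo_dev *d = calloc(1, sizeof(*d));
	size_t i;

	if (!d)
		return NULL;
	d->attrs = calloc(nattrs + 1, sizeof(*d->attrs));
	if (!d->attrs)
		goto fail;
	for (i = 0; i < nattrs; i++) {
		d->attrs[i] = malloc(16);
		if (!d->attrs[i])
			goto fail;	/* partial object still freeable */
	}
	return d;
fail:
	demo_release(d);	/* one cleanup path, like free_hwmon: */
	return NULL;
}

int main(void)
{
	struct demo_dev *d = demo_register(4);

	if (d)
		demo_release(d);
	return 0;
}

The single cleanup path mirrors the free_hwmon: label above: a partially
constructed object is handed to the same release routine that full teardown
uses.
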
index b09c39abd3a818cf2d710588a9d6267a1360e577..eeac4b04df278b62746609e465ae38e6496cde12 100644 (file)
@@ -528,7 +528,7 @@ static int i5k_amb_probe(struct platform_device *pdev)
                goto err;
        }
 
-       data->amb_mmio = ioremap_nocache(data->amb_base, data->amb_len);
+       data->amb_mmio = ioremap(data->amb_base, data->amb_len);
        if (!data->amb_mmio) {
                res = -EBUSY;
                goto err_map_failed;
index 5c1dddde193c3e1ce50c539ba91e00b885a73190..e39354ffe973e4a24479ab9e0a134c6cd6523dbc 100644 (file)
@@ -1,13 +1,29 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
+ *             processor hardware monitoring
  *
  * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Implementation notes:
+ * - CCD register address information as well as the calculation to
+ *   convert raw register values is from https://github.com/ocerman/zenpower.
+ *   The information is not confirmed from chip datasheets, but experiments
+ *   suggest that it provides reasonable temperature values.
+ * - Register addresses to read chip voltage and current are also from
+ *   https://github.com/ocerman/zenpower, and not confirmed from chip
+ *   datasheets. Current calibration is board specific and not typically
+ *   shared by board vendors. For this reason, current values are
+ *   normalized to report 1A/LSB for core current and 0.25A/LSB for SoC
+ *   current. Reported values can be adjusted using the sensors configuration
+ *   file.
  */
 
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
 #include <linux/err.h>
 #include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 #endif
 
 /* CPUID function 0x80000001, ebx */
-#define CPUID_PKGTYPE_MASK     0xf0000000
+#define CPUID_PKGTYPE_MASK     GENMASK(31, 28)
 #define CPUID_PKGTYPE_F                0x00000000
 #define CPUID_PKGTYPE_AM2R2_AM3        0x10000000
 
 /* DRAM controller (PCI function 2) */
 #define REG_DCT0_CONFIG_HIGH           0x094
-#define  DDR3_MODE                     0x00000100
+#define  DDR3_MODE                     BIT(8)
 
 /* miscellaneous (PCI function 3) */
 #define REG_HARDWARE_THERMAL_CONTROL   0x64
-#define  HTC_ENABLE                    0x00000001
+#define  HTC_ENABLE                    BIT(0)
 
 #define REG_REPORTED_TEMPERATURE       0xa4
 
 #define REG_NORTHBRIDGE_CAPABILITIES   0xe8
-#define  NB_CAP_HTC                    0x00000400
+#define  NB_CAP_HTC                    BIT(10)
 
 /*
  * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
@@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 /* F17h M01h Access through SMN */
 #define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET    0x00059800
 
+#define F17H_M70H_CCD_TEMP(x)                  (0x00059954 + ((x) * 4))
+#define F17H_M70H_CCD_TEMP_VALID               BIT(11)
+#define F17H_M70H_CCD_TEMP_MASK                        GENMASK(10, 0)
+
+#define F17H_M01H_SVI                          0x0005A000
+#define F17H_M01H_SVI_TEL_PLANE0               (F17H_M01H_SVI + 0xc)
+#define F17H_M01H_SVI_TEL_PLANE1               (F17H_M01H_SVI + 0x10)
+
+#define CUR_TEMP_SHIFT                         21
+#define CUR_TEMP_RANGE_SEL_MASK                        BIT(19)
+
+#define CFACTOR_ICORE                          1000000 /* 1A / LSB     */
+#define CFACTOR_ISOC                           250000  /* 0.25A / LSB  */
+
 struct k10temp_data {
        struct pci_dev *pdev;
        void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -67,6 +97,10 @@ struct k10temp_data {
        int temp_offset;
        u32 temp_adjust_mask;
        bool show_tdie;
+       u32 show_tccd;
+       u32 svi_addr[2];
+       bool show_current;
+       int cfactor[2];
 };
 
 struct tctl_offset {
@@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = {
        { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
 };
 
+static bool is_threadripper(void)
+{
+       return strstr(boot_cpu_data.x86_model_id, "Threadripper");
+}
+
+static bool is_epyc(void)
+{
+       return strstr(boot_cpu_data.x86_model_id, "EPYC");
+}
+
 static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
 {
        pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
@@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
                     F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
 }
 
-static unsigned int get_raw_temp(struct k10temp_data *data)
+static long get_raw_temp(struct k10temp_data *data)
 {
-       unsigned int temp;
        u32 regval;
+       long temp;
 
        data->read_tempreg(data->pdev, &regval);
-       temp = (regval >> 21) * 125;
+       temp = (regval >> CUR_TEMP_SHIFT) * 125;
        if (regval & data->temp_adjust_mask)
                temp -= 49000;
        return temp;
 }
 
-static ssize_t temp1_input_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct k10temp_data *data = dev_get_drvdata(dev);
-       unsigned int temp = get_raw_temp(data);
+static const char *k10temp_temp_label[] = {
+       "Tdie",
+       "Tctl",
+       "Tccd1",
+       "Tccd2",
+       "Tccd3",
+       "Tccd4",
+       "Tccd5",
+       "Tccd6",
+       "Tccd7",
+       "Tccd8",
+};
 
-       if (temp > data->temp_offset)
-               temp -= data->temp_offset;
-       else
-               temp = 0;
+static const char *k10temp_in_label[] = {
+       "Vcore",
+       "Vsoc",
+};
 
-       return sprintf(buf, "%u\n", temp);
-}
+static const char *k10temp_curr_label[] = {
+       "Icore",
+       "Isoc",
+};
 
-static ssize_t temp2_input_show(struct device *dev,
-                               struct device_attribute *devattr, char *buf)
+static int k10temp_read_labels(struct device *dev,
+                              enum hwmon_sensor_types type,
+                              u32 attr, int channel, const char **str)
 {
-       struct k10temp_data *data = dev_get_drvdata(dev);
-       unsigned int temp = get_raw_temp(data);
-
-       return sprintf(buf, "%u\n", temp);
+       switch (type) {
+       case hwmon_temp:
+               *str = k10temp_temp_label[channel];
+               break;
+       case hwmon_in:
+               *str = k10temp_in_label[channel];
+               break;
+       case hwmon_curr:
+               *str = k10temp_curr_label[channel];
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
 }
 
-static ssize_t temp_label_show(struct device *dev,
-                              struct device_attribute *devattr, char *buf)
+static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
+                            long *val)
 {
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct k10temp_data *data = dev_get_drvdata(dev);
+       u32 regval;
 
-       return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
+       switch (attr) {
+       case hwmon_curr_input:
+               amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+                            data->svi_addr[channel], &regval);
+               *val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
+                                        (regval & 0xff),
+                                        1000);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
 }
 
-static ssize_t temp1_max_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
 {
-       return sprintf(buf, "%d\n", 70 * 1000);
+       struct k10temp_data *data = dev_get_drvdata(dev);
+       u32 regval;
+
+       switch (attr) {
+       case hwmon_in_input:
+               amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+                            data->svi_addr[channel], &regval);
+               regval = (regval >> 16) & 0xff;
+               *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
 }
 
-static ssize_t temp_crit_show(struct device *dev,
-                             struct device_attribute *devattr, char *buf)
+static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+                            long *val)
 {
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct k10temp_data *data = dev_get_drvdata(dev);
-       int show_hyst = attr->index;
        u32 regval;
-       int value;
 
-       data->read_htcreg(data->pdev, &regval);
-       value = ((regval >> 16) & 0x7f) * 500 + 52000;
-       if (show_hyst)
-               value -= ((regval >> 24) & 0xf) * 500;
-       return sprintf(buf, "%d\n", value);
+       switch (attr) {
+       case hwmon_temp_input:
+               switch (channel) {
+               case 0:         /* Tdie */
+                       *val = get_raw_temp(data) - data->temp_offset;
+                       if (*val < 0)
+                               *val = 0;
+                       break;
+               case 1:         /* Tctl */
+                       *val = get_raw_temp(data);
+                       if (*val < 0)
+                               *val = 0;
+                       break;
+               case 2 ... 9:           /* Tccd{1-8} */
+                       amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+                                    F17H_M70H_CCD_TEMP(channel - 2), &regval);
+                       *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000;
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case hwmon_temp_max:
+               *val = 70 * 1000;
+               break;
+       case hwmon_temp_crit:
+               data->read_htcreg(data->pdev, &regval);
+               *val = ((regval >> 16) & 0x7f) * 500 + 52000;
+               break;
+       case hwmon_temp_crit_hyst:
+               data->read_htcreg(data->pdev, &regval);
+               *val = (((regval >> 16) & 0x7f)
+                       - ((regval >> 24) & 0xf)) * 500 + 52000;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
 }
 
-static DEVICE_ATTR_RO(temp1_input);
-static DEVICE_ATTR_RO(temp1_max);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
-static DEVICE_ATTR_RO(temp2_input);
-static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);
+static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long *val)
+{
+       switch (type) {
+       case hwmon_temp:
+               return k10temp_read_temp(dev, attr, channel, val);
+       case hwmon_in:
+               return k10temp_read_in(dev, attr, channel, val);
+       case hwmon_curr:
+               return k10temp_read_curr(dev, attr, channel, val);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
 
-static umode_t k10temp_is_visible(struct kobject *kobj,
-                                 struct attribute *attr, int index)
+static umode_t k10temp_is_visible(const void *_data,
+                                 enum hwmon_sensor_types type,
+                                 u32 attr, int channel)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct k10temp_data *data = dev_get_drvdata(dev);
+       const struct k10temp_data *data = _data;
        struct pci_dev *pdev = data->pdev;
        u32 reg;
 
-       switch (index) {
-       case 0 ... 1:   /* temp1_input, temp1_max */
-       default:
-               break;
-       case 2 ... 3:   /* temp1_crit, temp1_crit_hyst */
-               if (!data->read_htcreg)
-                       return 0;
-
-               pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
-                                     &reg);
-               if (!(reg & NB_CAP_HTC))
-                       return 0;
-
-               data->read_htcreg(data->pdev, &reg);
-               if (!(reg & HTC_ENABLE))
+       switch (type) {
+       case hwmon_temp:
+               switch (attr) {
+               case hwmon_temp_input:
+                       switch (channel) {
+                       case 0:         /* Tdie, or Tctl if we don't show it */
+                               break;
+                       case 1:         /* Tctl */
+                               if (!data->show_tdie)
+                                       return 0;
+                               break;
+                       case 2 ... 9:           /* Tccd{1-8} */
+                               if (!(data->show_tccd & BIT(channel - 2)))
+                                       return 0;
+                               break;
+                       default:
+                               return 0;
+                       }
+                       break;
+               case hwmon_temp_max:
+                       if (channel || data->show_tdie)
+                               return 0;
+                       break;
+               case hwmon_temp_crit:
+               case hwmon_temp_crit_hyst:
+                       if (channel || !data->read_htcreg)
+                               return 0;
+
+                       pci_read_config_dword(pdev,
+                                             REG_NORTHBRIDGE_CAPABILITIES,
+                                             &reg);
+                       if (!(reg & NB_CAP_HTC))
+                               return 0;
+
+                       data->read_htcreg(data->pdev, &reg);
+                       if (!(reg & HTC_ENABLE))
+                               return 0;
+                       break;
+               case hwmon_temp_label:
+                       /* No labels if we don't show the die temperature */
+                       if (!data->show_tdie)
+                               return 0;
+                       switch (channel) {
+                       case 0:         /* Tdie */
+                       case 1:         /* Tctl */
+                               break;
+                       case 2 ... 9:           /* Tccd{1-8} */
+                               if (!(data->show_tccd & BIT(channel - 2)))
+                                       return 0;
+                               break;
+                       default:
+                               return 0;
+                       }
+                       break;
+               default:
                        return 0;
+               }
                break;
-       case 4 ... 6:   /* temp1_label, temp2_input, temp2_label */
-               if (!data->show_tdie)
+       case hwmon_in:
+       case hwmon_curr:
+               if (!data->show_current)
                        return 0;
                break;
+       default:
+               return 0;
        }
-       return attr->mode;
+       return 0444;
 }
 
-static struct attribute *k10temp_attrs[] = {
-       &dev_attr_temp1_input.attr,
-       &dev_attr_temp1_max.attr,
-       &sensor_dev_attr_temp1_crit.dev_attr.attr,
-       &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
-       &sensor_dev_attr_temp1_label.dev_attr.attr,
-       &dev_attr_temp2_input.attr,
-       &sensor_dev_attr_temp2_label.dev_attr.attr,
-       NULL
-};
-
-static const struct attribute_group k10temp_group = {
-       .attrs = k10temp_attrs,
-       .is_visible = k10temp_is_visible,
-};
-__ATTRIBUTE_GROUPS(k10temp);
-
 static bool has_erratum_319(struct pci_dev *pdev)
 {
        u32 pkg_type, reg_dram_cfg;
@@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev)
               (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
 }
 
-static int k10temp_probe(struct pci_dev *pdev,
-                                  const struct pci_device_id *id)
+#ifdef CONFIG_DEBUG_FS
+
+static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev,
+                                 u32 addr, int count)
+{
+       u32 reg;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               if (!(i & 3))
+                       seq_printf(s, "0x%06x: ", addr + i * 4);
+               amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg);
+               seq_printf(s, "%08x ", reg);
+               if ((i & 3) == 3)
+                       seq_puts(s, "\n");
+       }
+}
+
+static int svi_show(struct seq_file *s, void *unused)
+{
+       struct k10temp_data *data = s->private;
+
+       k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32);
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(svi);
+
+static int thm_show(struct seq_file *s, void *unused)
+{
+       struct k10temp_data *data = s->private;
+
+       k10temp_smn_regs_show(s, data->pdev,
+                             F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256);
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(thm);
+
+static void k10temp_debugfs_cleanup(void *ddir)
+{
+       debugfs_remove_recursive(ddir);
+}
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+       struct dentry *debugfs;
+       char name[32];
+
+       /* Only show debugfs data for Family 17h/18h CPUs */
+       if (!data->show_tdie)
+               return;
+
+       scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));
+
+       debugfs = debugfs_create_dir(name, NULL);
+       if (debugfs) {
+               debugfs_create_file("svi", 0444, debugfs, data, &svi_fops);
+               debugfs_create_file("thm", 0444, debugfs, data, &thm_fops);
+               devm_add_action_or_reset(&data->pdev->dev,
+                                        k10temp_debugfs_cleanup, debugfs);
+       }
+}
+
+#else
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+}
+
+#endif
+
+static const struct hwmon_channel_info *k10temp_info[] = {
+       HWMON_CHANNEL_INFO(temp,
+                          HWMON_T_INPUT | HWMON_T_MAX |
+                          HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+                          HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL,
+                          HWMON_T_INPUT | HWMON_T_LABEL),
+       HWMON_CHANNEL_INFO(in,
+                          HWMON_I_INPUT | HWMON_I_LABEL,
+                          HWMON_I_INPUT | HWMON_I_LABEL),
+       HWMON_CHANNEL_INFO(curr,
+                          HWMON_C_INPUT | HWMON_C_LABEL,
+                          HWMON_C_INPUT | HWMON_C_LABEL),
+       NULL
+};
+
+static const struct hwmon_ops k10temp_hwmon_ops = {
+       .is_visible = k10temp_is_visible,
+       .read = k10temp_read,
+       .read_string = k10temp_read_labels,
+};
+
+static const struct hwmon_chip_info k10temp_chip_info = {
+       .ops = &k10temp_hwmon_ops,
+       .info = k10temp_info,
+};
+
+static void k10temp_get_ccd_support(struct pci_dev *pdev,
+                                   struct k10temp_data *data, int limit)
+{
+       u32 regval;
+       int i;
+
+       for (i = 0; i < limit; i++) {
+               amd_smn_read(amd_pci_dev_to_node_id(pdev),
+                            F17H_M70H_CCD_TEMP(i), &regval);
+               if (regval & F17H_M70H_CCD_TEMP_VALID)
+                       data->show_tccd |= BIT(i);
+       }
+}
+
+static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int unreliable = has_erratum_319(pdev);
        struct device *dev = &pdev->dev;
@@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev,
                data->read_htcreg = read_htcreg_nb_f15;
                data->read_tempreg = read_tempreg_nb_f15;
        } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
-               data->temp_adjust_mask = 0x80000;
+               data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
                data->read_tempreg = read_tempreg_nb_f17;
                data->show_tdie = true;
+
+               switch (boot_cpu_data.x86_model) {
+               case 0x1:       /* Zen */
+               case 0x8:       /* Zen+ */
+               case 0x11:      /* Zen APU */
+               case 0x18:      /* Zen+ APU */
+                       data->show_current = !is_threadripper() && !is_epyc();
+                       data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
+                       data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
+                       data->cfactor[0] = CFACTOR_ICORE;
+                       data->cfactor[1] = CFACTOR_ISOC;
+                       k10temp_get_ccd_support(pdev, data, 4);
+                       break;
+               case 0x31:      /* Zen2 Threadripper */
+               case 0x71:      /* Zen2 */
+                       data->show_current = !is_threadripper() && !is_epyc();
+                       data->cfactor[0] = CFACTOR_ICORE;
+                       data->cfactor[1] = CFACTOR_ISOC;
+                       data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1;
+                       data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0;
+                       k10temp_get_ccd_support(pdev, data, 8);
+                       break;
+               }
        } else {
                data->read_htcreg = read_htcreg_pci;
                data->read_tempreg = read_tempreg_pci;
@@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev,
                }
        }
 
-       hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
-                                                          k10temp_groups);
-       return PTR_ERR_OR_ZERO(hwmon_dev);
+       hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
+                                                        &k10temp_chip_info,
+                                                        NULL);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
+
+       k10temp_init_debugfs(data);
+
+       return 0;
 }
 
 static const struct pci_device_id k10temp_id_table[] = {
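
For reference, the voltage/current arithmetic introduced above in standalone
form. As the implementation notes say, the formulas are taken from the
zenpower project and are experimental rather than datasheet-confirmed; the
sample register value below is hypothetical:

/*
 * Standalone form of the SVI telemetry decoding. Voltage is 1.55 V
 * minus 6.25 mV per LSB (bits 23:16); current is cfactor microamps
 * per LSB (bits 7:0).
 */
#include <stdio.h>
#include <stdint.h>

#define CFACTOR_ICORE	1000000	/* 1 A / LSB */
#define CFACTOR_ISOC	250000	/* 0.25 A / LSB */

static long svi_millivolts(uint32_t regval)
{
	uint32_t v = (regval >> 16) & 0xff;

	return (155000 - (long)v * 625 + 50) / 100;	/* nearest mV */
}

static long svi_milliamps(uint32_t regval, long cfactor)
{
	return (cfactor * (long)(regval & 0xff) + 500) / 1000;
}

int main(void)
{
	uint32_t sample = 0x00480023;	/* hypothetical telemetry value */

	printf("V: %ld mV  I(core): %ld mA\n",
	       svi_millivolts(sample), svi_milliamps(sample, CFACTOR_ICORE));
	return 0;
}
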
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
new file mode 100644 (file)
index 0000000..eb22a34
--- /dev/null
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX31730 3-Channel Remote Temperature Sensor
+ *
+ * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+/* Addresses scanned */
+static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c,
+                                            0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
+
+/* The MAX31730 registers */
+#define MAX31730_REG_TEMP              0x00
+#define MAX31730_REG_CONF              0x13
+#define  MAX31730_STOP                 BIT(7)
+#define  MAX31730_EXTRANGE             BIT(1)
+#define MAX31730_REG_TEMP_OFFSET       0x16
+#define  MAX31730_TEMP_OFFSET_BASELINE 0x77
+#define MAX31730_REG_OFFSET_ENABLE     0x17
+#define MAX31730_REG_TEMP_MAX          0x20
+#define MAX31730_REG_TEMP_MIN          0x30
+#define MAX31730_REG_STATUS_HIGH       0x32
+#define MAX31730_REG_STATUS_LOW                0x33
+#define MAX31730_REG_CHANNEL_ENABLE    0x35
+#define MAX31730_REG_TEMP_FAULT                0x36
+
+#define MAX31730_REG_MFG_ID            0x50
+#define  MAX31730_MFG_ID               0x4d
+#define MAX31730_REG_MFG_REV           0x51
+#define  MAX31730_MFG_REV              0x01
+
+#define MAX31730_TEMP_MIN              (-128000)
+#define MAX31730_TEMP_MAX              127937
+
+/* Each client has this additional data */
+struct max31730_data {
+       struct i2c_client       *client;
+       u8                      orig_conf;
+       u8                      current_conf;
+       u8                      offset_enable;
+       u8                      channel_enable;
+};
+
+/*-----------------------------------------------------------------------*/
+
+static inline long max31730_reg_to_mc(s16 temp)
+{
+       return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16);
+}
+
+static int max31730_write_config(struct max31730_data *data, u8 set_mask,
+                                u8 clr_mask)
+{
+       u8 value;
+
+       clr_mask |= MAX31730_EXTRANGE;
+       value = data->current_conf & ~clr_mask;
+       value |= set_mask;
+
+       if (data->current_conf != value) {
+               s32 err;
+
+               err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF,
+                                               value);
+               if (err)
+                       return err;
+               data->current_conf = value;
+       }
+       return 0;
+}
+
+static int max31730_set_enable(struct i2c_client *client, int reg,
+                              u8 *confdata, int channel, bool enable)
+{
+       u8 regval = *confdata;
+       int err;
+
+       if (enable)
+               regval |= BIT(channel);
+       else
+               regval &= ~BIT(channel);
+
+       if (regval != *confdata) {
+               err = i2c_smbus_write_byte_data(client, reg, regval);
+               if (err)
+                       return err;
+               *confdata = regval;
+       }
+       return 0;
+}
+
+static int max31730_set_offset_enable(struct max31730_data *data, int channel,
+                                     bool enable)
+{
+       return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE,
+                                  &data->offset_enable, channel, enable);
+}
+
+static int max31730_set_channel_enable(struct max31730_data *data, int channel,
+                                      bool enable)
+{
+       return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE,
+                                  &data->channel_enable, channel, enable);
+}
+
+static int max31730_read(struct device *dev, enum hwmon_sensor_types type,
+                        u32 attr, int channel, long *val)
+{
+       struct max31730_data *data = dev_get_drvdata(dev);
+       int regval, reg, offset;
+
+       if (type != hwmon_temp)
+               return -EINVAL;
+
+       switch (attr) {
+       case hwmon_temp_input:
+               if (!(data->channel_enable & BIT(channel)))
+                       return -ENODATA;
+               reg = MAX31730_REG_TEMP + (channel * 2);
+               break;
+       case hwmon_temp_max:
+               reg = MAX31730_REG_TEMP_MAX + (channel * 2);
+               break;
+       case hwmon_temp_min:
+               reg = MAX31730_REG_TEMP_MIN;
+               break;
+       case hwmon_temp_enable:
+               *val = !!(data->channel_enable & BIT(channel));
+               return 0;
+       case hwmon_temp_offset:
+               if (!channel)
+                       return -EINVAL;
+               if (!(data->offset_enable & BIT(channel))) {
+                       *val = 0;
+                       return 0;
+               }
+               offset = i2c_smbus_read_byte_data(data->client,
+                                                 MAX31730_REG_TEMP_OFFSET);
+               if (offset < 0)
+                       return offset;
+               *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125;
+               return 0;
+       case hwmon_temp_fault:
+               regval = i2c_smbus_read_byte_data(data->client,
+                                                 MAX31730_REG_TEMP_FAULT);
+               if (regval < 0)
+                       return regval;
+               *val = !!(regval & BIT(channel));
+               return 0;
+       case hwmon_temp_min_alarm:
+               regval = i2c_smbus_read_byte_data(data->client,
+                                                 MAX31730_REG_STATUS_LOW);
+               if (regval < 0)
+                       return regval;
+               *val = !!(regval & BIT(channel));
+               return 0;
+       case hwmon_temp_max_alarm:
+               regval = i2c_smbus_read_byte_data(data->client,
+                                                 MAX31730_REG_STATUS_HIGH);
+               if (regval < 0)
+                       return regval;
+               *val = !!(regval & BIT(channel));
+               return 0;
+       default:
+               return -EINVAL;
+       }
+       regval = i2c_smbus_read_word_swapped(data->client, reg);
+       if (regval < 0)
+               return regval;
+
+       *val = max31730_reg_to_mc(regval);
+
+       return 0;
+}
+
+static int max31730_write(struct device *dev, enum hwmon_sensor_types type,
+                         u32 attr, int channel, long val)
+{
+       struct max31730_data *data = dev_get_drvdata(dev);
+       int reg, err;
+
+       if (type != hwmon_temp)
+               return -EINVAL;
+
+       switch (attr) {
+       case hwmon_temp_max:
+               reg = MAX31730_REG_TEMP_MAX + channel * 2;
+               break;
+       case hwmon_temp_min:
+               reg = MAX31730_REG_TEMP_MIN;
+               break;
+       case hwmon_temp_enable:
+               if (val != 0 && val != 1)
+                       return -EINVAL;
+               return max31730_set_channel_enable(data, channel, val);
+       case hwmon_temp_offset:
+               val = clamp_val(val, -14875, 17000) + 14875;
+               val = DIV_ROUND_CLOSEST(val, 125);
+               err = max31730_set_offset_enable(data, channel,
+                                       val != MAX31730_TEMP_OFFSET_BASELINE);
+               if (err)
+                       return err;
+               return i2c_smbus_write_byte_data(data->client,
+                                                MAX31730_REG_TEMP_OFFSET, val);
+       default:
+               return -EINVAL;
+       }
+
+       val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX);
+       val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4;
+
+       return i2c_smbus_write_word_swapped(data->client, reg, (u16)val);
+}
+
+static umode_t max31730_is_visible(const void *data,
+                                  enum hwmon_sensor_types type,
+                                  u32 attr, int channel)
+{
+       switch (type) {
+       case hwmon_temp:
+               switch (attr) {
+               case hwmon_temp_input:
+               case hwmon_temp_min_alarm:
+               case hwmon_temp_max_alarm:
+               case hwmon_temp_fault:
+                       return 0444;
+               case hwmon_temp_min:
+                       return channel ? 0444 : 0644;
+               case hwmon_temp_offset:
+               case hwmon_temp_enable:
+               case hwmon_temp_max:
+                       return 0644;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static const struct hwmon_channel_info *max31730_info[] = {
+       HWMON_CHANNEL_INFO(chip,
+                          HWMON_C_REGISTER_TZ),
+       HWMON_CHANNEL_INFO(temp,
+                          HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_ENABLE |
+                          HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+                          HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_OFFSET | HWMON_T_ENABLE |
+                          HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+                          HWMON_T_FAULT,
+                          HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_OFFSET | HWMON_T_ENABLE |
+                          HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+                          HWMON_T_FAULT,
+                          HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+                          HWMON_T_OFFSET | HWMON_T_ENABLE |
+                          HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+                          HWMON_T_FAULT
+                          ),
+       NULL
+};
+
+static const struct hwmon_ops max31730_hwmon_ops = {
+       .is_visible = max31730_is_visible,
+       .read = max31730_read,
+       .write = max31730_write,
+};
+
+static const struct hwmon_chip_info max31730_chip_info = {
+       .ops = &max31730_hwmon_ops,
+       .info = max31730_info,
+};
+
+static void max31730_remove(void *data)
+{
+       struct max31730_data *max31730 = data;
+       struct i2c_client *client = max31730->client;
+
+       i2c_smbus_write_byte_data(client, MAX31730_REG_CONF,
+                                 max31730->orig_conf);
+}
+
+static int
+max31730_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct device *hwmon_dev;
+       struct max31730_data *data;
+       int status, err;
+
+       if (!i2c_check_functionality(client->adapter,
+                       I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+               return -EIO;
+
+       data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->client = client;
+
+       /* Cache original configuration and enable status */
+       status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE);
+       if (status < 0)
+               return status;
+       data->channel_enable = status;
+
+       status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE);
+       if (status < 0)
+               return status;
+       data->offset_enable = status;
+
+       status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF);
+       if (status < 0)
+               return status;
+       data->orig_conf = status;
+       data->current_conf = status;
+
+       err = max31730_write_config(data,
+                                   data->channel_enable ? 0 : MAX31730_STOP,
+                                   data->channel_enable ? MAX31730_STOP : 0);
+       if (err)
+               return err;
+
+       dev_set_drvdata(dev, data);
+
+       err = devm_add_action_or_reset(dev, max31730_remove, data);
+       if (err)
+               return err;
+
+       hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+                                                        data,
+                                                        &max31730_chip_info,
+                                                        NULL);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max31730_ids[] = {
+       { "max31730", 0, },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, max31730_ids);
+
+static const struct of_device_id __maybe_unused max31730_of_match[] = {
+       {
+               .compatible = "maxim,max31730",
+       },
+       { },
+};
+MODULE_DEVICE_TABLE(of, max31730_of_match);
+
+static bool max31730_check_reg_temp(struct i2c_client *client,
+                                   int reg)
+{
+       int regval;
+
+       regval = i2c_smbus_read_byte_data(client, reg + 1);
+       return regval < 0 || (regval & 0x0f);
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max31730_detect(struct i2c_client *client,
+                          struct i2c_board_info *info)
+{
+       struct i2c_adapter *adapter = client->adapter;
+       int regval;
+       int i;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+                                    I2C_FUNC_SMBUS_WORD_DATA))
+               return -ENODEV;
+
+       regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID);
+       if (regval != MAX31730_MFG_ID)
+               return -ENODEV;
+       regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV);
+       if (regval != MAX31730_MFG_REV)
+               return -ENODEV;
+
+ * the lower 4 bits of the temperature and limit registers must be 0
+       if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN))
+               return -ENODEV;
+
+       for (i = 0; i < 4; i++) {
+               if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2))
+                       return -ENODEV;
+               if (max31730_check_reg_temp(client,
+                                           MAX31730_REG_TEMP_MAX + i * 2))
+                       return -ENODEV;
+       }
+
+       strlcpy(info->type, "max31730", I2C_NAME_SIZE);
+
+       return 0;
+}
+
+static int __maybe_unused max31730_suspend(struct device *dev)
+{
+       struct max31730_data *data = dev_get_drvdata(dev);
+
+       return max31730_write_config(data, MAX31730_STOP, 0);
+}
+
+static int __maybe_unused max31730_resume(struct device *dev)
+{
+       struct max31730_data *data = dev_get_drvdata(dev);
+
+       return max31730_write_config(data, 0, MAX31730_STOP);
+}
+
+static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
+
+static struct i2c_driver max31730_driver = {
+       .class          = I2C_CLASS_HWMON,
+       .driver = {
+               .name   = "max31730",
+               .of_match_table = of_match_ptr(max31730_of_match),
+               .pm     = &max31730_pm_ops,
+       },
+       .probe          = max31730_probe,
+       .id_table       = max31730_ids,
+       .detect         = max31730_detect,
+       .address_list   = normal_i2c,
+};
+
+module_i2c_driver(max31730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX31730 driver");
+MODULE_LICENSE("GPL");
index f3dd2a17bd426244d72d26a8d027fa24523cbb00..2e97e56c72c743d7b1e8d898b055f48a3bbd90d4 100644 (file)
@@ -23,8 +23,8 @@
 static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
 
 static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
-       { 0x40, 0x00, 0x42, 0x44, 0x46 },
-       { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+       { 0x46, 0x00, 0x40, 0x42, 0x44 },
+       { 0x45, 0x00, 0x3f, 0x41, 0x43 },
 };
 
 static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
 struct nct7802_data {
        struct regmap *regmap;
        struct mutex access_lock; /* for multi-byte read and write operations */
+       u8 in_status;
+       struct mutex in_alarm_lock;
 };
 
 static ssize_t temp_type_show(struct device *dev,
@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
        return err ? : count;
 }
 
+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+       struct nct7802_data *data = dev_get_drvdata(dev);
+       int volt, min, max, ret;
+       unsigned int val;
+
+       mutex_lock(&data->in_alarm_lock);
+
+       /*
+        * The SMI Voltage status register is the only register giving a status
+        * for voltages. A bit is set for each input crossing a threshold, in
+        * either direction, but whether the input is now inside or outside its
+        * limits is not reported. The register is also cleared on read.
+        * Note: this is not explicitly spelled out in the datasheet, but was
+        * determined by experiment.
+        * To deal with this we use a status cache with one validity bit and
+        * one status bit for each input. Validity is cleared at startup and
+        * each time the register reports a change, and the status is processed
+        * by software based on current input value and limits.
+        */
+       ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
+       if (ret < 0)
+               goto abort;
+
+       /* invalidate cached status for all inputs crossing a threshold */
+       data->in_status &= ~((val & 0x0f) << 4);
+
+       /* if cached status for requested input is invalid, update it */
+       if (!(data->in_status & (0x10 << sattr->index))) {
+               ret = nct7802_read_voltage(data, sattr->nr, 0);
+               if (ret < 0)
+                       goto abort;
+               volt = ret;
+
+               ret = nct7802_read_voltage(data, sattr->nr, 1);
+               if (ret < 0)
+                       goto abort;
+               min = ret;
+
+               ret = nct7802_read_voltage(data, sattr->nr, 2);
+               if (ret < 0)
+                       goto abort;
+               max = ret;
+
+               if (volt < min || volt > max)
+                       data->in_status |= (1 << sattr->index);
+               else
+                       data->in_status &= ~(1 << sattr->index);
+
+               data->in_status |= 0x10 << sattr->index;
+       }
+
+       ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
+abort:
+       mutex_unlock(&data->in_alarm_lock);
+       return ret;
+}
+
 static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
 static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
 static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
 
 static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
 static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
 
 static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
 
 static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
 static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
 
 static struct attribute *nct7802_in_attrs[] = {
@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
                return PTR_ERR(data->regmap);
 
        mutex_init(&data->access_lock);
+       mutex_init(&data->in_alarm_lock);
 
        ret = nct7802_init_chip(data);
        if (ret < 0)
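
The caching scheme described in the comment above generalizes to any
clear-on-read status register shared by several attributes. A self-contained
sketch of the same validity-bit pattern, with stub functions standing in for
the regmap access and the limit comparison (all names illustrative, not driver
API):

/*
 * Bits 0-3 hold the last computed alarm state per input; bits 4-7
 * mark which of those cached states are still valid.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t smi_status = 0x05;	/* pretend inputs 0 and 2 changed */
static uint8_t in_status;		/* state nibble + validity nibble */

static uint8_t read_status_reg(void)
{
	uint8_t v = smi_status;

	smi_status = 0;			/* clear-on-read behavior */
	return v;
}

static bool outside_limits(int index)
{
	return index == 2;		/* pretend input 2 is out of range */
}

static bool in_alarm(int index)
{
	/* every input that crossed a threshold loses its cached state */
	in_status &= ~((read_status_reg() & 0x0f) << 4);

	if (!(in_status & (0x10 << index))) {
		if (outside_limits(index))
			in_status |= 1 << index;
		else
			in_status &= ~(1 << index);
		in_status |= 0x10 << index;	/* cache valid again */
	}
	return in_status & (1 << index);
}

int main(void)
{
	printf("in0 alarm: %d, in2 alarm: %d\n", in_alarm(0), in_alarm(2));
	return 0;
}
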
index 59859979571df5a24cc8b78b564c4af380110ddc..a9ea06204767dea2e3ef90a60547d2783b23c5b4 100644 (file)
@@ -20,8 +20,8 @@ config SENSORS_PMBUS
        help
          If you say yes here you get hardware monitoring support for generic
          PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
-         MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20,
-         TPS544B25, TPS544C20, TPS544C25, and UDT020.
+         MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400,
+         TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
 
          This driver can also be built as a module. If so, the module will
          be called pmbus.
@@ -145,6 +145,15 @@ config SENSORS_MAX16064
          This driver can also be built as a module. If so, the module will
          be called max16064.
 
+config SENSORS_MAX20730
+       tristate "Maxim MAX20730, MAX20734, MAX20743"
+       help
+         If you say yes here you get hardware monitoring support for Maxim
+         MAX20730, MAX20734, and MAX20743.
+
+         This driver can also be built as a module. If so, the module will
+         be called max20730.
+
 config SENSORS_MAX20751
        tristate "Maxim MAX20751"
        help
@@ -200,20 +209,20 @@ config SENSORS_TPS40422
          be called tps40422.
 
 config SENSORS_TPS53679
-       tristate "TI TPS53679"
+       tristate "TI TPS53679, TPS53688"
        help
          If you say yes here you get hardware monitoring support for TI
-         TPS53679.
+         TPS53679 and TPS53688.
 
          This driver can also be built as a module. If so, the module will
          be called tps53679.
 
 config SENSORS_UCD9000
-       tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
+       tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
        help
          If you say yes here you get hardware monitoring support for TI
-         UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System
-         Health Controllers.
+         UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer
+         and System Health Controllers.
 
          This driver can also be built as a module. If so, the module will
          be called ucd9000.
@@ -228,6 +237,15 @@ config SENSORS_UCD9200
          This driver can also be built as a module. If so, the module will
          be called ucd9200.
 
+config SENSORS_XDPE122
+       tristate "Infineon XDPE122 family"
+       help
+         If you say yes here you get hardware monitoring support for Infineon
+         XDPE12254 and XDPE12284 devices.
+
+         This driver can also be built as a module. If so, the module will
+         be called xdpe12284.
+
 config SENSORS_ZL6100
        tristate "Intersil ZL6100 and compatibles"
        help
index 3f8c1014938b7aa8ca72c03210480ab80052476f..5feb45806123e3740aa1800cd6339a5160a93d5d 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
 obj-$(CONFIG_SENSORS_LTC2978)  += ltc2978.o
 obj-$(CONFIG_SENSORS_LTC3815)  += ltc3815.o
 obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
 obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
 obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
 obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
@@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422)        += tps40422.o
 obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
 obj-$(CONFIG_SENSORS_UCD9000)  += ucd9000.o
 obj-$(CONFIG_SENSORS_UCD9200)  += ucd9200.o
+obj-$(CONFIG_SENSORS_XDPE122)  += xdpe12284.o
 obj-$(CONFIG_SENSORS_ZL6100)   += zl6100.o
index d359b76bcb36430dad86e98945ef6a9dc0da8f63..3795fe55b84fee866db49dcfea65962290ada075 100644 (file)
 
 #define CFFPS_FRU_CMD                          0x9A
 #define CFFPS_PN_CMD                           0x9B
+#define CFFPS_HEADER_CMD                       0x9C
 #define CFFPS_SN_CMD                           0x9E
+#define CFFPS_MAX_POWER_OUT_CMD                        0xA7
 #define CFFPS_CCIN_CMD                         0xBD
 #define CFFPS_FW_CMD                           0xFA
 #define CFFPS1_FW_NUM_BYTES                    4
 #define CFFPS2_FW_NUM_WORDS                    3
 #define CFFPS_SYS_CONFIG_CMD                   0xDA
+#define CFFPS_12VCS_VOUT_CMD                   0xDE
 
 #define CFFPS_INPUT_HISTORY_CMD                        0xD6
 #define CFFPS_INPUT_HISTORY_SIZE               100
 #define CFFPS_MFR_VAUX_FAULT                   BIT(6)
 #define CFFPS_MFR_CURRENT_SHARE_WARNING                BIT(7)
 
-/*
- * LED off state actually relinquishes LED control to PSU firmware, so it can
- * turn on the LED for faults.
- */
-#define CFFPS_LED_OFF                          0
 #define CFFPS_LED_BLINK                                BIT(0)
 #define CFFPS_LED_ON                           BIT(1)
+#define CFFPS_LED_OFF                          BIT(2)
 #define CFFPS_BLINK_RATE_MS                    250
 
 enum {
        CFFPS_DEBUGFS_INPUT_HISTORY = 0,
        CFFPS_DEBUGFS_FRU,
        CFFPS_DEBUGFS_PN,
+       CFFPS_DEBUGFS_HEADER,
        CFFPS_DEBUGFS_SN,
+       CFFPS_DEBUGFS_MAX_POWER_OUT,
        CFFPS_DEBUGFS_CCIN,
        CFFPS_DEBUGFS_FW,
+       CFFPS_DEBUGFS_ON_OFF_CONFIG,
        CFFPS_DEBUGFS_NUM_ENTRIES
 };
 
@@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
                                       psu->input_history.byte_count);
 }
 
-static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
-                                   size_t count, loff_t *ppos)
+static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
+                                     size_t count, loff_t *ppos)
 {
        u8 cmd;
        int i, rc;
        int *idxp = file->private_data;
        int idx = *idxp;
        struct ibm_cffps *psu = to_psu(idxp, idx);
-       char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+       char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
 
        pmbus_set_page(psu->client, 0);
 
@@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
        case CFFPS_DEBUGFS_PN:
                cmd = CFFPS_PN_CMD;
                break;
+       case CFFPS_DEBUGFS_HEADER:
+               cmd = CFFPS_HEADER_CMD;
+               break;
        case CFFPS_DEBUGFS_SN:
                cmd = CFFPS_SN_CMD;
                break;
+       case CFFPS_DEBUGFS_MAX_POWER_OUT:
+               rc = i2c_smbus_read_word_swapped(psu->client,
+                                                CFFPS_MAX_POWER_OUT_CMD);
+               if (rc < 0)
+                       return rc;
+
+               rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc);
+               goto done;
        case CFFPS_DEBUGFS_CCIN:
                rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
                if (rc < 0)
@@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
                        return -EOPNOTSUPP;
                }
                goto done;
+       case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+               rc = i2c_smbus_read_byte_data(psu->client,
+                                             PMBUS_ON_OFF_CONFIG);
+               if (rc < 0)
+                       return rc;
+
+               rc = snprintf(data, 3, "%02x", rc);
+               goto done;
        default:
                return -EINVAL;
        }
@@ -214,9 +235,42 @@ done:
        return simple_read_from_buffer(buf, count, ppos, data, rc);
 }
 
+static ssize_t ibm_cffps_debugfs_write(struct file *file,
+                                      const char __user *buf, size_t count,
+                                      loff_t *ppos)
+{
+       u8 data;
+       ssize_t rc;
+       int *idxp = file->private_data;
+       int idx = *idxp;
+       struct ibm_cffps *psu = to_psu(idxp, idx);
+
+       switch (idx) {
+       case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+               pmbus_set_page(psu->client, 0);
+
+               rc = simple_write_to_buffer(&data, 1, ppos, buf, count);
+               if (rc <= 0)
+                       return rc;
+
+               rc = i2c_smbus_write_byte_data(psu->client,
+                                              PMBUS_ON_OFF_CONFIG, data);
+               if (rc)
+                       return rc;
+
+               rc = 1;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return rc;
+}
+
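Note that ibm_cffps_debugfs_write() consumes a single raw byte via simple_write_to_buffer() and forwards it unmodified as the PMBUS_ON_OFF_CONFIG register value. A hedged userspace sketch of driving it; the debugfs path and the 0x1e value are illustrative assumptions, not taken from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char cfg = 0x1e;  /* example PMBus ON_OFF_CONFIG value */
        /* Path is hypothetical; the real one depends on the pmbus core's
         * debugfs naming for this client. */
        int fd = open("/sys/kernel/debug/pmbus.0/on_off_config", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, &cfg, 1) != 1)
                perror("write");
        close(fd);
        return 0;
}
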
 static const struct file_operations ibm_cffps_fops = {
        .llseek = noop_llseek,
-       .read = ibm_cffps_debugfs_op,
+       .read = ibm_cffps_debugfs_read,
+       .write = ibm_cffps_debugfs_write,
        .open = simple_open,
 };
 
@@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
                if (mfr & CFFPS_MFR_PS_KILL)
                        rc |= PB_STATUS_OFF;
                break;
+       case PMBUS_VIRT_READ_VMON:
+               rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD);
+               break;
        default:
                rc = -ENODATA;
                break;
@@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
        rc = devm_led_classdev_register(dev, &psu->led);
        if (rc)
                dev_warn(dev, "failed to register led class: %d\n", rc);
+       else
+               i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD,
+                                         CFFPS_LED_OFF);
 }
 
 static struct pmbus_driver_info ibm_cffps_info[] = {
@@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = {
                        PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
                        PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
                        PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
-                       PMBUS_HAVE_STATUS_FAN12,
+                       PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON,
                .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
                        PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
                        PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
@@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client,
        debugfs_create_file("part_number", 0444, ibm_cffps_dir,
                            &psu->debugfs_entries[CFFPS_DEBUGFS_PN],
                            &ibm_cffps_fops);
+       debugfs_create_file("header", 0444, ibm_cffps_dir,
+                           &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER],
+                           &ibm_cffps_fops);
        debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
                            &psu->debugfs_entries[CFFPS_DEBUGFS_SN],
                            &ibm_cffps_fops);
+       debugfs_create_file("max_power_out", 0444, ibm_cffps_dir,
+                           &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT],
+                           &ibm_cffps_fops);
        debugfs_create_file("ccin", 0444, ibm_cffps_dir,
                            &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
                            &ibm_cffps_fops);
        debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
                            &psu->debugfs_entries[CFFPS_DEBUGFS_FW],
                            &ibm_cffps_fops);
+       debugfs_create_file("on_off_config", 0644, ibm_cffps_dir,
+                           &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG],
+                           &ibm_cffps_fops);
 
        return 0;
 }
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
new file mode 100644 (file)
index 0000000..294e221
--- /dev/null
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down
+ * Switching Regulators
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/pmbus.h>
+#include <linux/util_macros.h>
+#include "pmbus.h"
+
+enum chips {
+       max20730,
+       max20734,
+       max20743
+};
+
+struct max20730_data {
+       enum chips id;
+       struct pmbus_driver_info info;
+       struct mutex lock;      /* Used to protect against parallel writes */
+       u16 mfr_devset1;
+};
+
+#define to_max20730_data(x)  container_of(x, struct max20730_data, info)
+
+#define MAX20730_MFR_DEVSET1   0xd2
+
+/*
+ * Convert discrete value to direct data format. Strictly speaking, all passed
+ * values are constants, so we could do that calculation manually. On the
+ * downside, that would make the driver more difficult to maintain, so let's
+ * use this approach.
+ */
+static u16 val_to_direct(int v, enum pmbus_sensor_classes class,
+                        const struct pmbus_driver_info *info)
+{
+       int R = info->R[class] - 3;     /* take milli-units into account */
+       int b = info->b[class] * 1000;
+       long d;
+
+       d = v * info->m[class] + b;
+       /*
+        * R < 0 is true for all callers, so we don't need to handle
+        * the R > 0 case.
+        */
+       while (R < 0) {
+               d = DIV_ROUND_CLOSEST(d, 10);
+               R++;
+       }
+       return (u16)d;
+}
+
+static long direct_to_val(u16 w, enum pmbus_sensor_classes class,
+                         const struct pmbus_driver_info *info)
+{
+       int R = info->R[class] - 3;
+       int b = info->b[class] * 1000;
+       int m = info->m[class];
+       long d = (s16)w;
+
+       if (m == 0)
+               return 0;
+
+       while (R < 0) {
+               d *= 10;
+               R++;
+       }
+       d = (d - b) / m;
+       return d;
+}
+
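The two helpers above implement the PMBus "direct" data format, Y = (m * X + b) * 10^R; the driver works in milli-units, hence the R - 3 adjustment. A standalone round-trip sketch using the MAX20730 temperature coefficients listed further down (m = 21, b = 5887, R = -1); the 150 degrees C input is an arbitrary example:

#include <stdio.h>

/* Round-to-nearest division for positive operands, mirroring
 * DIV_ROUND_CLOSEST() as used above. */
static long div_round_closest(long n, long d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        long m = 21, b = 5887 * 1000, R = -1 - 3;  /* milli-unit adjusted */
        long x = 150000;                /* 150.000 degrees Celsius */
        long d, w;
        int r;

        d = x * m + b;                  /* 9037000 */
        for (r = R; r < 0; r++)
                d = div_round_closest(d, 10);
        printf("encoded: %ld\n", d);    /* 904 */

        w = d;                          /* decode: undo the scaling */
        for (r = R; r < 0; r++)
                w *= 10;                /* 9040000 */
        printf("decoded: %ld mC\n", (w - b) / m);  /* 150142 */
        return 0;
}
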
+static u32 max_current[][5] = {
+       [max20730] = { 13000, 16600, 20100, 23600 },
+       [max20734] = { 21000, 27000, 32000, 38000 },
+       [max20743] = { 18900, 24100, 29200, 34100 },
+};
+
+static int max20730_read_word_data(struct i2c_client *client, int page, int reg)
+{
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       const struct max20730_data *data = to_max20730_data(info);
+       int ret = 0;
+       u32 max_c;
+
+       switch (reg) {
+       case PMBUS_OT_FAULT_LIMIT:
+               switch ((data->mfr_devset1 >> 11) & 0x3) {
+               case 0x0:
+                       ret = val_to_direct(150000, PSC_TEMPERATURE, info);
+                       break;
+               case 0x1:
+                       ret = val_to_direct(130000, PSC_TEMPERATURE, info);
+                       break;
+               default:
+                       ret = -ENODATA;
+                       break;
+               }
+               break;
+       case PMBUS_IOUT_OC_FAULT_LIMIT:
+               max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3];
+               ret = val_to_direct(max_c, PSC_CURRENT_OUT, info);
+               break;
+       default:
+               ret = -ENODATA;
+               break;
+       }
+       return ret;
+}
+
+static int max20730_write_word_data(struct i2c_client *client, int page,
+                                   int reg, u16 word)
+{
+       struct pmbus_driver_info *info;
+       struct max20730_data *data;
+       u16 devset1;
+       int ret = 0;
+       int idx;
+
+       info = (struct pmbus_driver_info *)pmbus_get_driver_info(client);
+       data = to_max20730_data(info);
+
+       mutex_lock(&data->lock);
+       devset1 = data->mfr_devset1;
+
+       switch (reg) {
+       case PMBUS_OT_FAULT_LIMIT:
+               devset1 &= ~(BIT(11) | BIT(12));
+               if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000)
+                       devset1 |= BIT(11);
+               break;
+       case PMBUS_IOUT_OC_FAULT_LIMIT:
+               devset1 &= ~(BIT(5) | BIT(6));
+
+               idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info),
+                                  max_current[data->id], 4);
+               devset1 |= (idx << 5);
+               break;
+       default:
+               ret = -ENODATA;
+               break;
+       }
+
+       if (!ret && devset1 != data->mfr_devset1) {
+               ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1,
+                                               devset1);
+               if (!ret) {
+                       data->mfr_devset1 = devset1;
+                       pmbus_clear_cache(client);
+               }
+       }
+       mutex_unlock(&data->lock);
+       return ret;
+}
+
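A user-supplied limit rarely matches one of the four discrete settings the chip offers, so the write path above picks the nearest max_current[] entry with find_closest() and stores its index in MFR_DEVSET1 bits 6:5. A minimal stand-in for that selection; the 25 A request is a hypothetical input:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's find_closest(): index of the
 * array element nearest to x. */
static int find_closest_idx(long x, const unsigned int *a, int n)
{
        int i, best = 0;

        for (i = 1; i < n; i++)
                if (labs(x - (long)a[i]) < labs(x - (long)a[best]))
                        best = i;
        return best;
}

int main(void)
{
        /* MAX20730 IOUT_OC limits in mA, from the table above. */
        const unsigned int limits[] = { 13000, 16600, 20100, 23600 };
        long requested = 25000;         /* hypothetical 25 A request */
        int idx = find_closest_idx(requested, limits, 4);

        printf("idx=%d, DEVSET1 field=0x%x\n", idx, (unsigned int)(idx << 5));
        return 0;
}
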
+static const struct pmbus_driver_info max20730_info[] = {
+       [max20730] = {
+               .pages = 1,
+               .read_word_data = max20730_read_word_data,
+               .write_word_data = max20730_write_word_data,
+
+               /* Source : Maxim AN6042 */
+               .format[PSC_TEMPERATURE] = direct,
+               .m[PSC_TEMPERATURE] = 21,
+               .b[PSC_TEMPERATURE] = 5887,
+               .R[PSC_TEMPERATURE] = -1,
+
+               .format[PSC_VOLTAGE_IN] = direct,
+               .m[PSC_VOLTAGE_IN] = 3609,
+               .b[PSC_VOLTAGE_IN] = 0,
+               .R[PSC_VOLTAGE_IN] = -2,
+
+               /*
+                * Values in the datasheet are adjusted for temperature and
+                * for the relationship between Vin and Vout.
+                * Unfortunately, the data sheet suggests that Vout measurement
+                * may be scaled with a resistor array. This is indeed the case
+                * at least on the evaluation boards. As a result, any in-driver
+                * adjustments would either be wrong or require elaborate means
+                * to configure the scaling. Instead of doing that, just report
+                * raw values and let userspace handle adjustments.
+                */
+               .format[PSC_CURRENT_OUT] = direct,
+               .m[PSC_CURRENT_OUT] = 153,
+               .b[PSC_CURRENT_OUT] = 4976,
+               .R[PSC_CURRENT_OUT] = -1,
+
+               .format[PSC_VOLTAGE_OUT] = linear,
+
+               .func[0] = PMBUS_HAVE_VIN |
+                       PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+                       PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+                       PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+       },
+       [max20734] = {
+               .pages = 1,
+               .read_word_data = max20730_read_word_data,
+               .write_word_data = max20730_write_word_data,
+
+               /* Source : Maxim AN6209 */
+               .format[PSC_TEMPERATURE] = direct,
+               .m[PSC_TEMPERATURE] = 21,
+               .b[PSC_TEMPERATURE] = 5887,
+               .R[PSC_TEMPERATURE] = -1,
+
+               .format[PSC_VOLTAGE_IN] = direct,
+               .m[PSC_VOLTAGE_IN] = 3592,
+               .b[PSC_VOLTAGE_IN] = 0,
+               .R[PSC_VOLTAGE_IN] = -2,
+
+               .format[PSC_CURRENT_OUT] = direct,
+               .m[PSC_CURRENT_OUT] = 111,
+               .b[PSC_CURRENT_OUT] = 3461,
+               .R[PSC_CURRENT_OUT] = -1,
+
+               .format[PSC_VOLTAGE_OUT] = linear,
+
+               .func[0] = PMBUS_HAVE_VIN |
+                       PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+                       PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+                       PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+       },
+       [max20743] = {
+               .pages = 1,
+               .read_word_data = max20730_read_word_data,
+               .write_word_data = max20730_write_word_data,
+
+               /* Source : Maxim AN6042 */
+               .format[PSC_TEMPERATURE] = direct,
+               .m[PSC_TEMPERATURE] = 21,
+               .b[PSC_TEMPERATURE] = 5887,
+               .R[PSC_TEMPERATURE] = -1,
+
+               .format[PSC_VOLTAGE_IN] = direct,
+               .m[PSC_VOLTAGE_IN] = 3597,
+               .b[PSC_VOLTAGE_IN] = 0,
+               .R[PSC_VOLTAGE_IN] = -2,
+
+               .format[PSC_CURRENT_OUT] = direct,
+               .m[PSC_CURRENT_OUT] = 95,
+               .b[PSC_CURRENT_OUT] = 5014,
+               .R[PSC_CURRENT_OUT] = -1,
+
+               .format[PSC_VOLTAGE_OUT] = linear,
+
+               .func[0] = PMBUS_HAVE_VIN |
+                       PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+                       PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+                       PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+       },
+};
+
+static int max20730_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+       struct max20730_data *data;
+       enum chips chip_id;
+       int ret;
+
+       if (!i2c_check_functionality(client->adapter,
+                                    I2C_FUNC_SMBUS_READ_BYTE_DATA |
+                                    I2C_FUNC_SMBUS_READ_WORD_DATA |
+                                    I2C_FUNC_SMBUS_BLOCK_DATA))
+               return -ENODEV;
+
+       ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+       if (ret < 0) {
+               dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+               return ret;
+       }
+       if (ret != 5 || strncmp(buf, "MAXIM", 5)) {
+               buf[ret] = '\0';
+               dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
+               return -ENODEV;
+       }
+
+       /*
+        * The chips support reading PMBUS_MFR_MODEL. On both MAX20730
+        * and MAX20734, reading it returns M20743. Presumably that is
+        * the reason why the command is not documented. Unfortunately,
+        * that means that there is no reliable means to detect the chip.
+        * However, we can at least detect the chip series. Compare
+        * the returned value against 'M20743' and bail out if there is
+        * a mismatch. If that doesn't work for all chips, we may have
+        * to remove this check.
+        */
+       ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read Manufacturer Model\n");
+               return ret;
+       }
+       if (ret != 6 || strncmp(buf, "M20743", 6)) {
+               buf[ret] = '\0';
+               dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf);
+               return -ENODEV;
+       }
+
+       ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read Manufacturer Revision\n");
+               return ret;
+       }
+       if (ret != 1 || buf[0] != 'F') {
+               buf[ret] = '\0';
+               dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf);
+               return -ENODEV;
+       }
+
+       if (client->dev.of_node)
+               chip_id = (enum chips)of_device_get_match_data(dev);
+       else
+               chip_id = id->driver_data;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       data->id = chip_id;
+       mutex_init(&data->lock);
+       memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info));
+
+       ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1);
+       if (ret < 0)
+               return ret;
+       data->mfr_devset1 = ret;
+
+       return pmbus_do_probe(client, id, &data->info);
+}
+
+static const struct i2c_device_id max20730_id[] = {
+       { "max20730", max20730 },
+       { "max20734", max20734 },
+       { "max20743", max20743 },
+       { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max20730_id);
+
+static const struct of_device_id max20730_of_match[] = {
+       { .compatible = "maxim,max20730", .data = (void *)max20730 },
+       { .compatible = "maxim,max20734", .data = (void *)max20734 },
+       { .compatible = "maxim,max20743", .data = (void *)max20743 },
+       { },
+};
+
+MODULE_DEVICE_TABLE(of, max20730_of_match);
+
+static struct i2c_driver max20730_driver = {
+       .driver = {
+               .name = "max20730",
+               .of_match_table = max20730_of_match,
+       },
+       .probe = max20730_probe,
+       .remove = pmbus_do_remove,
+       .id_table = max20730_id,
+};
+
+module_i2c_driver(max20730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743");
+MODULE_LICENSE("GPL");
index ee5f0cdbde06291aa96cb78d2c84cf54aecdd3d5..da3c38cb9a5cdd19eced253eaf0eecc9eb8620e4 100644 (file)
@@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = {
        .pages = 1,
        .format[PSC_VOLTAGE_IN] = linear,
        .format[PSC_VOLTAGE_OUT] = vid,
-       .vrm_version = vr12,
+       .vrm_version[0] = vr12,
        .format[PSC_TEMPERATURE] = linear,
        .format[PSC_CURRENT_OUT] = linear,
        .format[PSC_POWER] = linear,
index c0bc43d010186dffef6d6f09278f78009705abe6..51e8312b6c2dbee851a4f592c59b6afb891dcdf6 100644 (file)
@@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client,
        }
 
        if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
-               int vout_mode;
+               int vout_mode, i;
 
                vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
                if (vout_mode >= 0 && vout_mode != 0xff) {
@@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client,
                                break;
                        case 1:
                                info->format[PSC_VOLTAGE_OUT] = vid;
-                               info->vrm_version = vr11;
+                               for (i = 0; i < info->pages; i++)
+                                       info->vrm_version[i] = vr11;
                                break;
                        case 2:
                                info->format[PSC_VOLTAGE_OUT] = direct;
@@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = {
        {"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
        {"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
        {"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
+       {"max20796", (kernel_ulong_t)&pmbus_info_one},
        {"mdt040", (kernel_ulong_t)&pmbus_info_one},
        {"ncp4200", (kernel_ulong_t)&pmbus_info_one},
        {"ncp4208", (kernel_ulong_t)&pmbus_info_one},
index d198af3a92b6fd03a18f4b0a6f15a69f03dd6cc9..13b34bd67f2315db0c0ce38bb7d592150b24a713 100644 (file)
@@ -22,6 +22,8 @@ enum pmbus_regs {
        PMBUS_CLEAR_FAULTS              = 0x03,
        PMBUS_PHASE                     = 0x04,
 
+       PMBUS_WRITE_PROTECT             = 0x10,
+
        PMBUS_CAPABILITY                = 0x19,
        PMBUS_QUERY                     = 0x1A,
 
@@ -225,6 +227,15 @@ enum pmbus_regs {
  */
 #define PB_OPERATION_CONTROL_ON                BIT(7)
 
+/*
+ * WRITE_PROTECT
+ */
+#define PB_WP_ALL      BIT(7)  /* all but WRITE_PROTECT */
+#define PB_WP_OP       BIT(6)  /* all but WP, OPERATION, PAGE */
+#define PB_WP_VOUT     BIT(5)  /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */
+
+#define PB_WP_ANY      (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)
+
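The three levels differ only in which commands remain writable; the core never needs to distinguish them, only to learn whether any protection is active before it tries to clear faults or write limits, hence the combined PB_WP_ANY mask. A small sketch of the check added to pmbus_init_common() further down:

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define PB_WP_ALL       BIT(7)
#define PB_WP_OP        BIT(6)
#define PB_WP_VOUT      BIT(5)
#define PB_WP_ANY       (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)

int main(void)
{
        unsigned int regs[] = { 0x00, 0x20, 0x40, 0x80 };
        int i;

        for (i = 0; i < 4; i++)
                printf("WRITE_PROTECT=0x%02x -> %s\n", regs[i],
                       (regs[i] & PB_WP_ANY) ? "protected" : "writable");
        return 0;
}
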
 /*
  * CAPABILITY
  */
@@ -377,12 +388,12 @@ enum pmbus_sensor_classes {
 #define PMBUS_PAGE_VIRTUAL     BIT(31)
 
 enum pmbus_data_format { linear = 0, direct, vid };
-enum vrm_version { vr11 = 0, vr12, vr13 };
+enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
 
 struct pmbus_driver_info {
        int pages;              /* Total number of pages */
        enum pmbus_data_format format[PSC_NUM_CLASSES];
-       enum vrm_version vrm_version;
+       enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */
        /*
         * Support one set of coefficients for each sensor type
         * Used for chips providing data in direct mode.
index 8470097907bc20d9bf7189793056e3b8e9754efd..d9c17feb7b4ac4f8367f0507cf6693ba18efbc96 100644 (file)
@@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
        long val = sensor->data;
        long rv = 0;
 
-       switch (data->info->vrm_version) {
+       switch (data->info->vrm_version[sensor->page]) {
        case vr11:
                if (val >= 0x02 && val <= 0xb2)
                        rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
@@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
                if (val >= 0x01)
                        rv = 500 + (val - 1) * 10;
                break;
+       case imvp9:
+               if (val >= 0x01)
+                       rv = 200 + (val - 1) * 10;
+               break;
+       case amd625mv:
+               if (val >= 0x0 && val <= 0xd8)
+                       rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100);
+               break;
        }
        return rv;
 }
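The two new VID tables follow the shape of the existing ones: imvp9 is a linear 10 mV-per-step scale starting at 200 mV for code 1, and amd625mv counts down from 1550 mV in 6.25 mV steps. A quick numeric check (results in millivolts, matching the scale of the existing vr11/vr12 cases):

#include <stdio.h>

static long div_round_closest(long n, long d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        int val;

        /* imvp9: 200 mV at code 1, +10 mV per step */
        for (val = 1; val <= 3; val++)
                printf("imvp9    0x%02x -> %d mV\n", val, 200 + (val - 1) * 10);

        /* amd625mv: 1550 mV at code 0, -6.25 mV per step */
        for (val = 0; val <= 2; val++)
                printf("amd625mv 0x%02x -> %ld mV\n", val,
                       div_round_closest(155000 - val * 625, 100));
        return 0;
}
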
@@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
                snprintf(sensor->name, sizeof(sensor->name), "%s%d",
                         name, seq);
 
+       if (data->flags & PMBUS_WRITE_PROTECTED)
+               readonly = true;
+
        sensor->page = page;
        sensor->reg = reg;
        sensor->class = class;
@@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
        if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
                client->flags |= I2C_CLIENT_PEC;
 
+       /*
+        * Check if the chip is write protected. If it is, we cannot clear
+        * faults and should not try to. Also, in that case, writes into
+        * limit registers need to be disabled.
+        */
+       ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+       if (ret > 0 && (ret & PB_WP_ANY))
+               data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+
        if (data->info->pages)
                pmbus_clear_faults(client);
        else
index ebe3f023f840cb60ffb8e08f70a27388bc597d13..517584cff3deabdbae3134ed0049263d0c39627d 100644 (file)
 static int pxe1610_identify(struct i2c_client *client,
                             struct pmbus_driver_info *info)
 {
-       if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
-               u8 vout_mode;
-               int ret;
-
-               /* Read the register with VOUT scaling value.*/
-               ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
-               if (ret < 0)
-                       return ret;
-
-               vout_mode = ret & GENMASK(4, 0);
-
-               switch (vout_mode) {
-               case 1:
-                       info->vrm_version = vr12;
-                       break;
-               case 2:
-                       info->vrm_version = vr13;
-                       break;
-               default:
-                       return -ENODEV;
+       int i;
+
+       for (i = 0; i < PXE1610_NUM_PAGES; i++) {
+               if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) {
+                       u8 vout_mode;
+                       int ret;
+
+                       /* Read the register with VOUT scaling value. */
+                       ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+                       if (ret < 0)
+                               return ret;
+
+                       vout_mode = ret & GENMASK(4, 0);
+
+                       switch (vout_mode) {
+                       case 1:
+                               info->vrm_version[i] = vr12;
+                               break;
+                       case 2:
+                               info->vrm_version[i] = vr13;
+                               break;
+                       default:
+                               return -ENODEV;
+                       }
                }
        }
 
index 86bb3aca09ed3c802d875c297bcca64cc8264606..9c22e9013dd7404e1c3828b07a8d73532f0f795e 100644 (file)
@@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client,
                             struct pmbus_driver_info *info)
 {
        u8 vout_params;
-       int ret;
-
-       /* Read the register with VOUT scaling value.*/
-       ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
-       if (ret < 0)
-               return ret;
-
-       vout_params = ret & GENMASK(4, 0);
-
-       switch (vout_params) {
-       case TPS53679_PROT_VR13_10MV:
-       case TPS53679_PROT_VR12_5_10MV:
-               info->vrm_version = vr13;
-               break;
-       case TPS53679_PROT_VR13_5MV:
-       case TPS53679_PROT_VR12_5MV:
-       case TPS53679_PROT_IMVP8_5MV:
-               info->vrm_version = vr12;
-               break;
-       default:
-               return -EINVAL;
+       int i, ret;
+
+       for (i = 0; i < TPS53679_PAGE_NUM; i++) {
+               /* Read the register with VOUT scaling value. */
+               ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+               if (ret < 0)
+                       return ret;
+
+               vout_params = ret & GENMASK(4, 0);
+
+               switch (vout_params) {
+               case TPS53679_PROT_VR13_10MV:
+               case TPS53679_PROT_VR12_5_10MV:
+                       info->vrm_version[i] = vr13;
+                       break;
+               case TPS53679_PROT_VR13_5MV:
+               case TPS53679_PROT_VR12_5MV:
+               case TPS53679_PROT_IMVP8_5MV:
+                       info->vrm_version[i] = vr12;
+                       break;
+               default:
+                       return -EINVAL;
+               }
        }
 
        return 0;
@@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client,
 
 static const struct i2c_device_id tps53679_id[] = {
        {"tps53679", 0},
+       {"tps53688", 0},
        {}
 };
 
@@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id);
 
 static const struct of_device_id __maybe_unused tps53679_of_match[] = {
        {.compatible = "ti,tps53679"},
+       {.compatible = "ti,tps53688"},
        {}
 };
 MODULE_DEVICE_TABLE(of, tps53679_of_match);
index a9229c6b0e84dcc7435d18bd9e19bef9a60227ae..23ea3415f1664d93459ee84e865a0ad3a14dc375 100644 (file)
@@ -18,7 +18,8 @@
 #include <linux/gpio/driver.h>
 #include "pmbus.h"
 
-enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
+enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
+            ucd90910 };
 
 #define UCD9000_MONITOR_CONFIG         0xd5
 #define UCD9000_NUM_PAGES              0xd6
@@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
 #define UCD9000_GPIO_OUTPUT            1
 
 #define UCD9000_MON_TYPE(x)    (((x) >> 5) & 0x07)
-#define UCD9000_MON_PAGE(x)    ((x) & 0x0f)
+#define UCD9000_MON_PAGE(x)    ((x) & 0x1f)
 
 #define UCD9000_MON_VOLTAGE    1
 #define UCD9000_MON_TEMPERATURE        2
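The page mask widens from 0x0f to 0x1f because the UCD90320 can monitor more than 16 rails; with a four-bit mask, any page above 15 would alias back into the 0-15 range. A minimal check of the extraction macros, using a hypothetical MONITOR_CONFIG byte:

#include <stdio.h>

#define UCD9000_MON_TYPE(x)     (((x) >> 5) & 0x07)
#define UCD9000_MON_PAGE(x)     ((x) & 0x1f)

int main(void)
{
        /* Hypothetical config byte: type 1 (voltage), page 17. The old
         * 0x0f mask would have reported page 1 instead. */
        unsigned char cfg = (1 << 5) | 17;

        printf("type=%d page=%d\n",
               UCD9000_MON_TYPE(cfg), UCD9000_MON_PAGE(cfg));
        return 0;
}
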
@@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
 #define UCD9000_GPIO_NAME_LEN  16
 #define UCD9090_NUM_GPIOS      23
 #define UCD901XX_NUM_GPIOS     26
+#define UCD90320_NUM_GPIOS     84
 #define UCD90910_NUM_GPIOS     26
 
 #define UCD9000_DEBUGFS_NAME_LEN       24
 #define UCD9000_GPI_COUNT              8
+#define UCD90320_GPI_COUNT             32
 
 struct ucd9000_data {
        u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX];
@@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = {
        {"ucd90120", ucd90120},
        {"ucd90124", ucd90124},
        {"ucd90160", ucd90160},
+       {"ucd90320", ucd90320},
        {"ucd9090", ucd9090},
        {"ucd90910", ucd90910},
        {}
@@ -154,6 +158,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = {
                .compatible = "ti,ucd90160",
                .data = (void *)ucd90160
        },
+       {
+               .compatible = "ti,ucd90320",
+               .data = (void *)ucd90320
+       },
        {
                .compatible = "ti,ucd9090",
                .data = (void *)ucd9090
@@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
        case ucd90160:
                data->gpio.ngpio = UCD901XX_NUM_GPIOS;
                break;
+       case ucd90320:
+               data->gpio.ngpio = UCD90320_NUM_GPIOS;
+               break;
        case ucd90910:
                data->gpio.ngpio = UCD90910_NUM_GPIOS;
                break;
@@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val)
        struct ucd9000_debugfs_entry *entry = data;
        struct i2c_client *client = entry->client;
        u8 buffer[I2C_SMBUS_BLOCK_MAX];
-       int ret;
+       int ret, i;
 
        ret = ucd9000_get_mfr_status(client, buffer);
        if (ret < 0)
                return ret;
 
        /*
-        * Attribute only created for devices with gpi fault bits at bits
-        * 16-23, which is the second byte of the response.
+        * GPI fault bits are packed in sets of 8, starting two bytes from
+        * the end of the response.
         */
-       *val = !!(buffer[1] & BIT(entry->index));
+       i = ret - 3 - entry->index / 8;
+       if (i >= 0)
+               *val = !!(buffer[i] & BIT(entry->index % 8));
 
        return 0;
 }
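With up to 32 GPIs on the UCD90320, the fault bits no longer fit in one byte, so the lookup now walks backwards from the end of the MFR_STATUS response: GPIs 0-7 sit two bytes before the last byte, GPIs 8-15 one byte earlier, and so on. A sketch of the index math; the response length of 8 is a hypothetical value:

#include <stdio.h>

int main(void)
{
        int ret = 8;    /* hypothetical MFR_STATUS response length */
        int index;

        for (index = 0; index < 32; index += 9) {
                int byte = ret - 3 - index / 8;
                int bit = index % 8;

                printf("GPI %2d -> buffer[%d], bit %d\n", index, byte, bit);
        }
        return 0;
}
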
@@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
 {
        struct dentry *debugfs;
        struct ucd9000_debugfs_entry *entries;
-       int i;
+       int i, gpi_count;
        char name[UCD9000_DEBUGFS_NAME_LEN];
 
        debugfs = pmbus_get_debugfs_dir(client);
@@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
 
        /*
         * Of the chips this driver supports, only the UCD9090, UCD90160,
-        * and UCD90910 report GPI faults in their MFR_STATUS register, so only
-        * create the GPI fault debugfs attributes for those chips.
+        * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS
+        * register, so only create the GPI fault debugfs attributes for those
+        * chips.
         */
        if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 ||
-           mid->driver_data == ucd90910) {
+           mid->driver_data == ucd90320 || mid->driver_data == ucd90910) {
+               gpi_count = mid->driver_data == ucd90320 ? UCD90320_GPI_COUNT
+                                                        : UCD9000_GPI_COUNT;
                entries = devm_kcalloc(&client->dev,
-                                      UCD9000_GPI_COUNT, sizeof(*entries),
+                                      gpi_count, sizeof(*entries),
                                       GFP_KERNEL);
                if (!entries)
                        return -ENOMEM;
 
-               for (i = 0; i < UCD9000_GPI_COUNT; i++) {
+               for (i = 0; i < gpi_count; i++) {
                        entries[i].client = client;
                        entries[i].index = i;
                        scnprintf(name, UCD9000_DEBUGFS_NAME_LEN,
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
new file mode 100644 (file)
index 0000000..3d47806
--- /dev/null
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers
+ *
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define XDPE122_PROT_VR12_5MV          0x01 /* VR12.0 mode, 5-mV DAC */
+#define XDPE122_PROT_VR12_5_10MV       0x02 /* VR12.5 mode, 10-mV DAC */
+#define XDPE122_PROT_IMVP9_10MV                0x03 /* IMVP9 mode, 10-mV DAC */
+#define XDPE122_AMD_625MV              0x10 /* AMD mode, 6.25 mV */
+#define XDPE122_PAGE_NUM               2
+
+static int xdpe122_identify(struct i2c_client *client,
+                           struct pmbus_driver_info *info)
+{
+       u8 vout_params;
+       int i, ret;
+
+       for (i = 0; i < XDPE122_PAGE_NUM; i++) {
+               /* Read the register with VOUT scaling value. */
+               ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+               if (ret < 0)
+                       return ret;
+
+               vout_params = ret & GENMASK(4, 0);
+
+               switch (vout_params) {
+               case XDPE122_PROT_VR12_5_10MV:
+                       info->vrm_version[i] = vr13;
+                       break;
+               case XDPE122_PROT_VR12_5MV:
+                       info->vrm_version[i] = vr12;
+                       break;
+               case XDPE122_PROT_IMVP9_10MV:
+                       info->vrm_version[i] = imvp9;
+                       break;
+               case XDPE122_AMD_625MV:
+                       info->vrm_version[i] = amd625mv;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static struct pmbus_driver_info xdpe122_info = {
+       .pages = XDPE122_PAGE_NUM,
+       .format[PSC_VOLTAGE_IN] = linear,
+       .format[PSC_VOLTAGE_OUT] = vid,
+       .format[PSC_TEMPERATURE] = linear,
+       .format[PSC_CURRENT_IN] = linear,
+       .format[PSC_CURRENT_OUT] = linear,
+       .format[PSC_POWER] = linear,
+       .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+               PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+               PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+               PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+       .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+               PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+               PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+               PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+       .identify = xdpe122_identify,
+};
+
+static int xdpe122_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct pmbus_driver_info *info;
+
+       info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info),
+                           GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id xdpe122_id[] = {
+       {"xdpe12254", 0},
+       {"xdpe12284", 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, xdpe122_id);
+
+static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+       {.compatible = "infineon, xdpe12254"},
+       {.compatible = "infineon, xdpe12284"},
+       {}
+};
+MODULE_DEVICE_TABLE(of, xdpe122_of_match);
+
+static struct i2c_driver xdpe122_driver = {
+       .driver = {
+               .name = "xdpe12284",
+               .of_match_table = of_match_ptr(xdpe122_of_match),
+       },
+       .probe = xdpe122_probe,
+       .remove = pmbus_do_remove,
+       .id_table = xdpe122_id,
+};
+
+module_i2c_driver(xdpe122_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family");
+MODULE_LICENSE("GPL");
index 42ffd2e5182d57edc3bf3c0e54513f0dc4422bf7..30b7b3ea8836fc97318091ffa548f3e632bd2bf4 100644 (file)
@@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int pwm_fan_suspend(struct device *dev)
+static int pwm_fan_disable(struct device *dev)
 {
        struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
        struct pwm_args args;
@@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev)
        return 0;
 }
 
+static void pwm_fan_shutdown(struct platform_device *pdev)
+{
+       pwm_fan_disable(&pdev->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pwm_fan_suspend(struct device *dev)
+{
+       return pwm_fan_disable(dev);
+}
+
 static int pwm_fan_resume(struct device *dev)
 {
        struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
@@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
 
 static struct platform_driver pwm_fan_driver = {
        .probe          = pwm_fan_probe,
+       .shutdown       = pwm_fan_shutdown,
        .driver = {
                .name           = "pwm-fan",
                .pm             = &pwm_fan_pm,
index eb171d15ac489ec12792e22c072751942a79f108..7ffadc2da57b537d2638f0829fa778318546a1e3 100644 (file)
@@ -28,8 +28,6 @@
  *  w83627uhg    8      2       2       3      0xa230 0xc1    0x5ca3
  *  w83667hg     9      5       3       3      0xa510 0xc1    0x5ca3
  *  w83667hg-b   9      5       3       4      0xb350 0xc1    0x5ca3
- *  nct6775f     9      4       3       9      0xb470 0xc1    0x5ca3
- *  nct6776f     9      5       3       9      0xC330 0xc1    0x5ca3
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -50,7 +48,7 @@
 
 enum kinds {
        w83627ehf, w83627dhg, w83627dhg_p, w83627uhg,
-       w83667hg, w83667hg_b, nct6775, nct6776,
+       w83667hg, w83667hg_b,
 };
 
 /* used to set data->name = w83627ehf_device_names[data->sio_kind] */
@@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = {
        "w83627uhg",
        "w83667hg",
        "w83667hg",
-       "nct6775",
-       "nct6776",
 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-static unsigned short fan_debounce;
-module_param(fan_debounce, ushort, 0);
-MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
-
 #define DRVNAME "w83627ehf"
 
 /*
@@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
 #define SIO_W83627UHG_ID       0xa230
 #define SIO_W83667HG_ID                0xa510
 #define SIO_W83667HG_B_ID      0xb350
-#define SIO_NCT6775_ID         0xb470
-#define SIO_NCT6776_ID         0xc330
 #define SIO_ID_MASK            0xFFF0
 
 static inline void
@@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
 #define W83627EHF_REG_DIODE            0x59
 #define W83627EHF_REG_SMI_OVT          0x4C
 
-/* NCT6775F has its own fan divider registers */
-#define NCT6775_REG_FANDIV1            0x506
-#define NCT6775_REG_FANDIV2            0x507
-#define NCT6775_REG_FAN_DEBOUNCE       0xf0
-
 #define W83627EHF_REG_ALARM1           0x459
 #define W83627EHF_REG_ALARM2           0x45A
 #define W83627EHF_REG_ALARM3           0x45B
@@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[]
 
 static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 };
 
-static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
-static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
-static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
-static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
-static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
-static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
-static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
-static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
-static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
-static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
-
-static const u16 NCT6775_REG_TEMP[]
-       = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
-static const u16 NCT6775_REG_TEMP_CONFIG[]
-       = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
-static const u16 NCT6775_REG_TEMP_HYST[]
-       = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
-static const u16 NCT6775_REG_TEMP_OVER[]
-       = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
-static const u16 NCT6775_REG_TEMP_SOURCE[]
-       = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
-
 static const char *const w83667hg_b_temp_label[] = {
        "SYSTIN",
        "CPUTIN",
@@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = {
        "PECI Agent 4"
 };
 
-static const char *const nct6775_temp_label[] = {
-       "",
-       "SYSTIN",
-       "CPUTIN",
-       "AUXTIN",
-       "AMD SB-TSI",
-       "PECI Agent 0",
-       "PECI Agent 1",
-       "PECI Agent 2",
-       "PECI Agent 3",
-       "PECI Agent 4",
-       "PECI Agent 5",
-       "PECI Agent 6",
-       "PECI Agent 7",
-       "PCH_CHIP_CPU_MAX_TEMP",
-       "PCH_CHIP_TEMP",
-       "PCH_CPU_TEMP",
-       "PCH_MCH_TEMP",
-       "PCH_DIM0_TEMP",
-       "PCH_DIM1_TEMP",
-       "PCH_DIM2_TEMP",
-       "PCH_DIM3_TEMP"
-};
-
-static const char *const nct6776_temp_label[] = {
-       "",
-       "SYSTIN",
-       "CPUTIN",
-       "AUXTIN",
-       "SMBUSMASTER 0",
-       "SMBUSMASTER 1",
-       "SMBUSMASTER 2",
-       "SMBUSMASTER 3",
-       "SMBUSMASTER 4",
-       "SMBUSMASTER 5",
-       "SMBUSMASTER 6",
-       "SMBUSMASTER 7",
-       "PECI Agent 0",
-       "PECI Agent 1",
-       "PCH_CHIP_CPU_MAX_TEMP",
-       "PCH_CHIP_TEMP",
-       "PCH_CPU_TEMP",
-       "PCH_MCH_TEMP",
-       "PCH_DIM0_TEMP",
-       "PCH_DIM1_TEMP",
-       "PCH_DIM2_TEMP",
-       "PCH_DIM3_TEMP",
-       "BYTE_TEMP"
-};
-
-#define NUM_REG_TEMP   ARRAY_SIZE(NCT6775_REG_TEMP)
+#define NUM_REG_TEMP   ARRAY_SIZE(W83627EHF_REG_TEMP)
 
 static int is_word_sized(u16 reg)
 {
@@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
        return 1350000U / (reg << divreg);
 }
 
-static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
-{
-       if ((reg & 0xff1f) == 0xff1f)
-               return 0;
-
-       reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
-
-       if (reg == 0)
-               return 0;
-
-       return 1350000U / reg;
-}
-
-static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
-{
-       if (reg == 0 || reg == 0xffff)
-               return 0;
-
-       /*
-        * Even though the registers are 16 bit wide, the fan divisor
-        * still applies.
-        */
-       return 1350000U / (reg << divreg);
-}
-
 static inline unsigned int
 div_from_reg(u8 reg)
 {
@@ -418,7 +306,6 @@ struct w83627ehf_data {
        int addr;       /* IO base of hw monitor block */
        const char *name;
 
-       struct device *hwmon_dev;
        struct mutex lock;
 
        u16 reg_temp[NUM_REG_TEMP];
@@ -428,20 +315,10 @@ struct w83627ehf_data {
        u8 temp_src[NUM_REG_TEMP];
        const char * const *temp_label;
 
-       const u16 *REG_PWM;
-       const u16 *REG_TARGET;
-       const u16 *REG_FAN;
-       const u16 *REG_FAN_MIN;
-       const u16 *REG_FAN_START_OUTPUT;
-       const u16 *REG_FAN_STOP_OUTPUT;
-       const u16 *REG_FAN_STOP_TIME;
        const u16 *REG_FAN_MAX_OUTPUT;
        const u16 *REG_FAN_STEP_OUTPUT;
        const u16 *scale_in;
 
-       unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
-       unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
-
        struct mutex update_lock;
        char valid;             /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
@@ -457,7 +334,6 @@ struct w83627ehf_data {
        u8 fan_div[5];
        u8 has_fan;             /* some fan inputs can be disabled */
        u8 has_fan_min;         /* some fans don't have min register */
-       bool has_fan_div;
        u8 temp_type[3];
        s8 temp_offset[3];
        s16 temp[9];
@@ -494,6 +370,7 @@ struct w83627ehf_data {
        u16 have_temp_offset;
        u8 in6_skip:1;
        u8 temp3_val_only:1;
+       u8 have_vid:1;
 
 #ifdef CONFIG_PM
        /* Remember extra register values over suspend/resume */
@@ -583,35 +460,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg,
        return w83627ehf_write_value(data, reg, value);
 }
 
-/* This function assumes that the caller holds data->update_lock */
-static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
-{
-       u8 reg;
-
-       switch (nr) {
-       case 0:
-               reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
-                   | (data->fan_div[0] & 0x7);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
-               break;
-       case 1:
-               reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
-                   | ((data->fan_div[1] << 4) & 0x70);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
-               break;
-       case 2:
-               reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
-                   | (data->fan_div[2] & 0x7);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
-               break;
-       case 3:
-               reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
-                   | ((data->fan_div[3] << 4) & 0x70);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
-               break;
-       }
-}
-
 /* This function assumes that the caller holds data->update_lock */
 static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
 {
@@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
        }
 }
 
-static void w83627ehf_write_fan_div_common(struct device *dev,
-                                          struct w83627ehf_data *data, int nr)
-{
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
-       if (sio_data->kind == nct6776)
-               ; /* no dividers, do nothing */
-       else if (sio_data->kind == nct6775)
-               nct6775_write_fan_div(data, nr);
-       else
-               w83627ehf_write_fan_div(data, nr);
-}
-
-static void nct6775_update_fan_div(struct w83627ehf_data *data)
-{
-       u8 i;
-
-       i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
-       data->fan_div[0] = i & 0x7;
-       data->fan_div[1] = (i & 0x70) >> 4;
-       i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
-       data->fan_div[2] = i & 0x7;
-       if (data->has_fan & (1<<3))
-               data->fan_div[3] = (i & 0x70) >> 4;
-}
-
 static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
 {
        int i;
@@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
        }
 }
 
-static void w83627ehf_update_fan_div_common(struct device *dev,
-                                           struct w83627ehf_data *data)
-{
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
-       if (sio_data->kind == nct6776)
-               ; /* no dividers, do nothing */
-       else if (sio_data->kind == nct6775)
-               nct6775_update_fan_div(data);
-       else
-               w83627ehf_update_fan_div(data);
-}
-
-static void nct6775_update_pwm(struct w83627ehf_data *data)
-{
-       int i;
-       int pwmcfg, fanmodecfg;
-
-       for (i = 0; i < data->pwm_num; i++) {
-               pwmcfg = w83627ehf_read_value(data,
-                                             W83627EHF_REG_PWM_ENABLE[i]);
-               fanmodecfg = w83627ehf_read_value(data,
-                                                 NCT6775_REG_FAN_MODE[i]);
-               data->pwm_mode[i] =
-                 ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
-               data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
-               data->tolerance[i] = fanmodecfg & 0x0f;
-               data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
-       }
-}
-
 static void w83627ehf_update_pwm(struct w83627ehf_data *data)
 {
        int i;
@@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data)
                        ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
                data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
                                       & 3) + 1;
-               data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+               data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]);
 
                data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
        }
 }
 
-static void w83627ehf_update_pwm_common(struct device *dev,
-                                       struct w83627ehf_data *data)
-{
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
-               nct6775_update_pwm(data);
-       else
-               w83627ehf_update_pwm(data);
-}
-
 static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
        int i;
 
        mutex_lock(&data->update_lock);
@@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
        if (time_after(jiffies, data->last_updated + HZ + HZ/2)
         || !data->valid) {
                /* Fan clock dividers */
-               w83627ehf_update_fan_div_common(dev, data);
+               w83627ehf_update_fan_div(data);
 
                /* Measured voltages and limits */
                for (i = 0; i < data->in_num; i++) {
@@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
                        if (!(data->has_fan & (1 << i)))
                                continue;
 
-                       reg = w83627ehf_read_value(data, data->REG_FAN[i]);
-                       data->rpm[i] = data->fan_from_reg(reg,
-                                                         data->fan_div[i]);
+                       reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]);
+                       data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]);
 
                        if (data->has_fan_min & (1 << i))
                                data->fan_min[i] = w83627ehf_read_value(data,
-                                          data->REG_FAN_MIN[i]);
+                                          W83627EHF_REG_FAN_MIN[i]);
 
                        /*
                         * If we failed to measure the fan speed and clock
                         * divider can be increased, let's try that for next
                         * time
                         */
-                       if (data->has_fan_div
-                           && (reg >= 0xff || (sio_data->kind == nct6775
-                                               && reg == 0x00))
-                           && data->fan_div[i] < 0x07) {
+                       if (reg >= 0xff && data->fan_div[i] < 0x07) {
                                dev_dbg(dev,
                                        "Increasing fan%d clock divider from %u to %u\n",
                                        i + 1, div_from_reg(data->fan_div[i]),
                                        div_from_reg(data->fan_div[i] + 1));
                                data->fan_div[i]++;
-                               w83627ehf_write_fan_div_common(dev, data, i);
+                               w83627ehf_write_fan_div(data, i);
                                /* Preserve min limit if possible */
                                if ((data->has_fan_min & (1 << i))
                                 && data->fan_min[i] >= 2
                                 && data->fan_min[i] != 255)
                                        w83627ehf_write_value(data,
-                                               data->REG_FAN_MIN[i],
+                                               W83627EHF_REG_FAN_MIN[i],
                                                (data->fan_min[i] /= 2));
                        }
                }
 
-               w83627ehf_update_pwm_common(dev, data);
+               w83627ehf_update_pwm(data);
 
                for (i = 0; i < data->pwm_num; i++) {
                        if (!(data->has_fan & (1 << i)))
@@ -857,13 +631,13 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
 
                        data->fan_start_output[i] =
                          w83627ehf_read_value(data,
-                                              data->REG_FAN_START_OUTPUT[i]);
+                                            W83627EHF_REG_FAN_START_OUTPUT[i]);
                        data->fan_stop_output[i] =
                          w83627ehf_read_value(data,
-                                              data->REG_FAN_STOP_OUTPUT[i]);
+                                            W83627EHF_REG_FAN_STOP_OUTPUT[i]);
                        data->fan_stop_time[i] =
                          w83627ehf_read_value(data,
-                                              data->REG_FAN_STOP_TIME[i]);
+                                              W83627EHF_REG_FAN_STOP_TIME[i]);
 
                        if (data->REG_FAN_MAX_OUTPUT &&
                            data->REG_FAN_MAX_OUTPUT[i] != 0xff)
@@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
 
                        data->target_temp[i] =
                                w83627ehf_read_value(data,
-                                       data->REG_TARGET[i]) &
+                                       W83627EHF_REG_TARGET[i]) &
                                        (data->pwm_mode[i] == 1 ? 0x7f : 0xff);
                }
 
@@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
        return data;
 }
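
For reference, the divider bump above relies on the 8-bit fan count saturating at 0xff when the fan is too slow for the current divider. A minimal sketch of the underlying arithmetic, mirroring the driver's fan_from_reg8() helper (1350000 is the chip's fan counting time base; the helper itself is outside this hunk):

static unsigned int example_fan_rpm(u16 reg, unsigned int divreg)
{
	/* 0 means a stalled fan, 255 means the count overflowed */
	if (reg == 0 || reg == 255)
		return 0;
	return 1350000U / (reg << divreg);	/* same math as fan_from_reg8() */
}
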
 
-/*
- * Sysfs callback functions
- */
-#define show_in_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
-          char *buf) \
-{ \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \
-                      data->scale_in)); \
-}
-show_in_reg(in)
-show_in_reg(in_min)
-show_in_reg(in_max)
-
 #define store_in_reg(REG, reg) \
-static ssize_t \
-store_in_##reg(struct device *dev, struct device_attribute *attr, \
-              const char *buf, size_t count) \
+static int \
+store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+              long val) \
 { \
-       struct w83627ehf_data *data = dev_get_drvdata(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       unsigned long val; \
-       int err; \
-       err = kstrtoul(buf, 10, &val); \
-       if (err < 0) \
-               return err; \
+       if (val < 0) \
+               return -EINVAL; \
        mutex_lock(&data->update_lock); \
-       data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \
-       w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
-                             data->in_##reg[nr]); \
+       data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \
+       w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \
+                             data->in_##reg[channel]); \
        mutex_unlock(&data->update_lock); \
-       return count; \
+       return 0; \
 }
 
 store_in_reg(MIN, min)
 store_in_reg(MAX, max)
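
For readability, roughly what the instantiation store_in_reg(MIN, min) above expands to (all helpers are the driver's own):

static int
store_in_min(struct device *dev, struct w83627ehf_data *data, int channel,
	     long val)
{
	if (val < 0)
		return -EINVAL;
	mutex_lock(&data->update_lock);
	/* convert millivolts to the chip's scaled register value */
	data->in_min[channel] = in_to_reg(val, channel, data->scale_in);
	w83627ehf_write_value(data, W83627EHF_REG_IN_MIN(channel),
			      data->in_min[channel]);
	mutex_unlock(&data->update_lock);
	return 0;
}
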
 
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01);
-}
-
-static struct sensor_device_attribute sda_in_input[] = {
-       SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
-       SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
-       SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
-       SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
-       SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
-       SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
-       SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
-       SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
-       SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
-       SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9),
-};
-
-static struct sensor_device_attribute sda_in_alarm[] = {
-       SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0),
-       SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1),
-       SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2),
-       SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3),
-       SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8),
-       SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21),
-       SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20),
-       SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16),
-       SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17),
-       SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19),
-};
-
-static struct sensor_device_attribute sda_in_min[] = {
-       SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
-       SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
-       SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
-       SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
-       SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
-       SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
-       SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
-       SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
-       SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
-       SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
-};
-
-static struct sensor_device_attribute sda_in_max[] = {
-       SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
-       SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
-       SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
-       SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
-       SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
-       SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
-       SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
-       SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
-       SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
-       SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
-};
-
-static ssize_t
-show_fan(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%d\n", data->rpm[nr]);
-}
-
-static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%d\n",
-                      data->fan_from_reg_min(data->fan_min[nr],
-                                             data->fan_div[nr]));
-}
-
-static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *attr,
-            char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr]));
-}
-
-static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *attr,
-             const char *buf, size_t count)
+static int
+store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel,
+             long val)
 {
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       unsigned long val;
-       int err;
        unsigned int reg;
        u8 new_div;
 
-       err = kstrtoul(buf, 10, &val);
-       if (err < 0)
-               return err;
+       if (val < 0)
+               return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       if (!data->has_fan_div) {
-               /*
-                * Only NCT6776F for now, so we know that this is a 13 bit
-                * register
-                */
-               if (!val) {
-                       val = 0xff1f;
-               } else {
-                       if (val > 1350000U)
-                               val = 135000U;
-                       val = 1350000U / val;
-                       val = (val & 0x1f) | ((val << 3) & 0xff00);
-               }
-               data->fan_min[nr] = val;
-               goto done;      /* Leave fan divider alone */
-       }
        if (!val) {
                /* No min limit, alarm disabled */
-               data->fan_min[nr] = 255;
-               new_div = data->fan_div[nr]; /* No change */
-               dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1);
+               data->fan_min[channel] = 255;
+               new_div = data->fan_div[channel]; /* No change */
+               dev_info(dev, "fan%u low limit and alarm disabled\n",
+                        channel + 1);
        } else if ((reg = 1350000U / val) >= 128 * 255) {
                /*
                 * Speed below this value cannot possibly be represented,
                 * even with the highest divider (128)
                 */
-               data->fan_min[nr] = 254;
+               data->fan_min[channel] = 254;
                new_div = 7; /* 128 == (1 << 7) */
                dev_warn(dev,
                         "fan%u low limit %lu below minimum %u, set to minimum\n",
-                        nr + 1, val, data->fan_from_reg_min(254, 7));
+                        channel + 1, val, fan_from_reg8(254, 7));
        } else if (!reg) {
                /*
                 * Speed above this value cannot possibly be represented,
                 * even with the lowest divider (1)
                 */
-               data->fan_min[nr] = 1;
+               data->fan_min[channel] = 1;
                new_div = 0; /* 1 == (1 << 0) */
                dev_warn(dev,
                         "fan%u low limit %lu above maximum %u, set to maximum\n",
-                        nr + 1, val, data->fan_from_reg_min(1, 0));
+                        channel + 1, val, fan_from_reg8(1, 0));
        } else {
                /*
                 * Automatically pick the best divider, i.e. the one such
@@ -1127,390 +763,145 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
                        reg >>= 1;
                        new_div++;
                }
-               data->fan_min[nr] = reg;
+               data->fan_min[channel] = reg;
        }
 
        /*
         * Write both the fan clock divider (if it changed) and the new
         * fan min (unconditionally)
         */
-       if (new_div != data->fan_div[nr]) {
+       if (new_div != data->fan_div[channel]) {
                dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
-                       nr + 1, div_from_reg(data->fan_div[nr]),
+                       channel + 1, div_from_reg(data->fan_div[channel]),
                        div_from_reg(new_div));
-               data->fan_div[nr] = new_div;
-               w83627ehf_write_fan_div_common(dev, data, nr);
+               data->fan_div[channel] = new_div;
+               w83627ehf_write_fan_div(data, channel);
                /* Give the chip time to sample a new speed value */
                data->last_updated = jiffies;
        }
-done:
-       w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
-                             data->fan_min[nr]);
-       mutex_unlock(&data->update_lock);
-
-       return count;
-}
-
-static struct sensor_device_attribute sda_fan_input[] = {
-       SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
-       SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
-       SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
-       SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
-       SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
-};
 
-static struct sensor_device_attribute sda_fan_alarm[] = {
-       SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6),
-       SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7),
-       SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11),
-       SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10),
-       SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23),
-};
-
-static struct sensor_device_attribute sda_fan_min[] = {
-       SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 0),
-       SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 1),
-       SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 2),
-       SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 3),
-       SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 4),
-};
-
-static struct sensor_device_attribute sda_fan_div[] = {
-       SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
-       SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
-       SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
-       SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3),
-       SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
-};
-
-static ssize_t
-show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
-}
+       w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel],
+                             data->fan_min[channel]);
+       mutex_unlock(&data->update_lock);
 
-#define show_temp_reg(addr, reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
-          char *buf) \
-{ \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \
+       return 0;
 }
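
A worked example of the divider selection above, with illustrative numbers and assuming the driver's usual 96..192 target window for the loop condition elided by the hunk:

	unsigned int reg = 1350000U / 600;	/* storing a 600 RPM minimum: 2250 */
	u8 new_div = 0;

	while (reg > 192 && new_div < 7) {
		reg >>= 1;	/* 2250 -> 1125 -> 562 -> 281 -> 140 */
		new_div++;	/* ends at 4, i.e. divider 16 */
	}
	/* the stored limit reads back as 1350000 / (140 << 4) ~= 602 RPM */
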
-show_temp_reg(reg_temp, temp);
-show_temp_reg(reg_temp_over, temp_max);
-show_temp_reg(reg_temp_hyst, temp_max_hyst);
 
 #define store_temp_reg(addr, reg) \
-static ssize_t \
-store_##reg(struct device *dev, struct device_attribute *attr, \
-           const char *buf, size_t count) \
+static int \
+store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+           long val) \
 { \
-       struct w83627ehf_data *data = dev_get_drvdata(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       int err; \
-       long val; \
-       err = kstrtol(buf, 10, &val); \
-       if (err < 0) \
-               return err; \
        mutex_lock(&data->update_lock); \
-       data->reg[nr] = LM75_TEMP_TO_REG(val); \
-       w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \
+       data->reg[channel] = LM75_TEMP_TO_REG(val); \
+       w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \
        mutex_unlock(&data->update_lock); \
-       return count; \
+       return 0; \
 }
 store_temp_reg(reg_temp_over, temp_max);
 store_temp_reg(reg_temp_hyst, temp_max_hyst);
 
-static ssize_t
-show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel,
+                 long val)
 {
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
 
-       return sprintf(buf, "%d\n",
-                      data->temp_offset[sensor_attr->index] * 1000);
+       mutex_lock(&data->update_lock);
+       data->temp_offset[channel] = val;
+       w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val);
+       mutex_unlock(&data->update_lock);
+       return 0;
 }
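
The clamp above maps sysfs millidegrees onto the signed 8-bit offset register; a minimal sketch of the conversion with two sample values:

static s8 example_offset_to_reg(long millidegrees)
{
	/* 1500 -> 2 (degrees); -200000 -> -128 (clamped to the minimum) */
	return clamp_val(DIV_ROUND_CLOSEST(millidegrees, 1000), -128, 127);
}
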
 
-static ssize_t
-store_temp_offset(struct device *dev, struct device_attribute *attr,
-                 const char *buf, size_t count)
+static int
+store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel,
+              long val)
 {
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       long val;
-       int err;
-
-       err = kstrtol(buf, 10, &val);
-       if (err < 0)
-               return err;
+       u16 reg;
 
-       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+       if (val < 0 || val > 1)
+               return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       data->temp_offset[nr] = val;
-       w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val);
+       reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]);
+       data->pwm_mode[channel] = val;
+       reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]);
+       if (!val)
+               reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel];
+       w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg);
        mutex_unlock(&data->update_lock);
-       return count;
+       return 0;
 }
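
Note the inverted encoding in the handler above: the hardware bit selects DC output when set, so pwmN_mode == 1 (PWM) clears it and pwmN_mode == 0 (DC) sets it. A sketch of the bit update:

static u16 example_mode_bits(u16 reg, int shift, long val)
{
	reg &= ~(1 << shift);		/* default to PWM output */
	if (!val)
		reg |= 1 << shift;	/* DC output sets the bit */
	return reg;
}
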
 
-static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_pwm(struct device *dev, struct w83627ehf_data *data, int channel,
+         long val)
 {
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
-}
-
-static struct sensor_device_attribute sda_temp_input[] = {
-       SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
-       SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
-       SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
-       SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
-       SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
-       SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
-       SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
-       SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
-       SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_label[] = {
-       SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
-       SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
-       SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
-       SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
-       SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
-       SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
-       SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
-       SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
-       SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max[] = {
-       SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 0),
-       SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 1),
-       SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 2),
-       SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 3),
-       SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 4),
-       SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 5),
-       SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 6),
-       SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 7),
-       SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
-                   store_temp_max, 8),
-};
+       val = clamp_val(val, 0, 255);
 
-static struct sensor_device_attribute sda_temp_max_hyst[] = {
-       SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 0),
-       SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 1),
-       SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 2),
-       SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 3),
-       SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 4),
-       SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 5),
-       SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 6),
-       SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 7),
-       SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
-                   store_temp_max_hyst, 8),
-};
+       mutex_lock(&data->update_lock);
+       data->pwm[channel] = val;
+       w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val);
+       mutex_unlock(&data->update_lock);
+       return 0;
+}
 
-static struct sensor_device_attribute sda_temp_alarm[] = {
-       SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
-       SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
-       SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
-};
+static int
+store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel,
+                long val)
+{
+       u16 reg;
 
-static struct sensor_device_attribute sda_temp_type[] = {
-       SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
-       SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
-       SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
-};
+       if (!val || val < 0 ||
+           (val > 4 && val != data->pwm_enable_orig[channel]))
+               return -EINVAL;
 
-static struct sensor_device_attribute sda_temp_offset[] = {
-       SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 0),
-       SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 1),
-       SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 2),
-};
+       mutex_lock(&data->update_lock);
+       data->pwm_enable[channel] = val;
+       reg = w83627ehf_read_value(data,
+                                  W83627EHF_REG_PWM_ENABLE[channel]);
+       reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]);
+       reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel];
+       w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel],
+                             reg);
+       mutex_unlock(&data->update_lock);
+       return 0;
+}
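
For reference, the 2-bit field written above stores pwmN_enable - 1; per the driver's documented sysfs ABI the values map as follows, and larger values are only accepted if they restore the unsupported mode read at probe time (pwm_enable_orig):

	/*
	 *   pwmN_enable 1 = manual mode       -> field 0b00
	 *   pwmN_enable 2 = thermal cruise    -> field 0b01
	 *   pwmN_enable 3 = fan speed cruise  -> field 0b10
	 *   pwmN_enable 4 = Smart Fan III     -> field 0b11
	 */
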
 
-#define show_pwm_reg(reg) \
+#define show_tol_temp(reg) \
 static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
-                         char *buf) \
+                               char *buf) \
 { \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+       struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
        struct sensor_device_attribute *sensor_attr = \
                to_sensor_dev_attr(attr); \
        int nr = sensor_attr->index; \
-       return sprintf(buf, "%d\n", data->reg[nr]); \
+       return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
 }
 
-show_pwm_reg(pwm_mode)
-show_pwm_reg(pwm_enable)
-show_pwm_reg(pwm)
+show_tol_temp(tolerance)
+show_tol_temp(target_temp)
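
These attributes now hang off the hwmon child device, so the macro reaches the driver data through dev->parent. Roughly, show_tol_temp(tolerance) expands to:

static ssize_t show_tolerance(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* dev is the hwmon device; drvdata lives on its parent */
	struct w83627ehf_data *data = w83627ehf_update_device(dev->parent);
	int nr = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%d\n", data->tolerance[nr] * 1000);	/* degrees -> millidegrees */
}
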
 
 static ssize_t
-store_pwm_mode(struct device *dev, struct device_attribute *attr,
+store_target_temp(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        int nr = sensor_attr->index;
-       unsigned long val;
+       long val;
        int err;
-       u16 reg;
 
-       err = kstrtoul(buf, 10, &val);
+       err = kstrtol(buf, 10, &val);
        if (err < 0)
                return err;
 
-       if (val > 1)
-               return -EINVAL;
-
-       /* On NCT67766F, DC mode is only supported for pwm1 */
-       if (sio_data->kind == nct6776 && nr && val != 1)
-               return -EINVAL;
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
 
        mutex_lock(&data->update_lock);
-       reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
-       data->pwm_mode[nr] = val;
-       reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]);
-       if (!val)
-               reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr];
-       w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
-       mutex_unlock(&data->update_lock);
-       return count;
-}
-
-static ssize_t
-store_pwm(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       unsigned long val;
-       int err;
-
-       err = kstrtoul(buf, 10, &val);
-       if (err < 0)
-               return err;
-
-       val = clamp_val(val, 0, 255);
-
-       mutex_lock(&data->update_lock);
-       data->pwm[nr] = val;
-       w83627ehf_write_value(data, data->REG_PWM[nr], val);
-       mutex_unlock(&data->update_lock);
-       return count;
-}
-
-static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       unsigned long val;
-       int err;
-       u16 reg;
-
-       err = kstrtoul(buf, 10, &val);
-       if (err < 0)
-               return err;
-
-       if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
-               return -EINVAL;
-       /* SmartFan III mode is not supported on NCT6776F */
-       if (sio_data->kind == nct6776 && val == 4)
-               return -EINVAL;
-
-       mutex_lock(&data->update_lock);
-       data->pwm_enable[nr] = val;
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
-               reg = w83627ehf_read_value(data,
-                                          NCT6775_REG_FAN_MODE[nr]);
-               reg &= 0x0f;
-               reg |= (val - 1) << 4;
-               w83627ehf_write_value(data,
-                                     NCT6775_REG_FAN_MODE[nr], reg);
-       } else {
-               reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
-               reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
-               reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
-               w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
-       }
-       mutex_unlock(&data->update_lock);
-       return count;
-}
-
-
-#define show_tol_temp(reg) \
-static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
-                               char *buf) \
-{ \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
-       struct sensor_device_attribute *sensor_attr = \
-               to_sensor_dev_attr(attr); \
-       int nr = sensor_attr->index; \
-       return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
-}
-
-show_tol_temp(tolerance)
-show_tol_temp(target_temp)
-
-static ssize_t
-store_target_temp(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       int nr = sensor_attr->index;
-       long val;
-       int err;
-
-       err = kstrtol(buf, 10, &val);
-       if (err < 0)
-               return err;
-
-       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
-
-       mutex_lock(&data->update_lock);
-       data->target_temp[nr] = val;
-       w83627ehf_write_value(data, data->REG_TARGET[nr], val);
+       data->target_temp[nr] = val;
+       w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
        mutex_unlock(&data->update_lock);
        return count;
 }
@@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
        int nr = sensor_attr->index;
        u16 reg;
@@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
        val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
 
        mutex_lock(&data->update_lock);
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
-               /* Limit tolerance further for NCT6776F */
-               if (sio_data->kind == nct6776 && val > 7)
-                       val = 7;
-               reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
+       reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+       if (nr == 1)
+               reg = (reg & 0x0f) | (val << 4);
+       else
                reg = (reg & 0xf0) | val;
-               w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
-       } else {
-               reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
-               if (nr == 1)
-                       reg = (reg & 0x0f) | (val << 4);
-               else
-                       reg = (reg & 0xf0) | val;
-               w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
-       }
+       w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
        data->tolerance[nr] = val;
        mutex_unlock(&data->update_lock);
        return count;
 }
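
The nibble split above exists because pwm1 and pwm2 share one tolerance register, while pwm3 and pwm4 use the low nibble of their own registers (assuming the driver's register table). A sketch of the packing:

static u16 example_pack_tolerance(u16 reg, int nr, u16 val)
{
	/* val is already clamped to 0..15 whole degrees */
	if (nr == 1)
		return (reg & 0x0f) | (val << 4);	/* high nibble */
	return (reg & 0xf0) | val;			/* low nibble */
}
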
 
-static struct sensor_device_attribute sda_pwm[] = {
-       SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0),
-       SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1),
-       SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2),
-       SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_mode[] = {
-       SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                   store_pwm_mode, 0),
-       SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                   store_pwm_mode, 1),
-       SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                   store_pwm_mode, 2),
-       SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                   store_pwm_mode, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_enable[] = {
-       SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                   store_pwm_enable, 0),
-       SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                   store_pwm_enable, 1),
-       SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                   store_pwm_enable, 2),
-       SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                   store_pwm_enable, 3),
-};
-
-static struct sensor_device_attribute sda_target_temp[] = {
-       SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp,
-                   store_target_temp, 0),
-       SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp,
-                   store_target_temp, 1),
-       SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp,
-                   store_target_temp, 2),
-       SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp,
-                   store_target_temp, 3),
-};
-
-static struct sensor_device_attribute sda_tolerance[] = {
-       SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
-                   store_tolerance, 0),
-       SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
-                   store_tolerance, 1),
-       SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
-                   store_tolerance, 2),
-       SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
-                   store_tolerance, 3),
-};
+static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp,
+           store_target_temp, 0);
+static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp,
+           store_target_temp, 1);
+static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp,
+           store_target_temp, 2);
+static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp,
+           store_target_temp, 3);
+
+static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance,
+           store_tolerance, 0);
+static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance,
+           store_tolerance, 1);
+static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance,
+           store_tolerance, 2);
+static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance,
+           store_tolerance, 3);
 
 /* Smart Fan registers */
 
@@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = {
 static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
                       char *buf) \
 { \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+       struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
        struct sensor_device_attribute *sensor_attr = \
                to_sensor_dev_attr(attr); \
        int nr = sensor_attr->index; \
@@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
        val = clamp_val(val, 1, 255); \
        mutex_lock(&data->update_lock); \
        data->reg[nr] = val; \
-       w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+       w83627ehf_write_value(data, REG[nr], val); \
        mutex_unlock(&data->update_lock); \
        return count; \
 }
 
-fan_functions(fan_start_output, FAN_START_OUTPUT)
-fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
-fan_functions(fan_max_output, FAN_MAX_OUTPUT)
-fan_functions(fan_step_output, FAN_STEP_OUTPUT)
+fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT)
+fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT)
+fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT)
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT)
 
 #define fan_time_functions(reg, REG) \
 static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
                                char *buf) \
 { \
-       struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+       struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
        struct sensor_device_attribute *sensor_attr = \
                to_sensor_dev_attr(attr); \
        int nr = sensor_attr->index; \
@@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
        val = step_time_to_reg(val, data->pwm_mode[nr]); \
        mutex_lock(&data->update_lock); \
        data->reg[nr] = val; \
-       w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+       w83627ehf_write_value(data, REG[nr], val); \
        mutex_unlock(&data->update_lock); \
        return count; \
 } \
 
-fan_time_functions(fan_stop_time, FAN_STOP_TIME)
-
-static ssize_t name_show(struct device *dev, struct device_attribute *attr,
-                        char *buf)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
-       SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
-                   store_fan_stop_time, 3),
-       SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
-                   store_fan_start_output, 3),
-       SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
-                   store_fan_stop_output, 3),
-       SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
-                   store_fan_max_output, 3),
-       SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
-                   store_fan_step_output, 3),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays_fan3[] = {
-       SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
-                   store_fan_stop_time, 2),
-       SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
-                   store_fan_start_output, 2),
-       SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
-                   store_fan_stop_output, 2),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays[] = {
-       SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
-                   store_fan_stop_time, 0),
-       SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
-                   store_fan_stop_time, 1),
-       SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
-                   store_fan_start_output, 0),
-       SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
-                   store_fan_start_output, 1),
-       SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
-                   store_fan_stop_output, 0),
-       SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
-                   store_fan_stop_output, 1),
-};
+fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME)
+
+static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time,
+           store_fan_stop_time, 3);
+static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output,
+           store_fan_start_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output,
+           store_fan_stop_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output,
+           store_fan_max_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output,
+           store_fan_step_output, 3);
+
+static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time,
+           store_fan_stop_time, 2);
+static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output,
+           store_fan_start_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output,
+           store_fan_stop_output, 2);
+
+static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time,
+           store_fan_stop_time, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time,
+           store_fan_stop_time, 1);
+static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output,
+           store_fan_start_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output,
+           store_fan_start_output, 1);
+static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output,
+           store_fan_stop_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output,
+           store_fan_stop_output, 1);
 
 
 /*
  * pwm1 and pwm3 don't support max and step settings on all chips.
  * Need to check support while generating/removing attribute files.
  */
-static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
-       SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
-                   store_fan_max_output, 0),
-       SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
-                   store_fan_step_output, 0),
-       SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
-                   store_fan_max_output, 1),
-       SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
-                   store_fan_step_output, 1),
-       SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
-                   store_fan_max_output, 2),
-       SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
-                   store_fan_step_output, 2),
-};
+static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output,
+           store_fan_max_output, 0);
+static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output,
+           store_fan_step_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output,
+           store_fan_max_output, 1);
+static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output,
+           store_fan_step_output, 1);
+static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output,
+           store_fan_max_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output,
+           store_fan_step_output, 2);
 
 static ssize_t
 cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct w83627ehf_data *data = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
 }
 static DEVICE_ATTR_RO(cpu0_vid);
 
 
 /* Case open detection */
-
-static ssize_t
-show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
+              long val)
 {
-       struct w83627ehf_data *data = w83627ehf_update_device(dev);
-
-       return sprintf(buf, "%d\n",
-               !!(data->caseopen & to_sensor_dev_attr_2(attr)->index));
-}
-
-static ssize_t
-clear_caseopen(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct w83627ehf_data *data = dev_get_drvdata(dev);
-       unsigned long val;
-       u16 reg, mask;
+       const u16 mask = 0x80;
+       u16 reg;
 
-       if (kstrtoul(buf, 10, &val) || val != 0)
+       if (val != 0 || channel != 0)
                return -EINVAL;
 
-       mask = to_sensor_dev_attr_2(attr)->nr;
-
        mutex_lock(&data->update_lock);
        reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
        w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
@@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
        data->valid = 0;        /* Force cache refresh */
        mutex_unlock(&data->update_lock);
 
-       return count;
+       return 0;
 }
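
The intrusion alarm is latched in hardware, so the handler above clears it by pulsing bit 0x80 in the CASEOPEN_CLR register and invalidating the cached readings. Only a write of 0 on channel 0 is accepted; illustrative usage (hwmonX is a placeholder):

	/* # echo 0 > /sys/class/hwmon/hwmonX/intrusion0_alarm */
	/* any other value, or any other channel, yields -EINVAL */
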
 
-static struct sensor_device_attribute_2 sda_caseopen[] = {
-       SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
-                       clear_caseopen, 0x80, 0x10),
-       SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen,
-                       clear_caseopen, 0x40, 0x40),
-};
-
-/*
- * Driver and device management
- */
-
-static void w83627ehf_device_remove_files(struct device *dev)
+static umode_t w83627ehf_attrs_visible(struct kobject *kobj,
+                                      struct attribute *a, int n)
 {
-       /*
-        * some entries in the following arrays may not have been used in
-        * device_create_file(), but device_remove_file() will ignore them
-        */
-       int i;
+       struct device *dev = container_of(kobj, struct device, kobj);
        struct w83627ehf_data *data = dev_get_drvdata(dev);
+       struct device_attribute *devattr;
+       struct sensor_device_attribute *sda;
+
+       devattr = container_of(a, struct device_attribute, attr);
+
+       /* Not a sensor attribute (cpu0_vid) */
+       if (devattr->show == cpu0_vid_show && data->have_vid)
+               return a->mode;
+
+       sda = (struct sensor_device_attribute *)devattr;
+
+       if (sda->index < 2 &&
+               (devattr->show == show_fan_stop_time ||
+                devattr->show == show_fan_start_output ||
+                devattr->show == show_fan_stop_output))
+               return a->mode;
+
+       if (sda->index < 3 &&
+               (devattr->show == show_fan_max_output ||
+                devattr->show == show_fan_step_output) &&
+               data->REG_FAN_STEP_OUTPUT &&
+               data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff)
+               return a->mode;
+
+       /* if fan3 and fan4 are enabled create the files for them */
+       if (sda->index == 2 &&
+               (data->has_fan & (1 << 2)) && data->pwm_num >= 3 &&
+               (devattr->show == show_fan_stop_time ||
+                devattr->show == show_fan_start_output ||
+                devattr->show == show_fan_stop_output))
+               return a->mode;
+
+       if (sda->index == 3 &&
+               (data->has_fan & (1 << 3)) && data->pwm_num >= 4 &&
+               (devattr->show == show_fan_stop_time ||
+                devattr->show == show_fan_start_output ||
+                devattr->show == show_fan_stop_output ||
+                devattr->show == show_fan_max_output ||
+                devattr->show == show_fan_step_output))
+               return a->mode;
+
+       if ((devattr->show == show_target_temp ||
+           devattr->show == show_tolerance) &&
+           (data->has_fan & (1 << sda->index)) &&
+           sda->index < data->pwm_num)
+               return a->mode;
 
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
-               device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
-               struct sensor_device_attribute *attr =
-                 &sda_sf3_max_step_arrays[i];
-               if (data->REG_FAN_STEP_OUTPUT &&
-                   data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
-                       device_remove_file(dev, &attr->dev_attr);
-       }
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++)
-               device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr);
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
-               device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
-       for (i = 0; i < data->in_num; i++) {
-               if ((i == 6) && data->in6_skip)
-                       continue;
-               device_remove_file(dev, &sda_in_input[i].dev_attr);
-               device_remove_file(dev, &sda_in_alarm[i].dev_attr);
-               device_remove_file(dev, &sda_in_min[i].dev_attr);
-               device_remove_file(dev, &sda_in_max[i].dev_attr);
-       }
-       for (i = 0; i < 5; i++) {
-               device_remove_file(dev, &sda_fan_input[i].dev_attr);
-               device_remove_file(dev, &sda_fan_alarm[i].dev_attr);
-               device_remove_file(dev, &sda_fan_div[i].dev_attr);
-               device_remove_file(dev, &sda_fan_min[i].dev_attr);
-       }
-       for (i = 0; i < data->pwm_num; i++) {
-               device_remove_file(dev, &sda_pwm[i].dev_attr);
-               device_remove_file(dev, &sda_pwm_mode[i].dev_attr);
-               device_remove_file(dev, &sda_pwm_enable[i].dev_attr);
-               device_remove_file(dev, &sda_target_temp[i].dev_attr);
-               device_remove_file(dev, &sda_tolerance[i].dev_attr);
-       }
-       for (i = 0; i < NUM_REG_TEMP; i++) {
-               if (!(data->have_temp & (1 << i)))
-                       continue;
-               device_remove_file(dev, &sda_temp_input[i].dev_attr);
-               device_remove_file(dev, &sda_temp_label[i].dev_attr);
-               if (i == 2 && data->temp3_val_only)
-                       continue;
-               device_remove_file(dev, &sda_temp_max[i].dev_attr);
-               device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
-               if (i > 2)
-                       continue;
-               device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
-               device_remove_file(dev, &sda_temp_type[i].dev_attr);
-               device_remove_file(dev, &sda_temp_offset[i].dev_attr);
-       }
+       return 0;
+}
 
-       device_remove_file(dev, &sda_caseopen[0].dev_attr);
-       device_remove_file(dev, &sda_caseopen[1].dev_attr);
+/* These groups handle non-standard attributes used in this device */
+static struct attribute *w83627ehf_attrs[] = {
+
+       &sensor_dev_attr_pwm1_stop_time.dev_attr.attr,
+       &sensor_dev_attr_pwm1_start_output.dev_attr.attr,
+       &sensor_dev_attr_pwm1_stop_output.dev_attr.attr,
+       &sensor_dev_attr_pwm1_max_output.dev_attr.attr,
+       &sensor_dev_attr_pwm1_step_output.dev_attr.attr,
+       &sensor_dev_attr_pwm1_target.dev_attr.attr,
+       &sensor_dev_attr_pwm1_tolerance.dev_attr.attr,
+
+       &sensor_dev_attr_pwm2_stop_time.dev_attr.attr,
+       &sensor_dev_attr_pwm2_start_output.dev_attr.attr,
+       &sensor_dev_attr_pwm2_stop_output.dev_attr.attr,
+       &sensor_dev_attr_pwm2_max_output.dev_attr.attr,
+       &sensor_dev_attr_pwm2_step_output.dev_attr.attr,
+       &sensor_dev_attr_pwm2_target.dev_attr.attr,
+       &sensor_dev_attr_pwm2_tolerance.dev_attr.attr,
+
+       &sensor_dev_attr_pwm3_stop_time.dev_attr.attr,
+       &sensor_dev_attr_pwm3_start_output.dev_attr.attr,
+       &sensor_dev_attr_pwm3_stop_output.dev_attr.attr,
+       &sensor_dev_attr_pwm3_max_output.dev_attr.attr,
+       &sensor_dev_attr_pwm3_step_output.dev_attr.attr,
+       &sensor_dev_attr_pwm3_target.dev_attr.attr,
+       &sensor_dev_attr_pwm3_tolerance.dev_attr.attr,
+
+       &sensor_dev_attr_pwm4_stop_time.dev_attr.attr,
+       &sensor_dev_attr_pwm4_start_output.dev_attr.attr,
+       &sensor_dev_attr_pwm4_stop_output.dev_attr.attr,
+       &sensor_dev_attr_pwm4_max_output.dev_attr.attr,
+       &sensor_dev_attr_pwm4_step_output.dev_attr.attr,
+       &sensor_dev_attr_pwm4_target.dev_attr.attr,
+       &sensor_dev_attr_pwm4_tolerance.dev_attr.attr,
+
+       &dev_attr_cpu0_vid.attr,
+       NULL
+};
 
-       device_remove_file(dev, &dev_attr_name);
-       device_remove_file(dev, &dev_attr_cpu0_vid);
-}
+static const struct attribute_group w83627ehf_group = {
+       .attrs = w83627ehf_attrs,
+       .is_visible = w83627ehf_attrs_visible,
+};
+
+static const struct attribute_group *w83627ehf_groups[] = {
+       &w83627ehf_group,
+       NULL
+};
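
These extra groups carry the non-standard attributes alongside the with_info plumbing; the registration call itself sits outside this hunk, so the following is only a sketch, under the assumption that the patch also defines a w83627ehf_chip_info for the standard sensors:

	hwmon_dev = devm_hwmon_device_register_with_info(dev, data->name,
							 data,
							 &w83627ehf_chip_info,
							 w83627ehf_groups);
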
+
+/*
+ * Driver and device management
+ */
 
 /* Get the monitoring functions started */
 static inline void w83627ehf_init_device(struct w83627ehf_data *data,
@@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data,
        }
 }
 
-static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
-                                  int r1, int r2)
-{
-       swap(data->temp_src[r1], data->temp_src[r2]);
-       swap(data->reg_temp[r1], data->reg_temp[r2]);
-       swap(data->reg_temp_over[r1], data->reg_temp_over[r2]);
-       swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]);
-       swap(data->reg_temp_config[r1], data->reg_temp_config[r2]);
-}
-
 static void
 w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
 {
@@ -1954,7 +1293,7 @@ static void
 w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
                           struct w83627ehf_data *data)
 {
-       int fan3pin, fan4pin, fan4min, fan5pin, regval;
+       int fan3pin, fan4pin, fan5pin, regval;
 
        /* The W83627UHG is simple, only two fan inputs, no config */
        if (sio_data->kind == w83627uhg) {
@@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
        }
 
        /* fan4 and fan5 share some pins with the GPIO and serial flash */
-       if (sio_data->kind == nct6775) {
-               /* On NCT6775, fan4 shares pins with the fdc interface */
-               fan3pin = 1;
-               fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
-               fan4min = 0;
-               fan5pin = 0;
-       } else if (sio_data->kind == nct6776) {
-               bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
-
-               superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
-               regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
-
-               if (regval & 0x80)
-                       fan3pin = gpok;
-               else
-                       fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
-
-               if (regval & 0x40)
-                       fan4pin = gpok;
-               else
-                       fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
-
-               if (regval & 0x20)
-                       fan5pin = gpok;
-               else
-                       fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
-
-               fan4min = fan4pin;
-       } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+       if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
                fan3pin = 1;
                fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
                fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
-               fan4min = fan4pin;
        } else {
                fan3pin = 1;
                fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
                fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
-               fan4min = fan4pin;
        }
 
        data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
        data->has_fan |= (fan3pin << 2);
        data->has_fan_min |= (fan3pin << 2);
 
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
-               /*
-                * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1
-                * register
-                */
-               data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
-               data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
-       } else {
-               /*
-                * It looks like fan4 and fan5 pins can be alternatively used
-                * as fan on/off switches, but fan5 control is write only :/
-                * We assume that if the serial interface is disabled, designers
-                * connected fan5 as input unless they are emitting log 1, which
-                * is not the default.
-                */
-               regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
-               if ((regval & (1 << 2)) && fan4pin) {
-                       data->has_fan |= (1 << 3);
-                       data->has_fan_min |= (1 << 3);
+       /*
+        * It looks like fan4 and fan5 pins can be alternatively used
+        * as fan on/off switches, but fan5 control is write only :/
+        * We assume that if the serial interface is disabled, designers
+        * connected fan5 as an input unless they are emitting logic 1,
+        * which is not the default.
+        */
+       regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+       if ((regval & (1 << 2)) && fan4pin) {
+               data->has_fan |= (1 << 3);
+               data->has_fan_min |= (1 << 3);
+       }
+       if (!(regval & (1 << 1)) && fan5pin) {
+               data->has_fan |= (1 << 4);
+               data->has_fan_min |= (1 << 4);
+       }
+}
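/*
 * Illustrative sketch, not part of the patch: has_fan and has_fan_min as
 * computed above are plain bitmaps with one bit per fan channel (fan1 in
 * bit 0), so a presence check reduces to a mask test.
 */
static bool w83627ehf_fan_present(const struct w83627ehf_data *data,
                                  int channel)
{
        return data->has_fan & BIT(channel);
}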
+
+static umode_t
+w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+                    u32 attr, int channel)
+{
+       const struct w83627ehf_data *data = drvdata;
+
+       switch (type) {
+       case hwmon_temp:
+               /* channel 0.., name 1.. */
+               if (!(data->have_temp & (1 << channel)))
+                       return 0;
+               if (attr == hwmon_temp_input || attr == hwmon_temp_label)
+                       return 0444;
+               if (channel == 2 && data->temp3_val_only)
+                       return 0;
+               if (attr == hwmon_temp_max) {
+                       if (data->reg_temp_over[channel])
+                               return 0644;
+                       else
+                               return 0;
+               }
+               if (attr == hwmon_temp_max_hyst) {
+                       if (data->reg_temp_hyst[channel])
+                               return 0644;
+                       else
+                               return 0;
+               }
+               if (channel > 2)
+                       return 0;
+               if (attr == hwmon_temp_alarm || attr == hwmon_temp_type)
+                       return 0444;
+               if (attr == hwmon_temp_offset) {
+                       if (data->have_temp_offset & (1 << channel))
+                               return 0644;
+                       else
+                               return 0;
+               }
+               break;
+
+       case hwmon_fan:
+               /* channel 0.., name 1.. */
+               if (!(data->has_fan & (1 << channel)))
+                       return 0;
+               if (attr == hwmon_fan_input || attr == hwmon_fan_alarm)
+                       return 0444;
+               if (attr == hwmon_fan_div) {
+                       return 0444;
                }
-               if (!(regval & (1 << 1)) && fan5pin) {
-                       data->has_fan |= (1 << 4);
-                       data->has_fan_min |= (1 << 4);
+               if (attr == hwmon_fan_min) {
+                       if (data->has_fan_min & (1 << channel))
+                               return 0644;
+                       else
+                               return 0;
+               }
+               break;
+
+       case hwmon_in:
+               /* channel 0.., name 0.. */
+               if (channel >= data->in_num)
+                       return 0;
+               if (channel == 6 && data->in6_skip)
+                       return 0;
+               if (attr == hwmon_in_alarm || attr == hwmon_in_input)
+                       return 0444;
+               if (attr == hwmon_in_min || attr == hwmon_in_max)
+                       return 0644;
+               break;
+
+       case hwmon_pwm:
+               /* channel 0.., name 1.. */
+               if (!(data->has_fan & (1 << channel)) ||
+                   channel >= data->pwm_num)
+                       return 0;
+               if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable ||
+                   attr == hwmon_pwm_input)
+                       return 0644;
+               break;
+
+       case hwmon_intrusion:
+               return 0644;
+
+       default: /* Shouldn't happen */
+               return 0;
+       }
+
+       return 0; /* Shouldn't happen */
+}
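/*
 * Note on the return values above: .is_visible reports octal permission
 * masks to the hwmon core: 0444 exposes an attribute read-only, 0644
 * read-write, and 0 hides it for that channel.
 */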
+
+static int
+w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr,
+                      int channel, long *val)
+{
+       switch (attr) {
+       case hwmon_temp_input:
+               *val = LM75_TEMP_FROM_REG(data->temp[channel]);
+               return 0;
+       case hwmon_temp_max:
+               *val = LM75_TEMP_FROM_REG(data->temp_max[channel]);
+               return 0;
+       case hwmon_temp_max_hyst:
+               *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]);
+               return 0;
+       case hwmon_temp_offset:
+               *val = data->temp_offset[channel] * 1000;
+               return 0;
+       case hwmon_temp_type:
+               *val = (int)data->temp_type[channel];
+               return 0;
+       case hwmon_temp_alarm:
+               if (channel < 3) {
+                       int bit[] = { 4, 5, 13 };
+                       *val = (data->alarms >> bit[channel]) & 1;
+                       return 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return -EOPNOTSUPP;
+}
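/*
 * Worked example for the alarm lookup above: temperature channels 0..2
 * map to alarm-register bits 4, 5 and 13, so with data->alarms == 0x2010
 * the temp1 (bit 4) and temp3 (bit 13) alarms both read back as 1 while
 * temp2 (bit 5) reads 0.
 */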
+
+static int
+w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr,
+                    int channel, long *val)
+{
+       switch (attr) {
+       case hwmon_in_input:
+               *val = in_from_reg(data->in[channel], channel, data->scale_in);
+               return 0;
+       case hwmon_in_min:
+               *val = in_from_reg(data->in_min[channel], channel,
+                                  data->scale_in);
+               return 0;
+       case hwmon_in_max:
+               *val = in_from_reg(data->in_max[channel], channel,
+                                  data->scale_in);
+               return 0;
+       case hwmon_in_alarm:
+               if (channel < 10) {
+                       int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 };
+                       *val = (data->alarms >> bit[channel]) & 1;
+                       return 0;
+               }
+               break;
+       default:
+               break;
+       }
+       return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr,
+                     int channel, long *val)
+{
+       switch (attr) {
+       case hwmon_fan_input:
+               *val = data->rpm[channel];
+               return 0;
+       case hwmon_fan_min:
+               *val = fan_from_reg8(data->fan_min[channel],
+                                    data->fan_div[channel]);
+               return 0;
+       case hwmon_fan_div:
+               *val = div_from_reg(data->fan_div[channel]);
+               return 0;
+       case hwmon_fan_alarm:
+               if (channel < 5) {
+                       int bit[] = { 6, 7, 11, 10, 23 };
+                       *val = (data->alarms >> bit[channel]) & 1;
+                       return 0;
                }
+               break;
+       default:
+               break;
        }
+       return -EOPNOTSUPP;
 }
 
+static int
+w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr,
+                     int channel, long *val)
+{
+       switch (attr) {
+       case hwmon_pwm_input:
+               *val = data->pwm[channel];
+               return 0;
+       case hwmon_pwm_enable:
+               *val = data->pwm_enable[channel];
+               return 0;
+       case hwmon_pwm_mode:
+               *val = data->pwm_mode[channel];
+               return 0;
+       default:
+               break;
+       }
+       return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr,
+                           int channel, long *val)
+{
+       if (attr != hwmon_intrusion_alarm || channel != 0)
+               return -EOPNOTSUPP; /* shouldn't happen */
+
+       *val = !!(data->caseopen & 0x10);
+       return 0;
+}
+
+static int
+w83627ehf_read(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long *val)
+{
+       struct w83627ehf_data *data = w83627ehf_update_device(dev->parent);
+
+       switch (type) {
+       case hwmon_fan:
+               return w83627ehf_do_read_fan(data, attr, channel, val);
+
+       case hwmon_in:
+               return w83627ehf_do_read_in(data, attr, channel, val);
+
+       case hwmon_pwm:
+               return w83627ehf_do_read_pwm(data, attr, channel, val);
+
+       case hwmon_temp:
+               return w83627ehf_do_read_temp(data, attr, channel, val);
+
+       case hwmon_intrusion:
+               return w83627ehf_do_read_intrusion(data, attr, channel, val);
+
+       default:
+               break;
+       }
+
+       return -EOPNOTSUPP;
+}
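/*
 * Call-path sketch (illustrative, not part of the patch): the hwmon core
 * turns a userspace read of, say, fan2_input into a call with a 0-based
 * channel number, equivalent to:
 *
 *	long rpm;
 *	int err = w83627ehf_read(hwmon_dev, hwmon_fan, hwmon_fan_input,
 *				 1, &rpm);
 */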
+
+static int
+w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type,
+                     u32 attr, int channel, const char **str)
+{
+       struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+       switch (type) {
+       case hwmon_temp:
+               if (attr == hwmon_temp_label) {
+                       *str = data->temp_label[data->temp_src[channel]];
+                       return 0;
+               }
+               break;
+
+       default:
+               break;
+       }
+       /* Nothing else should be read as a string */
+       return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_write(struct device *dev, enum hwmon_sensor_types type,
+                       u32 attr, int channel, long val)
+{
+       struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+       if (type == hwmon_in && attr == hwmon_in_min)
+               return store_in_min(dev, data, channel, val);
+       if (type == hwmon_in && attr == hwmon_in_max)
+               return store_in_max(dev, data, channel, val);
+
+       if (type == hwmon_fan && attr == hwmon_fan_min)
+               return store_fan_min(dev, data, channel, val);
+
+       if (type == hwmon_temp && attr == hwmon_temp_max)
+               return store_temp_max(dev, data, channel, val);
+       if (type == hwmon_temp && attr == hwmon_temp_max_hyst)
+               return store_temp_max_hyst(dev, data, channel, val);
+       if (type == hwmon_temp && attr == hwmon_temp_offset)
+               return store_temp_offset(dev, data, channel, val);
+
+       if (type == hwmon_pwm && attr == hwmon_pwm_mode)
+               return store_pwm_mode(dev, data, channel, val);
+       if (type == hwmon_pwm && attr == hwmon_pwm_enable)
+               return store_pwm_enable(dev, data, channel, val);
+       if (type == hwmon_pwm && attr == hwmon_pwm_input)
+               return store_pwm(dev, data, channel, val);
+
+       if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm)
+               return clear_caseopen(dev, data, channel, val);
+
+       return -EOPNOTSUPP;
+}
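/*
 * Mapping note (illustrative): the hwmon core translates sysfs writes
 * into these (type, attr, channel) triples, e.g. a write to pwm1_enable
 * arrives as (hwmon_pwm, hwmon_pwm_enable, 0) and is routed to
 * store_pwm_enable() above.
 */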
+
+static const struct hwmon_ops w83627ehf_ops = {
+       .is_visible = w83627ehf_is_visible,
+       .read = w83627ehf_read,
+       .read_string = w83627ehf_read_string,
+       .write = w83627ehf_write,
+};
+
+static const struct hwmon_channel_info *w83627ehf_info[] = {
+       HWMON_CHANNEL_INFO(fan,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+               HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN),
+       HWMON_CHANNEL_INFO(in,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+               HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN),
+       HWMON_CHANNEL_INFO(pwm,
+               HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+               HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+               HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+               HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE),
+       HWMON_CHANNEL_INFO(temp,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+               HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+                       HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE),
+       HWMON_CHANNEL_INFO(intrusion,
+               HWMON_INTRUSION_ALARM),
+       NULL
+};
+
+static const struct hwmon_chip_info w83627ehf_chip_info = {
+       .ops = &w83627ehf_ops,
+       .info = w83627ehf_info,
+};
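/*
 * For reference (a sketch with hypothetical names): each
 * HWMON_CHANNEL_INFO() entry above boils down to a hwmon_channel_info
 * carrying a zero-terminated per-channel config array, roughly:
 */
static const u32 example_intrusion_config[] = { HWMON_INTRUSION_ALARM, 0 };
static const struct hwmon_channel_info example_intrusion_info = {
        .type = hwmon_intrusion,
        .config = example_intrusion_config,
};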
+
 static int w83627ehf_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
        struct resource *res;
        u8 en_vrm10;
        int i, err = 0;
+       struct device *hwmon_dev;
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
        if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) {
@@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev)
 
        /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
        data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
-       /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */
+       /* 667HG has 3 pwms, and 627UHG has only 2 */
        switch (sio_data->kind) {
        default:
                data->pwm_num = 4;
                break;
        case w83667hg:
        case w83667hg_b:
-       case nct6775:
-       case nct6776:
                data->pwm_num = 3;
                break;
        case w83627uhg:
@@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
        data->have_temp = 0x07;
 
        /* Deal with temperature register setup first. */
-       if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
-               int mask = 0;
-
-               /*
-                * Display temperature sensor output only if it monitors
-                * a source other than one already reported. Always display
-                * first three temperature registers, though.
-                */
-               for (i = 0; i < NUM_REG_TEMP; i++) {
-                       u8 src;
-
-                       data->reg_temp[i] = NCT6775_REG_TEMP[i];
-                       data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
-                       data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
-                       data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
-
-                       src = w83627ehf_read_value(data,
-                                                  NCT6775_REG_TEMP_SOURCE[i]);
-                       src &= 0x1f;
-                       if (src && !(mask & (1 << src))) {
-                               data->have_temp |= 1 << i;
-                               mask |= 1 << src;
-                       }
-
-                       data->temp_src[i] = src;
-
-                       /*
-                        * Now do some register swapping if index 0..2 don't
-                        * point to SYSTIN(1), CPUIN(2), and AUXIN(3).
-                        * Idea is to have the first three attributes
-                        * report SYSTIN, CPUIN, and AUXIN if possible
-                        * without overriding the basic system configuration.
-                        */
-                       if (i > 0 && data->temp_src[0] != 1
-                           && data->temp_src[i] == 1)
-                               w82627ehf_swap_tempreg(data, 0, i);
-                       if (i > 1 && data->temp_src[1] != 2
-                           && data->temp_src[i] == 2)
-                               w82627ehf_swap_tempreg(data, 1, i);
-                       if (i > 2 && data->temp_src[2] != 3
-                           && data->temp_src[i] == 3)
-                               w82627ehf_swap_tempreg(data, 2, i);
-               }
-               if (sio_data->kind == nct6776) {
-                       /*
-                        * On NCT6776, AUXTIN and VIN3 pins are shared.
-                        * Only way to detect it is to check if AUXTIN is used
-                        * as a temperature source, and if that source is
-                        * enabled.
-                        *
-                        * If that is the case, disable in6, which reports VIN3.
-                        * Otherwise disable temp3.
-                        */
-                       if (data->temp_src[2] == 3) {
-                               u8 reg;
-
-                               if (data->reg_temp_config[2])
-                                       reg = w83627ehf_read_value(data,
-                                               data->reg_temp_config[2]);
-                               else
-                                       reg = 0; /* Assume AUXTIN is used */
-
-                               if (reg & 0x01)
-                                       data->have_temp &= ~(1 << 2);
-                               else
-                                       data->in6_skip = 1;
-                       }
-                       data->temp_label = nct6776_temp_label;
-               } else {
-                       data->temp_label = nct6775_temp_label;
-               }
-               data->have_temp_offset = data->have_temp & 0x07;
-               for (i = 0; i < 3; i++) {
-                       if (data->temp_src[i] > 3)
-                               data->have_temp_offset &= ~(1 << i);
-               }
-       } else if (sio_data->kind == w83667hg_b) {
+       if (sio_data->kind == w83667hg_b) {
                u8 reg;
 
                w83627ehf_set_temp_reg_ehf(data, 4);
@@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
                data->have_temp_offset = data->have_temp & 0x07;
        }
 
-       if (sio_data->kind == nct6775) {
-               data->has_fan_div = true;
-               data->fan_from_reg = fan_from_reg16;
-               data->fan_from_reg_min = fan_from_reg8;
-               data->REG_PWM = NCT6775_REG_PWM;
-               data->REG_TARGET = NCT6775_REG_TARGET;
-               data->REG_FAN = NCT6775_REG_FAN;
-               data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
-               data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
-               data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
-               data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
-               data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
-               data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
-       } else if (sio_data->kind == nct6776) {
-               data->has_fan_div = false;
-               data->fan_from_reg = fan_from_reg13;
-               data->fan_from_reg_min = fan_from_reg13;
-               data->REG_PWM = NCT6775_REG_PWM;
-               data->REG_TARGET = NCT6775_REG_TARGET;
-               data->REG_FAN = NCT6775_REG_FAN;
-               data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
-               data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
-               data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
-               data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
-       } else if (sio_data->kind == w83667hg_b) {
-               data->has_fan_div = true;
-               data->fan_from_reg = fan_from_reg8;
-               data->fan_from_reg_min = fan_from_reg8;
-               data->REG_PWM = W83627EHF_REG_PWM;
-               data->REG_TARGET = W83627EHF_REG_TARGET;
-               data->REG_FAN = W83627EHF_REG_FAN;
-               data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
-               data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
-               data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
-               data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
+       if (sio_data->kind == w83667hg_b) {
                data->REG_FAN_MAX_OUTPUT =
                  W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
                data->REG_FAN_STEP_OUTPUT =
                  W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
        } else {
-               data->has_fan_div = true;
-               data->fan_from_reg = fan_from_reg8;
-               data->fan_from_reg_min = fan_from_reg8;
-               data->REG_PWM = W83627EHF_REG_PWM;
-               data->REG_TARGET = W83627EHF_REG_TARGET;
-               data->REG_FAN = W83627EHF_REG_FAN;
-               data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
-               data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
-               data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
-               data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
                data->REG_FAN_MAX_OUTPUT =
                  W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
                data->REG_FAN_STEP_OUTPUT =
@@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
                goto exit_release;
 
        /* Read VID value */
-       if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
-           sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+       if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
                /*
                 * W83667HG has different pins for VID input and output, so
                 * we can get the VID input values directly at logical device D
@@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev)
                 */
                superio_select(sio_data->sioreg, W83667HG_LD_VID);
                data->vid = superio_inb(sio_data->sioreg, 0xe3);
-               err = device_create_file(dev, &dev_attr_cpu0_vid);
-               if (err) {
-                       superio_exit(sio_data->sioreg);
-                       goto exit_release;
-               }
+               data->have_vid = true;
        } else if (sio_data->kind != w83627uhg) {
                superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
                if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) {
@@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev)
                                                SIO_REG_VID_DATA);
                        if (sio_data->kind == w83627ehf) /* 6 VID pins only */
                                data->vid &= 0x3f;
-
-                       err = device_create_file(dev, &dev_attr_cpu0_vid);
-                       if (err) {
-                               superio_exit(sio_data->sioreg);
-                               goto exit_release;
-                       }
+                       data->have_vid = true;
                } else {
                        dev_info(dev,
                                 "VID pins in output mode, CPU VID not available\n");
                }
        }
 
-       if (fan_debounce &&
-           (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
-               u8 tmp;
-
-               superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
-               tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
-               if (sio_data->kind == nct6776)
-                       superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
-                                    0x3e | tmp);
-               else
-                       superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
-                                    0x1e | tmp);
-               pr_info("Enabled fan debounce for chip %s\n", data->name);
-       }
-
        w83627ehf_check_fan_inputs(sio_data, data);
 
        superio_exit(sio_data->sioreg);
 
        /* Read fan clock dividers immediately */
-       w83627ehf_update_fan_div_common(dev, data);
+       w83627ehf_update_fan_div(data);
 
        /* Read pwm data to save original values */
-       w83627ehf_update_pwm_common(dev, data);
+       w83627ehf_update_pwm(data);
        for (i = 0; i < data->pwm_num; i++)
                data->pwm_enable_orig[i] = data->pwm_enable[i];
 
-       /* Register sysfs hooks */
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
-               err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
-               if (err)
-                       goto exit_remove;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
-               struct sensor_device_attribute *attr =
-                 &sda_sf3_max_step_arrays[i];
-               if (data->REG_FAN_STEP_OUTPUT &&
-                   data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
-                       err = device_create_file(dev, &attr->dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-       }
-       /* if fan3 and fan4 are enabled create the sf3 files for them */
-       if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3)
-               for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) {
-                       err = device_create_file(dev,
-                                       &sda_sf3_arrays_fan3[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-       if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
-               for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
-                       err = device_create_file(dev,
-                                       &sda_sf3_arrays_fan4[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-
-       for (i = 0; i < data->in_num; i++) {
-               if ((i == 6) && data->in6_skip)
-                       continue;
-               if ((err = device_create_file(dev, &sda_in_input[i].dev_attr))
-                       || (err = device_create_file(dev,
-                               &sda_in_alarm[i].dev_attr))
-                       || (err = device_create_file(dev,
-                               &sda_in_min[i].dev_attr))
-                       || (err = device_create_file(dev,
-                               &sda_in_max[i].dev_attr)))
-                       goto exit_remove;
-       }
-
-       for (i = 0; i < 5; i++) {
-               if (data->has_fan & (1 << i)) {
-                       if ((err = device_create_file(dev,
-                                       &sda_fan_input[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_fan_alarm[i].dev_attr)))
-                               goto exit_remove;
-                       if (sio_data->kind != nct6776) {
-                               err = device_create_file(dev,
-                                               &sda_fan_div[i].dev_attr);
-                               if (err)
-                                       goto exit_remove;
-                       }
-                       if (data->has_fan_min & (1 << i)) {
-                               err = device_create_file(dev,
-                                               &sda_fan_min[i].dev_attr);
-                               if (err)
-                                       goto exit_remove;
-                       }
-                       if (i < data->pwm_num &&
-                               ((err = device_create_file(dev,
-                                       &sda_pwm[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_pwm_mode[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_pwm_enable[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_target_temp[i].dev_attr))
-                               || (err = device_create_file(dev,
-                                       &sda_tolerance[i].dev_attr))))
-                               goto exit_remove;
-               }
-       }
-
-       for (i = 0; i < NUM_REG_TEMP; i++) {
-               if (!(data->have_temp & (1 << i)))
-                       continue;
-               err = device_create_file(dev, &sda_temp_input[i].dev_attr);
-               if (err)
-                       goto exit_remove;
-               if (data->temp_label) {
-                       err = device_create_file(dev,
-                                                &sda_temp_label[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (i == 2 && data->temp3_val_only)
-                       continue;
-               if (data->reg_temp_over[i]) {
-                       err = device_create_file(dev,
-                               &sda_temp_max[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (data->reg_temp_hyst[i]) {
-                       err = device_create_file(dev,
-                               &sda_temp_max_hyst[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (i > 2)
-                       continue;
-               if ((err = device_create_file(dev,
-                               &sda_temp_alarm[i].dev_attr))
-                       || (err = device_create_file(dev,
-                               &sda_temp_type[i].dev_attr)))
-                       goto exit_remove;
-               if (data->have_temp_offset & (1 << i)) {
-                       err = device_create_file(dev,
-                                                &sda_temp_offset[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-       }
-
-       err = device_create_file(dev, &sda_caseopen[0].dev_attr);
-       if (err)
-               goto exit_remove;
-
-       if (sio_data->kind == nct6776) {
-               err = device_create_file(dev, &sda_caseopen[1].dev_attr);
-               if (err)
-                       goto exit_remove;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+                                                        data->name,
+                                                        data,
+                                                        &w83627ehf_chip_info,
+                                                        w83627ehf_groups);
 
-       err = device_create_file(dev, &dev_attr_name);
-       if (err)
-               goto exit_remove;
-
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 
-       return 0;
-
-exit_remove:
-       w83627ehf_device_remove_files(dev);
 exit_release:
        release_region(res->start, IOREGION_LENGTH);
 exit:
@@ -2588,8 +1959,6 @@ static int w83627ehf_remove(struct platform_device *pdev)
 {
        struct w83627ehf_data *data = platform_get_drvdata(pdev);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       w83627ehf_device_remove_files(&pdev->dev);
        release_region(data->addr, IOREGION_LENGTH);
 
        return 0;
@@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev)
 static int w83627ehf_suspend(struct device *dev)
 {
        struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
 
        mutex_lock(&data->update_lock);
        data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
-       if (sio_data->kind == nct6775) {
-               data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
-               data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
-       }
        mutex_unlock(&data->update_lock);
 
        return 0;
@@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev)
 static int w83627ehf_resume(struct device *dev)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        int i;
 
        mutex_lock(&data->update_lock);
@@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev)
                if (!(data->has_fan_min & (1 << i)))
                        continue;
 
-               w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+               w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i],
                                      data->fan_min[i]);
        }
 
@@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev)
 
        /* Restore other settings */
        w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
-       if (sio_data->kind == nct6775) {
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
-               w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
-       }
 
        /* Force re-reading all values */
        data->valid = 0;
@@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
        static const char sio_name_W83627UHG[] __initconst = "W83627UHG";
        static const char sio_name_W83667HG[] __initconst = "W83667HG";
        static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B";
-       static const char sio_name_NCT6775[] __initconst = "NCT6775F";
-       static const char sio_name_NCT6776[] __initconst = "NCT6776F";
 
        u16 val;
        const char *sio_name;
@@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
                sio_data->kind = w83667hg_b;
                sio_name = sio_name_W83667HG_B;
                break;
-       case SIO_NCT6775_ID:
-               sio_data->kind = nct6775;
-               sio_name = sio_name_NCT6775;
-               break;
-       case SIO_NCT6776_ID:
-               sio_data->kind = nct6776;
-               sio_name = sio_name_NCT6776;
-               break;
        default:
                if (val != 0xffff)
                        pr_debug("unsupported chip ID: 0x%04x\n", val);
index dc3f507e7562d016c4e27a5d78f7ffecb8ab6542..a90d757f704319e36832e8cafa270e7967ebabcd 100644 (file)
@@ -1132,7 +1132,6 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
        drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
 }
 
-#ifdef CONFIG_CPU_PM
 static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
 {
        int i, ret = 0;
@@ -1402,17 +1401,17 @@ static struct notifier_block etm4_cpu_pm_nb = {
 
 static int etm4_cpu_pm_register(void)
 {
-       return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+       if (IS_ENABLED(CONFIG_CPU_PM))
+               return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+
+       return 0;
 }
 
 static void etm4_cpu_pm_unregister(void)
 {
-       cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+       if (IS_ENABLED(CONFIG_CPU_PM))
+               cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
 }
-#else
-static int etm4_cpu_pm_register(void) { return 0; }
-static void etm4_cpu_pm_unregister(void) { }
-#endif
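/*
 * Note on the conversion above: IS_ENABLED(CONFIG_CPU_PM) is a
 * compile-time constant, so the disabled branch is still type-checked
 * and then eliminated as dead code, which is what lets the #ifdef stubs
 * go away; this relies on <linux/cpu_pm.h> declaring the notifier
 * helpers in both configurations.
 */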
 
 static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 {
index 0dfd97bbde9eecc4931ca43ab28496ce221fa447..ca232ec565e83a6db9dd3408b312d20e6bd556f5 100644 (file)
@@ -834,9 +834,6 @@ static irqreturn_t intel_th_irq(int irq, void *data)
                        ret |= d->irq(th->thdev[i]);
        }
 
-       if (ret == IRQ_NONE)
-               pr_warn_ratelimited("nobody cared for irq\n");
-
        return ret;
 }
 
@@ -887,6 +884,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
 
                        if (th->irq == -1)
                                th->irq = devres[r].start;
+                       th->num_irqs++;
                        break;
                default:
                        dev_warn(dev, "Unknown resource type %lx\n",
@@ -940,6 +938,9 @@ void intel_th_free(struct intel_th *th)
 
        th->num_thdevs = 0;
 
+       for (i = 0; i < th->num_irqs; i++)
+               devm_free_irq(th->dev, th->irq + i, th);
+
        pm_runtime_get_sync(th->dev);
        pm_runtime_forbid(th->dev);
 
index 0df480072b6ca01321af79967f22e7a63b343333..6f4f5486fe6dcccb3f9570cf3c0eb7da24b22b0c 100644 (file)
@@ -261,6 +261,7 @@ enum th_mmio_idx {
  * @num_thdevs:        number of devices in the @thdev array
  * @num_resources:     number of resources in the @resource array
  * @irq:       irq number
+ * @num_irqs:  number of IRQs in use
  * @id:                this Intel TH controller's device ID in the system
  * @major:     device node major for output devices
  */
@@ -277,6 +278,7 @@ struct intel_th {
        unsigned int            num_thdevs;
        unsigned int            num_resources;
        int                     irq;
+       int                     num_irqs;
 
        int                     id;
        int                     major;
index 6d240dfae9d9059f68737065e654bc8d369a6836..8e48c7458aa35cbbf3963462d969d2b22614ce49 100644 (file)
@@ -1676,10 +1676,13 @@ static int intel_th_msc_init(struct msc *msc)
        return 0;
 }
 
-static void msc_win_switch(struct msc *msc)
+static int msc_win_switch(struct msc *msc)
 {
        struct msc_window *first;
 
+       if (list_empty(&msc->win_list))
+               return -EINVAL;
+
        first = list_first_entry(&msc->win_list, struct msc_window, entry);
 
        if (msc_is_last_win(msc->cur_win))
@@ -1691,6 +1694,8 @@ static void msc_win_switch(struct msc *msc)
        msc->base_addr = msc_win_base_dma(msc->cur_win);
 
        intel_th_trace_switch(msc->thdev);
+
+       return 0;
 }
 
 /**
@@ -2025,16 +2030,15 @@ win_switch_store(struct device *dev, struct device_attribute *attr,
        if (val != 1)
                return -EINVAL;
 
+       ret = -EINVAL;
        mutex_lock(&msc->buf_mutex);
        /*
         * Window switch can only happen in the "multi" mode.
         * If an external buffer is engaged, it has full control
         * over window switching.
         */
-       if (msc->mode != MSC_MODE_MULTI || msc->mbuf)
-               ret = -ENOTSUPP;
-       else
-               msc_win_switch(msc);
+       if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
+               ret = msc_win_switch(msc);
        mutex_unlock(&msc->buf_mutex);
 
        return ret ? ret : size;
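/*
 * Sketch of the store pattern the hunk above converges on (all names
 * hypothetical): default to failure and overwrite ret only on the
 * supported path, so the full write size is reported exactly when the
 * switch really happened.
 */
static ssize_t example_switch_store(struct mutex *lock, bool supported,
                                    int (*do_switch)(void), size_t size)
{
        int ret = -EINVAL;

        mutex_lock(lock);
        if (supported)
                ret = do_switch();
        mutex_unlock(lock);

        return ret ? ret : size;
}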
index ebf3e30e989af9745f0b5499b9c23f96f7e26fa5..e9d90b53bbc46325f199a9d684cdf9f5e12430cc 100644 (file)
@@ -204,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Comet Lake PCH-V */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        {
                /* Ice Lake NNPI */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
@@ -229,6 +234,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Elkhart Lake */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index e13af4874976ef1ae493844dee464d3e0c3b6fb6..5137e62970221e1751412480fc94aed2cf2d94f4 100644 (file)
@@ -174,7 +174,7 @@ static struct at91_twi_pdata sama5d2_config = {
 
 static struct at91_twi_pdata sam9x60_config = {
        .clk_max_div = 7,
-       .clk_offset = 4,
+       .clk_offset = 3,
        .has_unre_flag = true,
        .has_alt_cmd = true,
        .has_hold_field = true,
index e01b2b57e724741c3c64853bae0a1bd094f0ab94..5ab901ad615dd0d3ed1ee369acbdcdfd413ea10f 100644 (file)
@@ -58,6 +58,7 @@ struct bcm2835_i2c_dev {
        struct i2c_adapter adapter;
        struct completion completion;
        struct i2c_msg *curr_msg;
+       struct clk *bus_clk;
        int num_msgs;
        u32 msg_err;
        u8 *msg_buf;
@@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
        struct resource *mem, *irq;
        int ret;
        struct i2c_adapter *adap;
-       struct clk *bus_clk;
        struct clk *mclk;
        u32 bus_clk_rate;
 
@@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
                return PTR_ERR(mclk);
        }
 
-       bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
+       i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
 
-       if (IS_ERR(bus_clk)) {
+       if (IS_ERR(i2c_dev->bus_clk)) {
                dev_err(&pdev->dev, "Could not register clock\n");
-               return PTR_ERR(bus_clk);
+               return PTR_ERR(i2c_dev->bus_clk);
        }
 
        ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
@@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
                bus_clk_rate = 100000;
        }
 
-       ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate);
+       ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
        if (ret < 0) {
                dev_err(&pdev->dev, "Could not set clock frequency\n");
                return ret;
        }
 
-       ret = clk_prepare_enable(bus_clk);
+       ret = clk_prepare_enable(i2c_dev->bus_clk);
        if (ret) {
                dev_err(&pdev->dev, "Couldn't prepare clock");
                return ret;
@@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
 static int bcm2835_i2c_remove(struct platform_device *pdev)
 {
        struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
-       struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div");
 
-       clk_rate_exclusive_put(bus_clk);
-       clk_disable_unprepare(bus_clk);
+       clk_rate_exclusive_put(i2c_dev->bus_clk);
+       clk_disable_unprepare(i2c_dev->bus_clk);
 
        free_irq(i2c_dev->irq, i2c_dev);
        i2c_del_adapter(&i2c_dev->adapter);
index ff340d7ae2e52c1442e25bceaadffa8d67dcee33..abfe3094c047d0abe6742d023816a03a2044a67f 100644 (file)
@@ -369,7 +369,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
        if (unlikely(!dev))
                return -ENOMEM;
 
-       dev->base = ioremap_nocache(res->start, resource_size(res));
+       dev->base = ioremap(res->start, resource_size(res));
        if (unlikely(!dev->base)) {
                ret = -ENXIO;
                goto err;
index 38556381f4cadb999025adfd7b97f0f791abdb4f..2f8b8050a223375b8fec23999032df2a2bd6b137 100644 (file)
@@ -433,13 +433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev)
        adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev,
                                                         "scl",
                                                         GPIOD_ASIS);
-       if (IS_ERR(adapter_data->gpio_scl))
-               return PTR_ERR(adapter_data->gpio_scl);
+       if (IS_ERR(adapter_data->gpio_scl)) {
+               ret = PTR_ERR(adapter_data->gpio_scl);
+               goto free_both;
+       }
        adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev,
                                                         "sda",
                                                         GPIOD_ASIS);
-       if (IS_ERR(adapter_data->gpio_sda))
-               return PTR_ERR(adapter_data->gpio_sda);
+       if (IS_ERR(adapter_data->gpio_sda)) {
+               ret = PTR_ERR(adapter_data->gpio_sda);
+               goto free_both;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
index 0829cb696d9d4a276bbb80038fe0c9186b8b2579..4fde74eb34a7a2614261c4e935e6b9daa7a8686d 100644 (file)
@@ -281,7 +281,7 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
        }
 
        /* remap the memory */
-       pmcmsptwi_data.iobase = ioremap_nocache(res->start,
+       pmcmsptwi_data.iobase = ioremap(res->start,
                                                resource_size(res));
        if (!pmcmsptwi_data.iobase) {
                dev_err(&pldev->dev,
index a98bf31d0e5c1269552fa1e45bee391ab035af91..61339c665ebdc6eb21c7938ad366583f37fad356 100644 (file)
@@ -1608,14 +1608,18 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(&pdev->dev);
-       if (!pm_runtime_enabled(&pdev->dev))
+       if (!pm_runtime_enabled(&pdev->dev)) {
                ret = tegra_i2c_runtime_resume(&pdev->dev);
-       else
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "runtime resume failed\n");
+                       goto unprepare_div_clk;
+               }
+       } else {
                ret = pm_runtime_get_sync(i2c_dev->dev);
-
-       if (ret < 0) {
-               dev_err(&pdev->dev, "runtime resume failed\n");
-               goto unprepare_div_clk;
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "runtime resume failed\n");
+                       goto disable_rpm;
+               }
        }
 
        if (i2c_dev->is_multimaster_mode) {
@@ -1623,7 +1627,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
                if (ret < 0) {
                        dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
                                ret);
-                       goto disable_rpm;
+                       goto put_rpm;
                }
        }
 
@@ -1671,11 +1675,16 @@ disable_div_clk:
        if (i2c_dev->is_multimaster_mode)
                clk_disable(i2c_dev->div_clk);
 
-disable_rpm:
-       pm_runtime_disable(&pdev->dev);
-       if (!pm_runtime_status_suspended(&pdev->dev))
+put_rpm:
+       if (pm_runtime_enabled(&pdev->dev))
+               pm_runtime_put_sync(&pdev->dev);
+       else
                tegra_i2c_runtime_suspend(&pdev->dev);
 
+disable_rpm:
+       if (pm_runtime_enabled(&pdev->dev))
+               pm_runtime_disable(&pdev->dev);
+
 unprepare_div_clk:
        clk_unprepare(i2c_dev->div_clk);
 
@@ -1710,9 +1719,14 @@ static int tegra_i2c_remove(struct platform_device *pdev)
 static int __maybe_unused tegra_i2c_suspend(struct device *dev)
 {
        struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+       int err;
 
        i2c_mark_adapter_suspended(&i2c_dev->adapter);
 
+       err = pm_runtime_force_suspend(dev);
+       if (err < 0)
+               return err;
+
        return 0;
 }
 
@@ -1733,6 +1747,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
        if (err)
                return err;
 
+       err = pm_runtime_force_resume(dev);
+       if (err < 0)
+               return err;
+
        i2c_mark_adapter_resumed(&i2c_dev->adapter);
 
        return 0;
index 9333c865d4a9a92c780409710fa1fb9d68412fcb..35b209797d7b759e4f274d28c7d29c173d83cd39 100644 (file)
@@ -186,10 +186,11 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
         * If we can set SDA, we will always create a STOP to ensure additional
         * pulses will do no harm. This is achieved by letting SDA follow SCL
         * half a cycle later. Check the 'incomplete_write_byte' fault injector
-        * for details.
+        * for details. Note that we must honour tsu:sto, 4us, but let's use 5us
+        * here for simplicity.
         */
        bri->set_scl(adap, scl);
-       ndelay(RECOVERY_NDELAY / 2);
+       ndelay(RECOVERY_NDELAY);
        if (bri->set_sda)
                bri->set_sda(adap, scl);
        ndelay(RECOVERY_NDELAY / 2);
@@ -211,7 +212,13 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
                scl = !scl;
                bri->set_scl(adap, scl);
                /* Creating STOP again, see above */
-               ndelay(RECOVERY_NDELAY / 2);
+               if (scl) {
+                       /* Honour minimum tsu:sto */
+                       ndelay(RECOVERY_NDELAY);
+               } else {
+                       /* Honour minimum tf and thd:dat */
+                       ndelay(RECOVERY_NDELAY / 2);
+               }
                if (bri->set_sda)
                        bri->set_sda(adap, scl);
                ndelay(RECOVERY_NDELAY / 2);
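/*
 * Timing sketch for the two hunks above (RECOVERY_NDELAY assumed to be
 * 5000, i.e. the 5us mentioned in the comment): SCL-high phases now wait
 * the full 5us so the 4us tsu:sto minimum is honoured with margin, while
 * SCL-low phases keep the 2.5us half-delay, which still covers tf and
 * thd:dat.
 */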
@@ -896,29 +903,6 @@ struct i2c_client *i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address
 }
 EXPORT_SYMBOL_GPL(i2c_new_dummy_device);
 
-/**
- * i2c_new_dummy - return a new i2c device bound to a dummy driver
- * @adapter: the adapter managing the device
- * @address: seven bit address to be used
- * Context: can sleep
- *
- * This deprecated function has the same functionality as @i2c_new_dummy_device,
- * it just returns NULL instead of an ERR_PTR in case of an error for
- * compatibility with current I2C API. It will be removed once all users are
- * converted.
- *
- * This returns the new i2c client, which should be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
- */
-struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
-{
-       struct i2c_client *ret;
-
-       ret = i2c_new_dummy_device(adapter, address);
-       return IS_ERR(ret) ? NULL : ret;
-}
-EXPORT_SYMBOL_GPL(i2c_new_dummy);
-
 struct i2c_dummy_devres {
        struct i2c_client *client;
 };
index 043691656245839614e458d4367151141c7c3568..7f8f896fa0c38bb710b21560368c44d4ce94a979 100644 (file)
@@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = {
        .groups = i3c_masterdev_groups,
 };
 
-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
-                    unsigned long max_i2c_scl_rate)
+static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+                           unsigned long max_i2c_scl_rate)
 {
        struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
 
index b0ff0e12d84ca0faa8a5a5f2ebfcc7b53cdbb1e2..bd26c3b9634ebd420c648ec61e9f5b374411f509 100644 (file)
@@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
        struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct dw_i3c_master *master = to_dw_i3c_master(m);
+       int pos;
+
+       pos = dw_i3c_master_get_free_pos(master);
+
+       if (data->index > pos && pos > 0) {
+               writel(0,
+                      master->regs +
+                      DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+               master->addrs[data->index] = 0;
+               master->free_pos |= BIT(data->index);
+
+               data->index = pos;
+               master->addrs[pos] = dev->info.dyn_addr;
+               master->free_pos &= ~BIT(pos);
+       }
 
        writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
               master->regs +
@@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
 static int dw_i3c_probe(struct platform_device *pdev)
 {
        struct dw_i3c_master *master;
-       struct resource *res;
        int ret, irq;
 
        master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
        if (!master)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       master->regs = devm_ioremap_resource(&pdev->dev, res);
+       master->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(master->regs))
                return PTR_ERR(master->regs);
 
index 10db0bf0655a9f71938f146f802857eac4c9c329..54712793709e6e62d9d151eaf3fd7b40897887aa 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
+#include <linux/of_device.h>
 
 #define DEV_ID                         0x0
 #define DEV_ID_I3C_MASTER              0x5034
@@ -60,6 +61,7 @@
 #define CTRL_HALT_EN                   BIT(30)
 #define CTRL_MCS                       BIT(29)
 #define CTRL_MCS_EN                    BIT(28)
+#define CTRL_THD_DELAY(x)              (((x) << 24) & GENMASK(25, 24))
 #define CTRL_HJ_DISEC                  BIT(8)
 #define CTRL_MST_ACK                   BIT(7)
 #define CTRL_HJ_ACK                    BIT(6)
@@ -70,6 +72,7 @@
 #define CTRL_MIXED_FAST_BUS_MODE       2
 #define CTRL_MIXED_SLOW_BUS_MODE       3
 #define CTRL_BUS_MODE_MASK             GENMASK(1, 0)
+#define THD_DELAY_MAX                  3
 
 #define PRESCL_CTRL0                   0x14
 #define PRESCL_CTRL0_I2C(x)            ((x) << 16)
@@ -388,6 +391,10 @@ struct cdns_i3c_xfer {
        struct cdns_i3c_cmd cmds[0];
 };
 
+struct cdns_i3c_data {
+       u8 thd_delay_ns;
+};
+
 struct cdns_i3c_master {
        struct work_struct hj_work;
        struct i3c_master_controller base;
@@ -408,6 +415,7 @@ struct cdns_i3c_master {
        struct clk *pclk;
        struct cdns_i3c_master_caps caps;
        unsigned long i3c_scl_lim;
+       const struct cdns_i3c_data *devdata;
 };
 
 static inline struct cdns_i3c_master *
@@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
        return 0;
 }
 
+static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
+{
+       unsigned long sysclk_rate = clk_get_rate(master->sysclk);
+       u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
+                                   (NSEC_PER_SEC / sysclk_rate));
+
+       /* Values greater than 3 are not valid. */
+       if (thd_delay > THD_DELAY_MAX)
+               thd_delay = THD_DELAY_MAX;
+
+       /* The CTRL_THD_DELAY field value is encoded (inverted). */
+       return (THD_DELAY_MAX - thd_delay);
+}
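/*
 * Worked example for the helper above (clock rate assumed purely for
 * illustration): with a 166 MHz sysclk, NSEC_PER_SEC / sysclk_rate
 * truncates to 6 ns per cycle, so the 10 ns thd_delay_ns needs
 * DIV_ROUND_UP(10, 6) = 2 cycles, written to the register field as
 * THD_DELAY_MAX - 2 = 1.
 */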
+
 static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
 {
        struct cdns_i3c_master *master = to_cdns_i3c_master(m);
@@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
         * We will issue ENTDAA afterwards from the threaded IRQ handler.
         */
        ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+
+       /*
+        * Configure data hold delay based on device-specific data.
+        *
+        * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
+        * master output. This setting allows meeting this timing on the master's
+        * SoC outputs, regardless of PCB balancing.
+        */
+       ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
        writel(ctrl, master->regs + CTRL);
 
        cdns_i3c_master_enable(master);
@@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work)
        i3c_master_do_daa(&master->base);
 }
 
+static struct cdns_i3c_data cdns_i3c_devdata = {
+       .thd_delay_ns = 10,
+};
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+       { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
+       { /* sentinel */ },
+};
+
 static int cdns_i3c_master_probe(struct platform_device *pdev)
 {
        struct cdns_i3c_master *master;
-       struct resource *res;
        int ret, irq;
        u32 val;
 
@@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
        if (!master)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       master->regs = devm_ioremap_resource(&pdev->dev, res);
+       master->devdata = of_device_get_match_data(&pdev->dev);
+       if (!master->devdata)
+               return -EINVAL;
+
+       master->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(master->regs))
                return PTR_ERR(master->regs);
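
The devdata retrieval above relies on of_device_get_match_data() returning
the .data pointer of the matched compatible, so per-SoC timing can be added
without touching probe(). A sketch with a hypothetical second compatible:

    static const struct cdns_i3c_data example_soc_devdata = {
            .thd_delay_ns = 20,     /* hypothetical per-SoC value */
    };

    static const struct of_device_id example_of_ids[] = {
            { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
            { .compatible = "vendor,example-i3c", .data = &example_soc_devdata },
            { /* sentinel */ },
    };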
 
@@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct of_device_id cdns_i3c_master_of_ids[] = {
-       { .compatible = "cdns,i3c-master" },
-       { /* sentinel */ },
-};
-
 static struct platform_driver cdns_i3c_master = {
        .probe = cdns_i3c_master_probe,
        .remove = cdns_i3c_master_remove,
index 75fd2a7b084229cda566449127bffe297462214f..9f38ff02a7b838ca6345b789099773983dfd2e2b 100644 (file)
@@ -41,6 +41,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/acpi.h>
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
 #include <linux/tick.h>
@@ -79,6 +80,7 @@ struct idle_cpu {
        unsigned long auto_demotion_disable_flags;
        bool byt_auto_demotion_disable_flag;
        bool disable_promotion_to_c1e;
+       bool use_acpi;
 };
 
 static const struct idle_cpu *icpu;
@@ -89,6 +91,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv, int index);
 static struct cpuidle_state *cpuidle_state_table;
 
+/*
+ * Enable this state by default even if the ACPI _CST does not list it.
+ */
+#define CPUIDLE_FLAG_ALWAYS_ENABLE     BIT(15)
+
 /*
  * Set this flag for states where the HW flushes the TLB for us
  * and so we don't need cross-calls to keep it consistent.
@@ -124,7 +131,7 @@ static struct cpuidle_state nehalem_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -161,7 +168,7 @@ static struct cpuidle_state snb_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -296,7 +303,7 @@ static struct cpuidle_state ivb_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -341,7 +348,7 @@ static struct cpuidle_state ivt_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 80,
                .enter = &intel_idle,
@@ -378,7 +385,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 250,
                .enter = &intel_idle,
@@ -415,7 +422,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 500,
                .enter = &intel_idle,
@@ -452,7 +459,7 @@ static struct cpuidle_state hsw_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -520,7 +527,7 @@ static struct cpuidle_state bdw_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -589,7 +596,7 @@ static struct cpuidle_state skl_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -658,7 +665,7 @@ static struct cpuidle_state skx_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -808,7 +815,7 @@ static struct cpuidle_state bxt_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -869,7 +876,7 @@ static struct cpuidle_state dnv_cstates[] = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x01",
-               .flags = MWAIT2flg(0x01),
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
                .exit_latency = 10,
                .target_residency = 20,
                .enter = &intel_idle,
@@ -944,37 +951,19 @@ static void intel_idle_s2idle(struct cpuidle_device *dev,
        mwait_idle_with_hints(eax, ecx);
 }
 
-static void __setup_broadcast_timer(bool on)
-{
-       if (on)
-               tick_broadcast_enable();
-       else
-               tick_broadcast_disable();
-}
-
-static void auto_demotion_disable(void)
-{
-       unsigned long long msr_bits;
-
-       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-       msr_bits &= ~(icpu->auto_demotion_disable_flags);
-       wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-}
-static void c1e_promotion_disable(void)
-{
-       unsigned long long msr_bits;
-
-       rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
-       msr_bits &= ~0x2;
-       wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
-}
-
 static const struct idle_cpu idle_cpu_nehalem = {
        .state_table = nehalem_cstates,
        .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_nhx = {
+       .state_table = nehalem_cstates,
+       .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+       .disable_promotion_to_c1e = true,
+       .use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_atom = {
        .state_table = atom_cstates,
 };
@@ -993,6 +982,12 @@ static const struct idle_cpu idle_cpu_snb = {
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_snx = {
+       .state_table = snb_cstates,
+       .disable_promotion_to_c1e = true,
+       .use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_byt = {
        .state_table = byt_cstates,
        .disable_promotion_to_c1e = true,
@@ -1013,6 +1008,7 @@ static const struct idle_cpu idle_cpu_ivb = {
 static const struct idle_cpu idle_cpu_ivt = {
        .state_table = ivt_cstates,
        .disable_promotion_to_c1e = true,
+       .use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_hsw = {
@@ -1020,11 +1016,23 @@ static const struct idle_cpu idle_cpu_hsw = {
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_hsx = {
+       .state_table = hsw_cstates,
+       .disable_promotion_to_c1e = true,
+       .use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_bdw = {
        .state_table = bdw_cstates,
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_bdx = {
+       .state_table = bdw_cstates,
+       .disable_promotion_to_c1e = true,
+       .use_acpi = true,
+};
+
 static const struct idle_cpu idle_cpu_skl = {
        .state_table = skl_cstates,
        .disable_promotion_to_c1e = true,
@@ -1033,15 +1041,18 @@ static const struct idle_cpu idle_cpu_skl = {
 static const struct idle_cpu idle_cpu_skx = {
        .state_table = skx_cstates,
        .disable_promotion_to_c1e = true,
+       .use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_avn = {
        .state_table = avn_cstates,
        .disable_promotion_to_c1e = true,
+       .use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_knl = {
        .state_table = knl_cstates,
+       .use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_bxt = {
@@ -1052,20 +1063,21 @@ static const struct idle_cpu idle_cpu_bxt = {
 static const struct idle_cpu idle_cpu_dnv = {
        .state_table = dnv_cstates,
        .disable_promotion_to_c1e = true,
+       .use_acpi = true,
 };
 
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
-       INTEL_CPU_FAM6(NEHALEM_EP,              idle_cpu_nehalem),
+       INTEL_CPU_FAM6(NEHALEM_EP,              idle_cpu_nhx),
        INTEL_CPU_FAM6(NEHALEM,                 idle_cpu_nehalem),
        INTEL_CPU_FAM6(NEHALEM_G,               idle_cpu_nehalem),
        INTEL_CPU_FAM6(WESTMERE,                idle_cpu_nehalem),
-       INTEL_CPU_FAM6(WESTMERE_EP,             idle_cpu_nehalem),
-       INTEL_CPU_FAM6(NEHALEM_EX,              idle_cpu_nehalem),
+       INTEL_CPU_FAM6(WESTMERE_EP,             idle_cpu_nhx),
+       INTEL_CPU_FAM6(NEHALEM_EX,              idle_cpu_nhx),
        INTEL_CPU_FAM6(ATOM_BONNELL,            idle_cpu_atom),
        INTEL_CPU_FAM6(ATOM_BONNELL_MID,        idle_cpu_lincroft),
-       INTEL_CPU_FAM6(WESTMERE_EX,             idle_cpu_nehalem),
+       INTEL_CPU_FAM6(WESTMERE_EX,             idle_cpu_nhx),
        INTEL_CPU_FAM6(SANDYBRIDGE,             idle_cpu_snb),
-       INTEL_CPU_FAM6(SANDYBRIDGE_X,           idle_cpu_snb),
+       INTEL_CPU_FAM6(SANDYBRIDGE_X,           idle_cpu_snx),
        INTEL_CPU_FAM6(ATOM_SALTWELL,           idle_cpu_atom),
        INTEL_CPU_FAM6(ATOM_SILVERMONT,         idle_cpu_byt),
        INTEL_CPU_FAM6(ATOM_SILVERMONT_MID,     idle_cpu_tangier),
@@ -1073,14 +1085,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        INTEL_CPU_FAM6(IVYBRIDGE,               idle_cpu_ivb),
        INTEL_CPU_FAM6(IVYBRIDGE_X,             idle_cpu_ivt),
        INTEL_CPU_FAM6(HASWELL,                 idle_cpu_hsw),
-       INTEL_CPU_FAM6(HASWELL_X,               idle_cpu_hsw),
+       INTEL_CPU_FAM6(HASWELL_X,               idle_cpu_hsx),
        INTEL_CPU_FAM6(HASWELL_L,               idle_cpu_hsw),
        INTEL_CPU_FAM6(HASWELL_G,               idle_cpu_hsw),
        INTEL_CPU_FAM6(ATOM_SILVERMONT_D,       idle_cpu_avn),
        INTEL_CPU_FAM6(BROADWELL,               idle_cpu_bdw),
        INTEL_CPU_FAM6(BROADWELL_G,             idle_cpu_bdw),
-       INTEL_CPU_FAM6(BROADWELL_X,             idle_cpu_bdw),
-       INTEL_CPU_FAM6(BROADWELL_D,             idle_cpu_bdw),
+       INTEL_CPU_FAM6(BROADWELL_X,             idle_cpu_bdx),
+       INTEL_CPU_FAM6(BROADWELL_D,             idle_cpu_bdx),
        INTEL_CPU_FAM6(SKYLAKE_L,               idle_cpu_skl),
        INTEL_CPU_FAM6(SKYLAKE,                 idle_cpu_skl),
        INTEL_CPU_FAM6(KABYLAKE_L,              idle_cpu_skl),
@@ -1095,76 +1107,169 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
        {}
 };
 
-/*
- * intel_idle_probe()
+#define INTEL_CPU_FAM6_MWAIT \
+       { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 }
+
+static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
+       INTEL_CPU_FAM6_MWAIT,
+       {}
+};
+
+static bool __init intel_idle_max_cstate_reached(int cstate)
+{
+       if (cstate + 1 > max_cstate) {
+               pr_info("max_cstate %d reached\n", max_cstate);
+               return true;
+       }
+       return false;
+}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+#include <acpi/processor.h>
+
+static bool no_acpi __read_mostly;
+module_param(no_acpi, bool, 0444);
+MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
+
+static struct acpi_processor_power acpi_state_table __initdata;
+
+/**
+ * intel_idle_cst_usable - Check if the _CST information can be used.
+ *
+ * Check if all of the C-states listed by _CST in the max_cstate range are
+ * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
  */
-static int __init intel_idle_probe(void)
+static bool __init intel_idle_cst_usable(void)
 {
-       unsigned int eax, ebx, ecx;
-       const struct x86_cpu_id *id;
+       int cstate, limit;
 
-       if (max_cstate == 0) {
-               pr_debug("disabled\n");
-               return -EPERM;
-       }
+       limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
+                     acpi_state_table.count);
 
-       id = x86_match_cpu(intel_idle_ids);
-       if (!id) {
-               if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-                   boot_cpu_data.x86 == 6)
-                       pr_debug("does not run on family %d model %d\n",
-                                boot_cpu_data.x86, boot_cpu_data.x86_model);
-               return -ENODEV;
+       for (cstate = 1; cstate < limit; cstate++) {
+               struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
+
+               if (cx->entry_method != ACPI_CSTATE_FFH)
+                       return false;
        }
 
-       if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
-               pr_debug("Please enable MWAIT in BIOS SETUP\n");
-               return -ENODEV;
+       return true;
+}
+
+static bool __init intel_idle_acpi_cst_extract(void)
+{
+       unsigned int cpu;
+
+       if (no_acpi) {
+               pr_debug("Not allowed to use ACPI _CST\n");
+               return false;
        }
 
-       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
-               return -ENODEV;
+       for_each_possible_cpu(cpu) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
 
-       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+               if (!pr)
+                       continue;
 
-       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-           !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
-           !mwait_substates)
-                       return -ENODEV;
+               if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
+                       continue;
 
-       pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+               acpi_state_table.count++;
 
-       icpu = (const struct idle_cpu *)id->driver_data;
-       cpuidle_state_table = icpu->state_table;
+               if (!intel_idle_cst_usable())
+                       continue;
 
-       pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
-                boot_cpu_data.x86_model);
+               if (!acpi_processor_claim_cst_control()) {
+                       acpi_state_table.count = 0;
+                       return false;
+               }
 
-       return 0;
+               return true;
+       }
+
+       pr_debug("ACPI _CST not found or not usable\n");
+       return false;
 }
 
-/*
- * intel_idle_cpuidle_devices_uninit()
- * Unregisters the cpuidle devices.
- */
-static void intel_idle_cpuidle_devices_uninit(void)
+static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
 {
-       int i;
-       struct cpuidle_device *dev;
+       int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+
+       /*
+        * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+        * the interesting states are ACPI_CSTATE_FFH.
+        */
+       for (cstate = 1; cstate < limit; cstate++) {
+               struct acpi_processor_cx *cx;
+               struct cpuidle_state *state;
 
-       for_each_online_cpu(i) {
-               dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
-               cpuidle_unregister_device(dev);
+               if (intel_idle_max_cstate_reached(cstate))
+                       break;
+
+               cx = &acpi_state_table.states[cstate];
+
+               state = &drv->states[drv->state_count++];
+
+               snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
+               strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+               state->exit_latency = cx->latency;
+               /*
+                * For C1-type C-states use the same number for both the exit
+                * latency and target residency, because that is the case for
+                * C1 in the majority of the static C-states tables above.
+                * For the other types of C-states, however, set the target
+                * residency to 3 times the exit latency, which should lead to
+                * a reasonable balance between energy-efficiency and
+                * performance in the majority of interesting cases.
+                */
+               state->target_residency = cx->latency;
+               if (cx->type > ACPI_STATE_C1)
+                       state->target_residency *= 3;
+
+               state->flags = MWAIT2flg(cx->address);
+               if (cx->type > ACPI_STATE_C2)
+                       state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+
+               state->enter = intel_idle;
+               state->enter_s2idle = intel_idle_s2idle;
        }
 }
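
To make the residency heuristic concrete (illustrative _CST numbers only):

    /*
     * Example: a _CST entry of type C3 with latency = 100 us becomes
     *   exit_latency     = 100 us
     *   target_residency = 3 * 100 = 300 us
     * while a C1 entry with latency = 2 us keeps target_residency = 2 us.
     */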
 
+static bool __init intel_idle_off_by_default(u32 mwait_hint)
+{
+       int cstate, limit;
+
+       /*
+        * If there are no _CST C-states, do not disable any C-states by
+        * default.
+        */
+       if (!acpi_state_table.count)
+               return false;
+
+       limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+       /*
+        * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+        * the interesting states are ACPI_CSTATE_FFH.
+        */
+       for (cstate = 1; cstate < limit; cstate++) {
+               if (acpi_state_table.states[cstate].address == mwait_hint)
+                       return false;
+       }
+       return true;
+}
+#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+static inline bool intel_idle_acpi_cst_extract(void) { return false; }
+static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
+static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
+#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+
 /*
  * ivt_idle_state_table_update(void)
  *
  * Tune IVT multi-socket targets
  * Assumption: num_sockets == (max_package_num + 1)
  */
-static void ivt_idle_state_table_update(void)
+static void __init ivt_idle_state_table_update(void)
 {
        /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
        int cpu, package_num, num_sockets = 1;
@@ -1187,15 +1292,17 @@ static void ivt_idle_state_table_update(void)
        /* else, 1 and 2 socket systems use default ivt_cstates */
 }
 
-/*
- * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+/**
+ * irtl_2_usec - IRTL to microseconds conversion.
+ * @irtl: IRTL MSR value.
+ *
+ * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
  */
-
-static unsigned int irtl_ns_units[] = {
-       1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
-
-static unsigned long long irtl_2_usec(unsigned long long irtl)
+static unsigned long long __init irtl_2_usec(unsigned long long irtl)
 {
+       static const unsigned int irtl_ns_units[] __initconst = {
+               1, 32, 1024, 32768, 1048576, 33554432, 0, 0
+       };
        unsigned long long ns;
 
        if (!irtl)
@@ -1203,15 +1310,16 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
 
        ns = irtl_ns_units[(irtl >> 10) & 0x7];
 
-       return div64_u64((irtl & 0x3FF) * ns, 1000);
+       return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
 }
+
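A worked example of the IRTL conversion (the MSR value is made up): bits
12:10 select the time unit, bits 9:0 the count.

    /*
     * irtl = 0x860:
     *   unit  = irtl_ns_units[(0x860 >> 10) & 0x7] = irtl_ns_units[2] = 1024 ns
     *   count = 0x860 & 0x3FF = 96
     *   usec  = div_u64(96 * 1024, NSEC_PER_USEC) = 98304 / 1000 = 98 us
     */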
 /*
  * bxt_idle_state_table_update(void)
  *
  * On BXT, we trust the IRTL to show the definitive maximum latency.
  * We use the same value for target_residency.
  */
-static void bxt_idle_state_table_update(void)
+static void __init bxt_idle_state_table_update(void)
 {
        unsigned long long msr;
        unsigned int usec;
@@ -1258,7 +1366,7 @@ static void bxt_idle_state_table_update(void)
  * On SKL-H (model 0x5e) disable C8 and C9 if:
  * C10 is enabled and SGX disabled
  */
-static void sklh_idle_state_table_update(void)
+static void __init sklh_idle_state_table_update(void)
 {
        unsigned long long msr;
        unsigned int eax, ebx, ecx, edx;
@@ -1294,16 +1402,28 @@ static void sklh_idle_state_table_update(void)
        skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE;  /* C8-SKL */
        skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE;  /* C9-SKL */
 }
-/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
- */
 
-static void intel_idle_state_table_update(void)
+static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
 {
-       switch (boot_cpu_data.x86_model) {
+       unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
+       unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
+                                       MWAIT_SUBSTATE_MASK;
+
+       /* Ignore the C-state if there are NO sub-states in CPUID for it. */
+       if (num_substates == 0)
+               return false;
+
+       if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+               mark_tsc_unstable("TSC halts in idle states deeper than C2");
 
+       return true;
+}
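
The sub-state check reads one nibble per C-state out of CPUID leaf 5's EDX;
with a hypothetical mwait_substates of 0x00001120:

    /*
     * mwait_substates = 0x00001120:
     *   C1 (mwait_cstate = 1): (0x1120 >> 4)  & 0xF = 2 sub-states -> keep
     *   C2 (mwait_cstate = 2): (0x1120 >> 8)  & 0xF = 1 sub-state  -> keep
     *   C3 (mwait_cstate = 3): (0x1120 >> 12) & 0xF = 1 sub-state  -> keep,
     *      but the TSC is marked unstable unless X86_FEATURE_NONSTOP_TSC.
     */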
+
+static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+{
+       int cstate;
+
+       switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_IVYBRIDGE_X:
                ivt_idle_state_table_update();
                break;
@@ -1315,62 +1435,36 @@ static void intel_idle_state_table_update(void)
                sklh_idle_state_table_update();
                break;
        }
-}
-
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
- */
-static void __init intel_idle_cpuidle_driver_init(void)
-{
-       int cstate;
-       struct cpuidle_driver *drv = &intel_idle_driver;
-
-       intel_idle_state_table_update();
-
-       cpuidle_poll_state_init(drv);
-       drv->state_count = 1;
 
        for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
-               int num_substates, mwait_hint, mwait_cstate;
+               unsigned int mwait_hint;
 
-               if ((cpuidle_state_table[cstate].enter == NULL) &&
-                   (cpuidle_state_table[cstate].enter_s2idle == NULL))
+               if (intel_idle_max_cstate_reached(cstate))
                        break;
 
-               if (cstate + 1 > max_cstate) {
-                       pr_info("max_cstate %d reached\n", max_cstate);
+               if (!cpuidle_state_table[cstate].enter &&
+                   !cpuidle_state_table[cstate].enter_s2idle)
                        break;
-               }
-
-               mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
-               mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
-
-               /* number of sub-states for this state in CPUID.MWAIT */
-               num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
-                                       & MWAIT_SUBSTATE_MASK;
-
-               /* if NO sub-states for this state in CPUID, skip it */
-               if (num_substates == 0)
-                       continue;
 
-               /* if state marked as disabled, skip it */
+               /* If marked as unusable, skip this state. */
                if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
                        pr_debug("state %s is disabled\n",
                                 cpuidle_state_table[cstate].name);
                        continue;
                }
 
+               mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+               if (!intel_idle_verify_cstate(mwait_hint))
+                       continue;
 
-               if (((mwait_cstate + 1) > 2) &&
-                       !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-                       mark_tsc_unstable("TSC halts in idle"
-                                       " states deeper than C2");
+               /* Structure copy. */
+               drv->states[drv->state_count] = cpuidle_state_table[cstate];
 
-               drv->states[drv->state_count] = /* structure copy */
-                       cpuidle_state_table[cstate];
+               if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) &&
+                   !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
+                       drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
 
-               drv->state_count += 1;
+               drv->state_count++;
        }
 
        if (icpu->byt_auto_demotion_disable_flag) {
@@ -1379,6 +1473,38 @@ static void __init intel_idle_cpuidle_driver_init(void)
        }
 }
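
Putting the flags together, a driver-table state ends up disabled by default
only when three conditions all hold (a paraphrase of the loop above, not
extra driver code):

    /*
     * drv->states[i].flags |= CPUIDLE_FLAG_OFF  iff
     *   1. icpu->use_acpi                        (model opted into _CST checks)
     *   2. intel_idle_off_by_default(hint)       (hint missing from _CST)
     *   3. !(flags & CPUIDLE_FLAG_ALWAYS_ENABLE) (not force-enabled, e.g. C1E)
     */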
 
+/*
+ * intel_idle_cpuidle_driver_init()
+ * Allocate and initialize the cpuidle states.
+ */
+static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+       cpuidle_poll_state_init(drv);
+       drv->state_count = 1;
+
+       if (icpu)
+               intel_idle_init_cstates_icpu(drv);
+       else
+               intel_idle_init_cstates_acpi(drv);
+}
+
+static void auto_demotion_disable(void)
+{
+       unsigned long long msr_bits;
+
+       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+       msr_bits &= ~(icpu->auto_demotion_disable_flags);
+       wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+}
+
+static void c1e_promotion_disable(void)
+{
+       unsigned long long msr_bits;
+
+       rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+       msr_bits &= ~0x2;
+       wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
 
 /*
  * intel_idle_cpu_init()
@@ -1397,6 +1523,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
                return -EIO;
        }
 
+       if (!icpu)
+               return 0;
+
        if (icpu->auto_demotion_disable_flags)
                auto_demotion_disable();
 
@@ -1411,7 +1540,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
        struct cpuidle_device *dev;
 
        if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
-               __setup_broadcast_timer(true);
+               tick_broadcast_enable();
 
        /*
         * Some systems can hotplug a cpu at runtime after
@@ -1425,23 +1554,74 @@ static int intel_idle_cpu_online(unsigned int cpu)
        return 0;
 }
 
+/**
+ * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
+ */
+static void __init intel_idle_cpuidle_devices_uninit(void)
+{
+       int i;
+
+       for_each_online_cpu(i)
+               cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
+}
+
 static int __init intel_idle_init(void)
 {
+       const struct x86_cpu_id *id;
+       unsigned int eax, ebx, ecx;
        int retval;
 
        /* Do not load intel_idle at all for now if idle= is passed */
        if (boot_option_idle_override != IDLE_NO_OVERRIDE)
                return -ENODEV;
 
-       retval = intel_idle_probe();
-       if (retval)
-               return retval;
+       if (max_cstate == 0) {
+               pr_debug("disabled\n");
+               return -EPERM;
+       }
+
+       id = x86_match_cpu(intel_idle_ids);
+       if (id) {
+               if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+                       pr_debug("Please enable MWAIT in BIOS SETUP\n");
+                       return -ENODEV;
+               }
+       } else {
+               id = x86_match_cpu(intel_mwait_ids);
+               if (!id)
+                       return -ENODEV;
+       }
+
+       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+               return -ENODEV;
+
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+           !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+           !mwait_substates)
+                       return -ENODEV;
+
+       pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+
+       icpu = (const struct idle_cpu *)id->driver_data;
+       if (icpu) {
+               cpuidle_state_table = icpu->state_table;
+               if (icpu->use_acpi)
+                       intel_idle_acpi_cst_extract();
+       } else if (!intel_idle_acpi_cst_extract()) {
+               return -ENODEV;
+       }
+
+       pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+                boot_cpu_data.x86_model);
 
        intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-       if (intel_idle_cpuidle_devices == NULL)
+       if (!intel_idle_cpuidle_devices)
                return -ENOMEM;
 
-       intel_idle_cpuidle_driver_init();
+       intel_idle_cpuidle_driver_init(&intel_idle_driver);
+
        retval = cpuidle_register_driver(&intel_idle_driver);
        if (retval) {
                struct cpuidle_driver *drv = cpuidle_get_driver();
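
After the rework, module load follows one of two paths (a summary of the
code above):

    /*
     * 1. CPU matches intel_idle_ids: use the static C-state table, and
     *    additionally consult ACPI _CST when icpu->use_acpi is set.
     * 2. CPU only matches intel_mwait_ids (any Intel family-6 CPU with
     *    MWAIT): no static table, so ACPI _CST must be usable or the
     *    driver bails out with -ENODEV.
     */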
index 7b837641f166e6305e0cfd4c26dba6df2dd163cf..7320275c7e56dee7ee0a6bb3af4461dc0c9826ab 100644 (file)
@@ -992,6 +992,7 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
 #define ST_ACCEL_TRIGGER_OPS NULL
 #endif
 
+#ifdef CONFIG_ACPI
 static const struct iio_mount_matrix *
 get_mount_matrix(const struct iio_dev *indio_dev,
                 const struct iio_chan_spec *chan)
@@ -1012,7 +1013,6 @@ static const struct iio_chan_spec_ext_info mount_matrix_ext_info[] = {
 static int apply_acpi_orientation(struct iio_dev *indio_dev,
                                  struct iio_chan_spec *channels)
 {
-#ifdef CONFIG_ACPI
        struct st_sensor_data *adata = iio_priv(indio_dev);
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_device *adev;
@@ -1140,10 +1140,14 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev,
 out:
        kfree(buffer.pointer);
        return ret;
+}
 #else /* !CONFIG_ACPI */
+static int apply_acpi_orientation(struct iio_dev *indio_dev,
+                                 struct iio_chan_spec *channels)
+{
        return 0;
-#endif
 }
+#endif
 
 /*
  * st_accel_get_settings() - get sensor settings from device name
index edc6f1cc90b2448d037174113bbb42c048d77ff7..306bf15023a7878cd5a6e987d63e378bf455162c 100644 (file)
@@ -39,6 +39,8 @@
 #define AD7124_STATUS_POR_FLAG_MSK     BIT(4)
 
 /* AD7124_ADC_CONTROL */
+#define AD7124_ADC_CTRL_REF_EN_MSK     BIT(8)
+#define AD7124_ADC_CTRL_REF_EN(x)      FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x)
 #define AD7124_ADC_CTRL_PWR_MSK        GENMASK(7, 6)
 #define AD7124_ADC_CTRL_PWR(x)         FIELD_PREP(AD7124_ADC_CTRL_PWR_MSK, x)
 #define AD7124_ADC_CTRL_MODE_MSK       GENMASK(5, 2)
@@ -424,7 +426,10 @@ static int ad7124_init_channel_vref(struct ad7124_state *st,
                break;
        case AD7124_INT_REF:
                st->channel_config[channel_number].vref_mv = 2500;
-               break;
+               st->adc_control &= ~AD7124_ADC_CTRL_REF_EN_MSK;
+               st->adc_control |= AD7124_ADC_CTRL_REF_EN(1);
+               return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL,
+                                     2, st->adc_control);
        default:
                dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel);
                return -EINVAL;
@@ -489,13 +494,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
                st->channel_config[channel].buf_negative =
                        of_property_read_bool(child, "adi,buffered-negative");
 
-               *chan = ad7124_channel_template;
-               chan->address = channel;
-               chan->scan_index = channel;
-               chan->channel = ain[0];
-               chan->channel2 = ain[1];
-
-               chan++;
+               chan[channel] = ad7124_channel_template;
+               chan[channel].address = channel;
+               chan[channel].scan_index = channel;
+               chan[channel].channel = ain[0];
+               chan[channel].channel2 = ain[1];
        }
 
        return 0;
index f5ba94c03a8d9d59cbd9eba8b841810bd0cf50e2..e4683a68522a4502957887e475f951e5be74e53b 100644 (file)
@@ -85,7 +85,7 @@ err_unlock:
 
 static int ad7606_read_samples(struct ad7606_state *st)
 {
-       unsigned int num = st->chip_info->num_channels;
+       unsigned int num = st->chip_info->num_channels - 1;
        u16 *data = st->data;
        int ret;
 
index 5c2b3446fa4afe8e1719c0842c1b89359433527a..2c6f60edb7ced4c6258dcc4f2791ddd80d83bb97 100644 (file)
@@ -89,6 +89,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
                                   unsigned int channel)
 {
        int ret;
+       int i;
        int bits_per_word = ad7949_adc->resolution;
        int mask = GENMASK(ad7949_adc->resolution, 0);
        struct spi_message msg;
@@ -100,12 +101,23 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
                },
        };
 
-       ret = ad7949_spi_write_cfg(ad7949_adc,
-                                  channel << AD7949_OFFSET_CHANNEL_SEL,
-                                  AD7949_MASK_CHANNEL_SEL);
-       if (ret)
-               return ret;
+       /*
+        * 1: write CFG for sample N and read old data (sample N-2)
+        * 2: if CFG has not changed since sample N-1, we will get good data
+        *    at the next xfer, so bail out now; otherwise write the CFG again
+        *    and read garbage (acquired with the sample N-1 configuration).
+        */
+       for (i = 0; i < 2; i++) {
+               ret = ad7949_spi_write_cfg(ad7949_adc,
+                                          channel << AD7949_OFFSET_CHANNEL_SEL,
+                                          AD7949_MASK_CHANNEL_SEL);
+               if (ret)
+                       return ret;
+               if (channel == ad7949_adc->current_channel)
+                       break;
+       }
 
+       /* 3: write something and read actual data */
        ad7949_adc->buffer = 0;
        spi_message_init_with_transfers(&msg, tx, 1);
        ret = spi_sync(ad7949_adc->spi, &msg);
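
The retry loop reflects the ADC's two-deep conversion pipeline;
schematically (step numbers as in the comment above):

    /*
     * xfer k : CFG written       data returned
     * -------:---------------------------------
     * N      : CFG(N)            result(N-2)
     * N+1    : CFG(N) again (*)  result(N-1) -- stale CFG if it changed
     * N+2    : don't care        result(N)   -- the sample we want
     *
     * (*) skipped when the channel is unchanged, since result(N-1) was
     *     then acquired with the same configuration anyway.
     */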
index 67d096f8180de12f3ec960f7eb1f74394336008e..c35a1beb817c242a0f1cc3d591e848ba15da2b31 100644 (file)
@@ -185,7 +185,7 @@ static int mrfld_adc_probe(struct platform_device *pdev)
        int irq;
        int ret;
 
-       indio_dev = devm_iio_device_alloc(dev, sizeof(*indio_dev));
+       indio_dev = devm_iio_device_alloc(dev, sizeof(struct mrfld_adc));
        if (!indio_dev)
                return -ENOMEM;
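
The sizeof fix matters because devm_iio_device_alloc() takes the size of the
*private* data, which callers later fetch with iio_priv(); a minimal sketch
of the idiom:

    struct mrfld_adc *adc;

    indio_dev = devm_iio_device_alloc(dev, sizeof(struct mrfld_adc));
    if (!indio_dev)
            return -ENOMEM;
    adc = iio_priv(indio_dev);      /* points into the private area */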
 
index e171db20c04a7bbcc8d1c2745abcf5ebf4e47b2a..02834ca3e1cede38c527e4c11dc64d0740c29ad6 100644 (file)
@@ -478,7 +478,13 @@ static int max1027_probe(struct spi_device *spi)
                st->trig->ops = &max1027_trigger_ops;
                st->trig->dev.parent = &spi->dev;
                iio_trigger_set_drvdata(st->trig, indio_dev);
-               iio_trigger_register(st->trig);
+               ret = devm_iio_trigger_register(&indio_dev->dev,
+                                               st->trig);
+               if (ret < 0) {
+                       dev_err(&indio_dev->dev,
+                               "Failed to register iio trigger\n");
+                       return ret;
+               }
 
                ret = devm_request_threaded_irq(&spi->dev, spi->irq,
                                                iio_trigger_generic_data_rdy_poll,
index da073d72f649f829997f6f3ca9a43c13399aba0d..e480529b3f0497cb3369bf62d6b79f57146b2d72 100644 (file)
 #define MAX9611_TEMP_SCALE_NUM         1000000
 #define MAX9611_TEMP_SCALE_DIV         2083
 
+/*
+ * Conversion time is typically 2 ms at Ta = 25 degrees C.
+ * No maximum value is known, so play it safe.
+ */
+#define MAX9611_CONV_TIME_US_RANGE     3000, 3300
+
 struct max9611_dev {
        struct device *dev;
        struct i2c_client *i2c_client;
@@ -236,11 +242,9 @@ static int max9611_read_single(struct max9611_dev *max9611,
                return ret;
        }
 
-       /*
-        * need a delay here to make register configuration
-        * stabilize. 1 msec at least, from empirical testing.
-        */
-       usleep_range(1000, 2000);
+       /* A delay is needed here to let the register configuration stabilize. */
+       usleep_range(MAX9611_CONV_TIME_US_RANGE);
 
        ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr);
        if (ret < 0) {
@@ -507,7 +511,7 @@ static int max9611_init(struct max9611_dev *max9611)
                        MAX9611_REG_CTRL2, 0);
                return ret;
        }
-       usleep_range(1000, 2000);
+       usleep_range(MAX9611_CONV_TIME_US_RANGE);
 
        return 0;
 }
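
Worth noting in passing: MAX9611_CONV_TIME_US_RANGE expands to two
comma-separated values, so a single macro supplies both bounds of
usleep_range():

    /* #define MAX9611_CONV_TIME_US_RANGE   3000, 3300 */
    usleep_range(MAX9611_CONV_TIME_US_RANGE);  /* == usleep_range(3000, 3300) */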
index fa4586037bb8ebcfaf0d978619e5d2475bbf85f1..0b91de4df8f4609daf0ad14d80db9f3665d38d34 100644 (file)
@@ -65,6 +65,7 @@ config IAQCORE
 config PMS7003
        tristate "Plantower PMS7003 particulate matter sensor"
        depends on SERIAL_DEV_BUS
+       select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
          Say Y here to build support for the Plantower PMS7003 particulate
index 963ff043eecf97b77d7990c84f0bcc70b60915a6..7ecd2ffa3132590551591826a70c07018efc6bb9 100644 (file)
@@ -229,7 +229,7 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
                        *val2 = 65536;
                        return IIO_VAL_FRACTIONAL;
                } else {
-                       *val = 100;
+                       *val = 100000;
                        *val2 = 65536;
                        return IIO_VAL_FRACTIONAL;
                }
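
The humidity scale fix is a units bug: with IIO_VAL_FRACTIONAL the reported
scale is val/val2, and relative humidity is expected in milli-percent, so
(assuming that convention here):

    /*
     * old: scale = 100    / 65536 ~= 0.0015  (off by a factor of 1000)
     * new: scale = 100000 / 65536 ~= 1.52588 milli-percent per LSB,
     *      i.e. full scale (65536) maps to 100% RH.
     */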
index 45e77b308238b98af9d97216dc2258102797ced3..0686e41bb8a1c2ac425b7e8e31858060ea83912c 100644 (file)
@@ -117,6 +117,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6050,
                .config = &chip_config_6050,
                .fifo_size = 1024,
+               .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE},
        },
        {
                .whoami = INV_MPU6500_WHOAMI_VALUE,
@@ -124,6 +125,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
                .fifo_size = 512,
+               .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
        },
        {
                .whoami = INV_MPU6515_WHOAMI_VALUE,
@@ -131,6 +133,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
                .fifo_size = 512,
+               .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
        },
        {
                .whoami = INV_MPU6000_WHOAMI_VALUE,
@@ -138,6 +141,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6050,
                .config = &chip_config_6050,
                .fifo_size = 1024,
+               .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE},
        },
        {
                .whoami = INV_MPU9150_WHOAMI_VALUE,
@@ -145,6 +149,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6050,
                .config = &chip_config_6050,
                .fifo_size = 1024,
+               .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE},
        },
        {
                .whoami = INV_MPU9250_WHOAMI_VALUE,
@@ -152,6 +157,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
                .fifo_size = 512,
+               .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
        },
        {
                .whoami = INV_MPU9255_WHOAMI_VALUE,
@@ -159,6 +165,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
                .fifo_size = 512,
+               .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE},
        },
        {
                .whoami = INV_ICM20608_WHOAMI_VALUE,
@@ -166,6 +173,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6500,
                .config = &chip_config_6050,
                .fifo_size = 512,
+               .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
        },
        {
                .whoami = INV_ICM20602_WHOAMI_VALUE,
@@ -173,6 +181,7 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_icm20602,
                .config = &chip_config_6050,
                .fifo_size = 1008,
+               .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE},
        },
 };
 
@@ -481,12 +490,8 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
 
                        return IIO_VAL_INT_PLUS_MICRO;
                case IIO_TEMP:
-                       *val = 0;
-                       if (st->chip_type == INV_ICM20602)
-                               *val2 = INV_ICM20602_TEMP_SCALE;
-                       else
-                               *val2 = INV_MPU6050_TEMP_SCALE;
-
+                       *val = st->hw->temp.scale / 1000000;
+                       *val2 = st->hw->temp.scale % 1000000;
                        return IIO_VAL_INT_PLUS_MICRO;
                case IIO_MAGN:
                        return inv_mpu_magn_get_scale(st, chan, val, val2);
@@ -496,11 +501,7 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_OFFSET:
                switch (chan->type) {
                case IIO_TEMP:
-                       if (st->chip_type == INV_ICM20602)
-                               *val = INV_ICM20602_TEMP_OFFSET;
-                       else
-                               *val = INV_MPU6050_TEMP_OFFSET;
-
+                       *val = st->hw->temp.offset;
                        return IIO_VAL_INT;
                default:
                        return -EINVAL;
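
With IIO's (raw + offset) * scale convention, the per-chip constants
reproduce the datasheet formulas; e.g. for the MPU6500 values introduced
below (a back-of-the-envelope check):

    /*
     * raw = 0: (0 + 7011) * 2.995178 milli-degC ~= 21000 milli-degC = 21 C,
     * the sensor's room-temperature zero point.  The scale splits as
     *   val  = 2995178 / 1000000 = 2
     *   val2 = 2995178 % 1000000 = 995178
     * giving the IIO_VAL_INT_PLUS_MICRO pair 2.995178.
     */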
index f1fb7b6bdab1cb4cae12f45e5709a7fd87cda86c..b096e010d4ee38ad4c2794ab2dd3508e963d5319 100644 (file)
@@ -107,6 +107,7 @@ struct inv_mpu6050_chip_config {
  *  @reg:   register map of the chip.
  *  @config:    configuration of the chip.
  *  @fifo_size:        size of the FIFO in bytes.
+ *  @temp:     offset and scale to apply to raw temperature.
  */
 struct inv_mpu6050_hw {
        u8 whoami;
@@ -114,6 +115,10 @@ struct inv_mpu6050_hw {
        const struct inv_mpu6050_reg_map *reg;
        const struct inv_mpu6050_chip_config *config;
        size_t fifo_size;
+       struct {
+               int offset;
+               int scale;
+       } temp;
 };
 
 /*
@@ -279,16 +284,19 @@ struct inv_mpu6050_state {
 #define INV_MPU6050_REG_UP_TIME_MIN          5000
 #define INV_MPU6050_REG_UP_TIME_MAX          10000
 
-#define INV_MPU6050_TEMP_OFFSET                     12421
-#define INV_MPU6050_TEMP_SCALE               2941
+#define INV_MPU6050_TEMP_OFFSET                     12420
+#define INV_MPU6050_TEMP_SCALE               2941176
 #define INV_MPU6050_MAX_GYRO_FS_PARAM        3
 #define INV_MPU6050_MAX_ACCL_FS_PARAM        3
 #define INV_MPU6050_THREE_AXIS               3
 #define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT    3
 #define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT    3
 
-#define INV_ICM20602_TEMP_OFFSET            8170
-#define INV_ICM20602_TEMP_SCALE                     3060
+#define INV_MPU6500_TEMP_OFFSET              7011
+#define INV_MPU6500_TEMP_SCALE               2995178
+
+#define INV_ICM20608_TEMP_OFFSET            8170
+#define INV_ICM20608_TEMP_SCALE                     3059976
 
 /* 6 + 6 + 7 (for MPU9x50) = 19 round up to 24 and plus 8 */
 #define INV_MPU6050_OUTPUT_DATA_SIZE         32
index c605b153be410af4f4ad8fc4fd1aee17838989ed..dc55d7dff3eb9b762104ff26927fa674323abdb2 100644 (file)
@@ -320,7 +320,6 @@ enum st_lsm6dsx_fifo_mode {
  * @odr: Output data rate of the sensor [Hz].
  * @watermark: Sensor watermark level.
  * @sip: Number of samples in a given pattern.
- * @decimator: FIFO decimation factor.
  * @ts_ref: Sensor timestamp reference for hw one.
  * @ext_info: Sensor settings if it is connected to i2c controller
  */
@@ -334,7 +333,6 @@ struct st_lsm6dsx_sensor {
 
        u16 watermark;
        u8 sip;
-       u8 decimator;
        s64 ts_ref;
 
        struct {
@@ -351,9 +349,9 @@ struct st_lsm6dsx_sensor {
  * @fifo_lock: Mutex to prevent concurrent access to the hw FIFO.
  * @conf_lock: Mutex to prevent concurrent FIFO configuration update.
  * @page_lock: Mutex to prevent concurrent memory page configuration.
- * @fifo_mode: FIFO operating mode supported by the device.
  * @suspend_mask: Suspended sensor bitmask.
  * @enable_mask: Enabled sensor bitmask.
+ * @fifo_mask: Enabled hw FIFO bitmask.
  * @ts_gain: Hw timestamp rate after internal calibration.
  * @ts_sip: Total number of timestamp samples in a given pattern.
  * @sip: Total number of samples (acc/gyro/ts) in a given pattern.
@@ -373,9 +371,9 @@ struct st_lsm6dsx_hw {
        struct mutex conf_lock;
        struct mutex page_lock;
 
-       enum st_lsm6dsx_fifo_mode fifo_mode;
        u8 suspend_mask;
        u8 enable_mask;
+       u8 fifo_mask;
        s64 ts_gain;
        u8 ts_sip;
        u8 sip;
index d416990ae309d7ba6e5bdb203ee9c8a4d6f51ddf..cb536b81a1c29a8a4b28c095a5c4cf39ccc2643f 100644 (file)
@@ -78,14 +78,20 @@ struct st_lsm6dsx_decimator_entry st_lsm6dsx_decimator_table[] = {
        { 32, 0x7 },
 };
 
-static int st_lsm6dsx_get_decimator_val(u8 val)
+static int
+st_lsm6dsx_get_decimator_val(struct st_lsm6dsx_sensor *sensor, u32 max_odr)
 {
        const int max_size = ARRAY_SIZE(st_lsm6dsx_decimator_table);
+       u32 decimator = max_odr / sensor->odr;
        int i;
 
-       for (i = 0; i < max_size; i++)
-               if (st_lsm6dsx_decimator_table[i].decimator == val)
+       if (decimator > 1)
+               decimator = round_down(decimator, 2);
+
+       for (i = 0; i < max_size; i++) {
+               if (st_lsm6dsx_decimator_table[i].decimator == decimator)
                        break;
+       }
 
        return i == max_size ? 0 : st_lsm6dsx_decimator_table[i].val;
 }
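
A quick numeric pass through the new decimator derivation (rates are
hypothetical but typical for these parts):

    /*
     * gyro at max_odr = 416 Hz, accel at odr = 104 Hz:
     *   decimator = 416 / 104 = 4, already even, looked up directly;
     * an odd ratio such as 312 / 104 = 3 is rounded down to 2 first,
     * mirroring st_lsm6dsx_get_sip() so samples-in-pattern and
     * decimation factors stay consistent.
     */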
@@ -111,6 +117,13 @@ static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
        }
 }
 
+static u8 st_lsm6dsx_get_sip(struct st_lsm6dsx_sensor *sensor, u32 min_odr)
+{
+       u8 sip = sensor->odr / min_odr;
+
+       return sip > 1 ? round_down(sip, 2) : sip;
+}
+
 static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
 {
        const struct st_lsm6dsx_reg *ts_dec_reg;
@@ -131,12 +144,10 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
                sensor = iio_priv(hw->iio_devs[i]);
                /* update fifo decimators and sample in pattern */
                if (hw->enable_mask & BIT(sensor->id)) {
-                       sensor->sip = sensor->odr / min_odr;
-                       sensor->decimator = max_odr / sensor->odr;
-                       data = st_lsm6dsx_get_decimator_val(sensor->decimator);
+                       sensor->sip = st_lsm6dsx_get_sip(sensor, min_odr);
+                       data = st_lsm6dsx_get_decimator_val(sensor, max_odr);
                } else {
                        sensor->sip = 0;
-                       sensor->decimator = 0;
                        data = 0;
                }
                ts_sip = max_t(u16, ts_sip, sensor->sip);
@@ -176,17 +187,10 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
                             enum st_lsm6dsx_fifo_mode fifo_mode)
 {
        unsigned int data;
-       int err;
 
        data = FIELD_PREP(ST_LSM6DSX_FIFO_MODE_MASK, fifo_mode);
-       err = st_lsm6dsx_update_bits_locked(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
-                                           ST_LSM6DSX_FIFO_MODE_MASK, data);
-       if (err < 0)
-               return err;
-
-       hw->fifo_mode = fifo_mode;
-
-       return 0;
+       return st_lsm6dsx_update_bits_locked(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+                                            ST_LSM6DSX_FIFO_MODE_MASK, data);
 }
 
 static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
@@ -608,11 +612,17 @@ int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw)
 int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable)
 {
        struct st_lsm6dsx_hw *hw = sensor->hw;
+       u8 fifo_mask;
        int err;
 
        mutex_lock(&hw->conf_lock);
 
-       if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS) {
+       if (enable)
+               fifo_mask = hw->fifo_mask | BIT(sensor->id);
+       else
+               fifo_mask = hw->fifo_mask & ~BIT(sensor->id);
+
+       if (hw->fifo_mask) {
                err = st_lsm6dsx_flush_fifo(hw);
                if (err < 0)
                        goto out;
@@ -642,15 +652,19 @@ int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable)
        if (err < 0)
                goto out;
 
-       if (hw->enable_mask) {
+       if (fifo_mask) {
                /* reset hw ts counter */
                err = st_lsm6dsx_reset_hw_ts(hw);
                if (err < 0)
                        goto out;
 
                err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
+               if (err < 0)
+                       goto out;
        }
 
+       hw->fifo_mask = fifo_mask;
+
 out:
        mutex_unlock(&hw->conf_lock);
 
index 11b2c7bc8041a61cdb85c6f1059020700d83bd28..b921dd9e108faa5bfd017a1a7550c5095ff0c924 100644 (file)
@@ -1301,7 +1301,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id,
 
        for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
                for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
-                       if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
+                       if (st_lsm6dsx_sensor_settings[i].id[j].name &&
+                           id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
                                break;
                }
                if (j < ST_LSM6DSX_MAX_ID)
@@ -1447,8 +1448,9 @@ st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
        return st_lsm6dsx_update_bits_locked(hw, reg->addr, reg->mask, data);
 }
 
-int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
-                                bool enable)
+static int
+__st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
+                              bool enable)
 {
        struct st_lsm6dsx_hw *hw = sensor->hw;
        u32 odr = enable ? sensor->odr : 0;
@@ -1466,6 +1468,26 @@ int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
        return 0;
 }
 
+static int
+st_lsm6dsx_check_events(struct st_lsm6dsx_sensor *sensor, bool enable)
+{
+       struct st_lsm6dsx_hw *hw = sensor->hw;
+
+       if (sensor->id == ST_LSM6DSX_ID_GYRO || enable)
+               return 0;
+
+       return hw->enable_event;
+}
+
+int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
+                                bool enable)
+{
+       if (st_lsm6dsx_check_events(sensor, enable))
+               return 0;
+
+       return __st_lsm6dsx_sensor_set_enable(sensor, enable);
+}
+
 static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
                                   u8 addr, int *val)
 {
@@ -1661,7 +1683,7 @@ st_lsm6dsx_write_event_config(struct iio_dev *iio_dev,
        struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
        struct st_lsm6dsx_hw *hw = sensor->hw;
        u8 enable_event;
-       int err = 0;
+       int err;
 
        if (type != IIO_EV_TYPE_THRESH)
                return -EINVAL;
@@ -1689,7 +1711,8 @@ st_lsm6dsx_write_event_config(struct iio_dev *iio_dev,
                return err;
 
        mutex_lock(&hw->conf_lock);
-       err = st_lsm6dsx_sensor_set_enable(sensor, state);
+       if (enable_event || !(hw->fifo_mask & BIT(sensor->id)))
+               err = __st_lsm6dsx_sensor_set_enable(sensor, state);
        mutex_unlock(&hw->conf_lock);
        if (err < 0)
                return err;
@@ -2300,7 +2323,7 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
                hw->suspend_mask |= BIT(sensor->id);
        }
 
-       if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS)
+       if (hw->fifo_mask)
                err = st_lsm6dsx_flush_fifo(hw);
 
        return err;
@@ -2336,7 +2359,7 @@ static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
                hw->suspend_mask &= ~BIT(sensor->id);
        }
 
-       if (hw->enable_mask)
+       if (hw->fifo_mask)
                err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
 
        return err;
index c193d64e52179f296f8c68c1af09fdd77b16b425..112225c0e4868b0fbf4d6d31b366b2c610b53f2d 100644 (file)
@@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                const unsigned long *mask, bool timestamp)
 {
        unsigned bytes = 0;
-       int length, i;
+       int length, i, largest = 0;
 
        /* How much space will the demuxed element take? */
        for_each_set_bit(i, mask,
@@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                length = iio_storage_bytes_for_si(indio_dev, i);
                bytes = ALIGN(bytes, length);
                bytes += length;
+               largest = max(largest, length);
        }
 
        if (timestamp) {
                length = iio_storage_bytes_for_timestamp(indio_dev);
                bytes = ALIGN(bytes, length);
                bytes += length;
+               largest = max(largest, length);
        }
+
+       bytes = ALIGN(bytes, largest);
        return bytes;
 }
 
index 16dacea9eadfa540afd57c7889339fc03384f639..b0e241aaefb48863c28f06235bdab3b3ac221fa7 100644 (file)
@@ -163,7 +163,6 @@ static int vcnl4200_init(struct vcnl4000_data *data)
        if (ret < 0)
                return ret;
 
-       data->al_scale = 24000;
        data->vcnl4200_al.reg = VCNL4200_AL_DATA;
        data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
        switch (id) {
@@ -172,11 +171,13 @@ static int vcnl4200_init(struct vcnl4000_data *data)
                /* show 54ms in total. */
                data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
                data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+               data->al_scale = 24000;
                break;
        case VCNL4040_PROD_ID:
                /* Integration time is 80ms, add 10ms. */
                data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
                data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+               data->al_scale = 120000;
                break;
        }
        data->vcnl4200_al.last_measurement = ktime_set(0, 0);
index ddf47023364b0c840e6579b2e1f29d19de236645..d39c0d6b77f1ca2bbf20ffc0865846912bbb8885 100644 (file)
@@ -444,8 +444,10 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
                        else
                                temp = __convert_to_raw(temp, resolution);
                } else {
-                       of_property_read_u32_index(np, propname, index,
-                                                  (u32 *)&temp);
+                       u32 t32;
+
+                       of_property_read_u32_index(np, propname, index, &t32);
+                       temp = t32;
                }
 
                for (j = 0; j < n_size; j++)
index 25f2b70fd8efb1e29de3b4d93a22a1aba411af13..43a6f07e0afe2b9ffb62e59c052e189a15569189 100644 (file)
@@ -4763,6 +4763,7 @@ err_ib:
 err:
        unregister_netdevice_notifier(&cma_nb);
        ib_sa_unregister_client(&sa_client);
+       unregister_pernet_subsys(&cma_pernet_operations);
 err_wq:
        destroy_workqueue(cma_wq);
        return ret;
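
The added unregister_pernet_subsys() restores the usual LIFO unwind: each err_* label must undo everything initialized before the failing step, in reverse order, or the pernet registration leaks on failure. The canonical shape, as a sketch (register_a()/unregister_a() and friends are placeholders, not functions from this patch):

    static int register_a(void), register_b(void), register_c(void);
    static void unregister_a(void), unregister_b(void);

    static int demo_init(void)
    {
            int ret;

            ret = register_a();
            if (ret)
                    return ret;
            ret = register_b();
            if (ret)
                    goto err_a;             /* undo a only */
            ret = register_c();
            if (ret)
                    goto err_b;             /* undo b, then a */
            return 0;
    err_b:
            unregister_b();
    err_a:
            unregister_a();
            return ret;
    }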
index 8434ec082c3ae43290961416e4f138a83317ccb9..2257d7f7810fd99396257039e897c5ddc3b59d31 100644 (file)
@@ -286,6 +286,9 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
        struct rdma_counter *counter;
        int ret;
 
+       if (!qp->res.valid)
+               return 0;
+
        if (!rdma_is_port_valid(dev, port))
                return -EINVAL;
 
index f509c478b469d118c8c3ed66ca4b1876d704ec4f..b7cb59844ece450e491f5b18e47105c88b3b2527 100644 (file)
@@ -238,28 +238,32 @@ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
 EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
 
 /**
- * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa
+ * rdma_user_mmap_entry_insert_range() - Insert an entry to the mmap_xa
+ *                                      in a given range.
  *
  * @ucontext: associated user context.
  * @entry: the entry to insert into the mmap_xa
  * @length: length of the address that will be mmapped
+ * @min_pgoff: minimum pgoff to be returned
+ * @max_pgoff: maximum pgoff to be returned
  *
  * This function should be called by drivers that use the rdma_user_mmap
 * interface for implementing their mmap syscall. A database of mmap offsets is
  * handled in the core and helper functions are provided to insert entries
  * into the database and extract entries when the user calls mmap with the
- * given offset.  The function allocates a unique page offset that should be
- * provided to user, the user will use the offset to retrieve information such
- * as address to be mapped and how.
+ * given offset. The function allocates a unique page offset in a given range
+ * that should be provided to the user; the user will use the offset to
+ * retrieve information such as the address to be mapped and how.
  *
  * Return: 0 on success and -ENOMEM on failure
  */
-int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
-                               struct rdma_user_mmap_entry *entry,
-                               size_t length)
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+                                     struct rdma_user_mmap_entry *entry,
+                                     size_t length, u32 min_pgoff,
+                                     u32 max_pgoff)
 {
        struct ib_uverbs_file *ufile = ucontext->ufile;
-       XA_STATE(xas, &ucontext->mmap_xa, 0);
+       XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
        u32 xa_first, xa_last, npages;
        int err;
        u32 i;
@@ -285,7 +289,7 @@ int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
        entry->npages = npages;
        while (true) {
                /* First find an empty index */
-               xas_find_marked(&xas, U32_MAX, XA_FREE_MARK);
+               xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
                if (xas.xa_node == XAS_RESTART)
                        goto err_unlock;
 
@@ -332,4 +336,30 @@ err_unlock:
        mutex_unlock(&ufile->umap_lock);
        return -ENOMEM;
 }
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range);
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa.
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface for handling user mmapped addresses. The database is handled in
+ * the core and helper functions are provided to insert entries into the
+ * database and extract entries when the user calls mmap with the given offset.
+ * The function allocates a unique page offset that should be provided to the
+ * user; the user will use the offset to retrieve information such as the
+ * address to be mapped and how.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+                               struct rdma_user_mmap_entry *entry,
+                               size_t length)
+{
+       return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0,
+                                                U32_MAX);
+}
 EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
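
A hedged usage sketch of the new range API (the driver context and the command-in-the-upper-bits encoding are assumptions here, mirroring the mlx5 MEMIC caller further down in this series):

    static int demo_insert_by_command(struct ib_ucontext *uctx,
                                      struct rdma_user_mmap_entry *entry,
                                      size_t length, u32 command)
    {
            u32 first = command << 16;      /* command-keyed pgoff window */
            u32 last = first + (1U << 16) - 1;

            return rdma_user_mmap_entry_insert_range(uctx, entry, length,
                                                     first, last);
    }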
index 9b6ca15a183cd880a77574c3d473c4df47203412..ad5112a2325f9f2b51d8407a538de99d2d6473bd 100644 (file)
@@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
        int rc;
 
        rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-       if (rc)
+       if (rc) {
                dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+               return rc;
+       }
 
        if (mr->pages) {
                rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
index 958c1ff9c515c46767f2a3a1d03a1687671ebf61..020f70e6865e14b849ccc49c6856fbb02a927ce4 100644 (file)
@@ -442,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
                goto fail;
        }
        /* Unconditionally map 8 bytes to support 57500 series */
-       nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
+       nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8);
        if (!nq->bar_reg_iomem) {
                rc = -ENOMEM;
                goto fail;
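
The ioremap_nocache() conversions in this and the following hunks are mechanical: by this point the function was a plain alias for ioremap(), which already establishes an uncached mapping, so the rename changes no behavior. Paraphrasing the asm-generic definition of the era (an approximation, not part of this patch):

    #define ioremap_nocache ioremap         /* historical alias, since removed */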
@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
                        /* Add qp to flush list of the CQ */
                        bnxt_qplib_add_flush_qp(qp);
                } else {
+                       /* Before we complete, do WA 9060 */
+                       if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+                                     cqe_sq_cons)) {
+                               *lib_qp = qp;
+                               goto out;
+                       }
                        if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
-                               /* Before we complete, do WA 9060 */
-                               if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
-                                             cqe_sq_cons)) {
-                                       *lib_qp = qp;
-                                       goto out;
-                               }
                                cqe->status = CQ_REQ_STATUS_OK;
                                cqe++;
                                (*budget)--;
index 5cdfa84faf85e1ff35a2ead998a06ba2cf0a1c3b..1291b12287a5dfd8728f6ee79678ba0432d1e867 100644 (file)
@@ -717,7 +717,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
        if (!res_base)
                return -ENOMEM;
 
-       rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
+       rcfw->cmdq_bar_reg_iomem = ioremap(res_base +
                                              RCFW_COMM_BASE_OFFSET,
                                              RCFW_COMM_SIZE);
        if (!rcfw->cmdq_bar_reg_iomem) {
@@ -739,7 +739,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
                        "CREQ BAR region %d resc start is 0!\n",
                        rcfw->creq_bar_reg);
        /* Unconditionally map 8 bytes to support 57500 series */
-       rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
+       rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off,
                                                   8);
        if (!rcfw->creq_bar_reg_iomem) {
                dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
index bdbde8e22420d39042a91770a36cd2f6c6399c60..60ea1b924b67046b71b780fc1f70662e2d02549f 100644 (file)
@@ -704,7 +704,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
                return -ENOMEM;
        }
 
-       dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
+       dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
                                                  dbr_len);
        if (!dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
index c9d294caa27a9d8d0ec61c461b71c2e0c20113d7..50c22575aed65041c33f2f008cd38a094cc2a714 100644 (file)
@@ -145,7 +145,7 @@ static inline bool is_rdma_read_cap(struct efa_dev *dev)
 }
 
 #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
-                                FIELD_SIZEOF(typeof(x), fld) <= (sz))
+                                sizeof_field(typeof(x), fld) <= (sz))
 
 #define is_reserved_cleared(reserved) \
        !memchr_inv(reserved, 0, sizeof(reserved))
index adb4a1ba921b8cb4d593568f9445674bb0f7a3e4..5836fe7b28177b2ea2caea38d5742061e15dac8d 100644 (file)
@@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
 void iowait_cancel_work(struct iowait *w)
 {
        cancel_work_sync(&iowait_get_ib_work(w)->iowork);
-       cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+       /* Only cancel the TID RDMA iowork if it has been initialized */
+       if (iowait_get_tid_work(w)->iowork.func)
+               cancel_work_sync(&iowait_get_tid_work(w)->iowork);
 }
 
 /**
index 61362bd6d3cedbca307c7c4bb3eb201128329a16..1a6268d61977aa0bac0649d57fdda94be36a20ba 100644 (file)
@@ -161,7 +161,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
                return -EINVAL;
        }
 
-       dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY);
+       dd->kregbase1 = ioremap(addr, RCV_ARRAY);
        if (!dd->kregbase1) {
                dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
                return -ENOMEM;
@@ -179,7 +179,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
        dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
        dd->base2_start  = RCV_ARRAY + rcv_array_count * 8;
 
-       dd->kregbase2 = ioremap_nocache(
+       dd->kregbase2 = ioremap(
                addr + dd->base2_start,
                TXE_PIO_SEND - dd->base2_start);
        if (!dd->kregbase2) {
index 5774dfc22e18c8c2bdb0f8ac79daf53d6dbf4e5f..a51525647ac86bd4d6b210c75f2c83ef0717dd64 100644 (file)
@@ -848,7 +848,7 @@ static const struct rhashtable_params sdma_rht_params = {
        .nelem_hint = NR_CPUS_HINT,
        .head_offset = offsetof(struct sdma_rht_node, node),
        .key_offset = offsetof(struct sdma_rht_node, cpu_id),
-       .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
+       .key_len = sizeof_field(struct sdma_rht_node, cpu_id),
        .max_size = NR_CPUS,
        .min_size = 8,
        .automatic_shrinking = true,
index e53f542b60af8a4da8201ae00286aa3765ecfdab..8a2e0d9351e91e569da79b011f2deb2619365a27 100644 (file)
@@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
                         */
                        fpsn = full_flow_psn(flow, flow->flow_state.spsn);
                        req->r_ack_psn = psn;
+                       /*
+                        * If resync_psn points to the last flow PSN for a
+                        * segment and the new segment (likely from a new
+                        * request) starts with a new generation number, we
+                        * need to adjust resync_psn accordingly.
+                        */
+                       if (flow->flow_state.generation !=
+                           (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+                               resync_psn = mask_psn(fpsn - 1);
                        flow->resync_npkts +=
                                delta_psn(mask_psn(resync_psn + 1), fpsn);
                        /*
index 343fb9894a820de96f3308cb4153b5e675e3bcf9..985ffa9cc958f02b89eaab9d8cfa7a41072d8084 100644 (file)
@@ -138,10 +138,10 @@ TRACE_EVENT(/* put_tid */
        TP_ARGS(dd, index, type, pa, order),
        TP_STRUCT__entry(/* entry */
                DD_DEV_ENTRY(dd)
-               __field(unsigned long, pa);
-               __field(u32, index);
-               __field(u32, type);
-               __field(u16, order);
+               __field(unsigned long, pa)
+               __field(u32, index)
+               __field(u32, type)
+               __field(u16, order)
        ),
        TP_fast_assign(/* assign */
                DD_DEV_ASSIGN(dd);
index 09eb0c9ada002510661f197b8c7b097b9cfd41d2..769e5e4710c6499d7f38703062ffc97f077765ab 100644 (file)
@@ -588,7 +588,7 @@ TRACE_EVENT(hfi1_sdma_user_reqinfo,
            TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
            TP_ARGS(dd, ctxt, subctxt, i),
            TP_STRUCT__entry(
-                   DD_DEV_ENTRY(dd);
+                   DD_DEV_ENTRY(dd)
                    __field(u16, ctxt)
                    __field(u8, subctxt)
                    __field(u8, ver_opcode)
index b0e9bf7cd150898cf0dd779a13d765a994adb6cb..d36e3e14896dd7b38a6a7c9edc6a89198f5e362d 100644 (file)
@@ -107,9 +107,9 @@ enum {
        HFI1_HAS_GRH = (1 << 0),
 };
 
-#define LRH_16B_BYTES (FIELD_SIZEOF(struct hfi1_16b_header, lrh))
+#define LRH_16B_BYTES (sizeof_field(struct hfi1_16b_header, lrh))
 #define LRH_16B_DWORDS (LRH_16B_BYTES / sizeof(u32))
-#define LRH_9B_BYTES (FIELD_SIZEOF(struct ib_header, lrh))
+#define LRH_9B_BYTES (sizeof_field(struct ib_header, lrh))
 #define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32))
 
 /* 24Bits for qpn, upper 8Bits reserved */
index 86375947bc67a9550cb26658999ee0908d798254..dbd96d029d8bd073c88229ba14ffc15d6feb7094 100644 (file)
@@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
        struct i40iw_ucontext *ucontext;
-       u64 db_addr_offset;
-       u64 push_offset;
+       u64 db_addr_offset, push_offset, pfn;
 
        ucontext = to_ucontext(context);
        if (ucontext->iwdev->sc_dev.is_pf) {
@@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
        if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               vma->vm_private_data = ucontext;
        } else {
                if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        }
 
-       if (io_remap_pfn_range(vma, vma->vm_start,
-                              vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
-                              PAGE_SIZE, vma->vm_page_prot))
-               return -EAGAIN;
+       pfn = vma->vm_pgoff +
+             (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+              PAGE_SHIFT);
 
-       return 0;
+       return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+                                vma->vm_page_prot, NULL);
 }
 
 /**
index 0b5dc1d5928f00ebfa35920f5936d39626c19738..34055cbab38cf9032f860c392e174b63ecd47bed 100644 (file)
@@ -3018,16 +3018,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        ibdev->ib_active = false;
        flush_workqueue(wq);
 
-       mlx4_ib_close_sriov(ibdev);
-       mlx4_ib_mad_cleanup(ibdev);
-       ib_unregister_device(&ibdev->ib_dev);
-       mlx4_ib_diag_cleanup(ibdev);
        if (ibdev->iboe.nb.notifier_call) {
                if (unregister_netdevice_notifier(&ibdev->iboe.nb))
                        pr_warn("failure unregistering notifier\n");
                ibdev->iboe.nb.notifier_call = NULL;
        }
 
+       mlx4_ib_close_sriov(ibdev);
+       mlx4_ib_mad_cleanup(ibdev);
+       ib_unregister_device(&ibdev->ib_dev);
+       mlx4_ib_diag_cleanup(ibdev);
+
        mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
                              ibdev->steer_qpn_count);
        kfree(ibdev->ib_uc_qpns_bitmap);
index 4937947400cd005913f28ea633cb6a56ad5a9ccd..4c26492ab8a32507b2ece6858b6d1abf890ae161 100644 (file)
@@ -157,7 +157,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
        return -ENOMEM;
 }
 
-int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
 {
        struct mlx5_core_dev *dev = dm->dev;
        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
@@ -175,15 +175,13 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
        MLX5_SET(dealloc_memic_in, in, memic_size, length);
 
        err =  mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+       if (err)
+               return;
 
-       if (!err) {
-               spin_lock(&dm->lock);
-               bitmap_clear(dm->memic_alloc_pages,
-                            start_page_idx, num_pages);
-               spin_unlock(&dm->lock);
-       }
-
-       return err;
+       spin_lock(&dm->lock);
+       bitmap_clear(dm->memic_alloc_pages,
+                    start_page_idx, num_pages);
+       spin_unlock(&dm->lock);
 }
 
 int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
index 169cab4915e3f91093bbd7d7414e30f120dc2d28..945ebce736134f2f12eeea58ab5a34a6eb26e003 100644 (file)
@@ -46,7 +46,7 @@ int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
                                void *in, int in_size);
 int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
                         u64 length, u32 alignment);
-int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
 void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
 void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
index 51100350b688058dc782306a57fa81abd907612f..df88bfbd7fed326ff92ca2071ceedd938ba67354 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/slab.h>
 #include <linux/bitmap.h>
 #if defined(CONFIG_X86)
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #endif
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
@@ -2074,6 +2074,24 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
                              virt_to_page(dev->mdev->clock_info));
 }
 
+static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
+{
+       struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
+       struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+       struct mlx5_ib_dm *mdm;
+
+       switch (mentry->mmap_flag) {
+       case MLX5_IB_MMAP_TYPE_MEMIC:
+               mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
+               mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
+                                      mdm->size);
+               kfree(mdm);
+               break;
+       default:
+               WARN_ON(true);
+       }
+}
+
 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
                    struct vm_area_struct *vma,
                    struct mlx5_ib_ucontext *context)
@@ -2186,26 +2204,55 @@ free_bfreg:
        return err;
 }
 
-static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+                            struct mlx5_ib_dm *mdm,
+                            u64 address)
+{
+       mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
+       mdm->mentry.address = address;
+       return rdma_user_mmap_entry_insert_range(
+                       context, &mdm->mentry.rdma_entry,
+                       mdm->size,
+                       MLX5_IB_MMAP_DEVICE_MEM << 16,
+                       (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
 {
-       struct mlx5_ib_ucontext *mctx = to_mucontext(context);
-       struct mlx5_ib_dev *dev = to_mdev(context->device);
-       u16 page_idx = get_extended_index(vma->vm_pgoff);
-       size_t map_size = vma->vm_end - vma->vm_start;
-       u32 npages = map_size >> PAGE_SHIFT;
+       unsigned long idx;
+       u8 command;
+
+       command = get_command(vma->vm_pgoff);
+       idx = get_extended_index(vma->vm_pgoff);
+
+       return (command << 16 | idx);
+}
+
+static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
+                              struct vm_area_struct *vma,
+                              struct ib_ucontext *ucontext)
+{
+       struct mlx5_user_mmap_entry *mentry;
+       struct rdma_user_mmap_entry *entry;
+       unsigned long pgoff;
+       pgprot_t prot;
        phys_addr_t pfn;
+       int ret;
 
-       if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
-           page_idx + npages)
+       pgoff = mlx5_vma_to_pgoff(vma);
+       entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
+       if (!entry)
                return -EINVAL;
 
-       pfn = ((dev->mdev->bar_addr +
-             MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
-             PAGE_SHIFT) +
-             page_idx;
-       return rdma_user_mmap_io(context, vma, pfn, map_size,
-                                pgprot_writecombine(vma->vm_page_prot),
-                                NULL);
+       mentry = to_mmmap(entry);
+       pfn = (mentry->address >> PAGE_SHIFT);
+       prot = pgprot_writecombine(vma->vm_page_prot);
+       ret = rdma_user_mmap_io(ucontext, vma, pfn,
+                               entry->npages * PAGE_SIZE,
+                               prot,
+                               entry);
+       rdma_user_mmap_entry_put(&mentry->rdma_entry);
+       return ret;
 }
 
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
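
mlx5_vma_to_pgoff() rebuilds the xarray key with the command in the upper 16 bits and the extended index below it, which is exactly the window add_dm_mmap_entry() reserved. A worked example (the numeric command value is hypothetical):

    /* command 7, extended index 5: the entry was inserted somewhere in
     * [7 << 16, (7 << 16) + 0xffff], so the lookup key is: */
    unsigned long key = (7UL << 16) | 5;    /* == 0x70005 */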
@@ -2248,11 +2295,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
        case MLX5_IB_MMAP_CLOCK_INFO:
                return mlx5_ib_mmap_clock_info_page(dev, vma, context);
 
-       case MLX5_IB_MMAP_DEVICE_MEM:
-               return dm_mmap(ibcontext, vma);
-
        default:
-               return -EINVAL;
+               return mlx5_ib_mmap_offset(dev, vma, ibcontext);
        }
 
        return 0;
@@ -2288,8 +2332,9 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
 {
        struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
        u64 start_offset;
-       u32 page_idx;
+       u16 page_idx;
        int err;
+       u64 address;
 
        dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
 
@@ -2298,28 +2343,30 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
        if (err)
                return err;
 
-       page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
-                   MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
-                   PAGE_SHIFT;
+       address = dm->dev_addr & PAGE_MASK;
+       err = add_dm_mmap_entry(ctx, dm, address);
+       if (err)
+               goto err_dealloc;
 
+       page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
        err = uverbs_copy_to(attrs,
                             MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-                            &page_idx, sizeof(page_idx));
+                            &page_idx,
+                            sizeof(page_idx));
        if (err)
-               goto err_dealloc;
+               goto err_copy;
 
        start_offset = dm->dev_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs,
                             MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                             &start_offset, sizeof(start_offset));
        if (err)
-               goto err_dealloc;
-
-       bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
-                  DIV_ROUND_UP(dm->size, PAGE_SIZE));
+               goto err_copy;
 
        return 0;
 
+err_copy:
+       rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
 err_dealloc:
        mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
 
@@ -2423,23 +2470,13 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
        struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
                &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
        struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
-       struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
        struct mlx5_ib_dm *dm = to_mdm(ibdm);
-       u32 page_idx;
        int ret;
 
        switch (dm->type) {
        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-               ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-               if (ret)
-                       return ret;
-
-               page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
-                           MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
-                           PAGE_SHIFT;
-               bitmap_clear(ctx->dm_pages, page_idx,
-                            DIV_ROUND_UP(dm->size, PAGE_SIZE));
-               break;
+               rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+               return 0;
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
                ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
                                             dm->size, ctx->devx_uid, dm->dev_addr,
@@ -3544,10 +3581,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
        }
 
        INIT_LIST_HEAD(&handler->list);
-       if (dst) {
-               memcpy(&dest_arr[0], dst, sizeof(*dst));
-               dest_num++;
-       }
 
        for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
                err = parse_flow_attr(dev->mdev, spec,
@@ -3560,6 +3593,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
                ib_flow += ((union ib_flow_spec *)ib_flow)->size;
        }
 
+       if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
+               memcpy(&dest_arr[0], dst, sizeof(*dst));
+               dest_num++;
+       }
+
        if (!flow_is_multicast_only(flow_attr))
                set_underlay_qp(dev, spec, underlay_qpn);
 
@@ -3600,10 +3638,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
        }
 
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
-               if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
+               if (!dest_num)
                        rule_dst = NULL;
-                       dest_num = 0;
-               }
        } else {
                if (is_egress)
                        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
@@ -6236,6 +6272,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
        .map_mr_sg = mlx5_ib_map_mr_sg,
        .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
        .mmap = mlx5_ib_mmap,
+       .mmap_free = mlx5_ib_mmap_free,
        .modify_cq = mlx5_ib_modify_cq,
        .modify_device = mlx5_ib_modify_device,
        .modify_port = mlx5_ib_modify_port,
index 5986953ec2facfde153a3dae39e389d6b0747c39..b06f32ff5748f51b64fc24640816621719d2fba4 100644 (file)
@@ -118,6 +118,10 @@ enum {
        MLX5_MEMIC_BASE_SIZE    = 1 << MLX5_MEMIC_BASE_ALIGN,
 };
 
+enum mlx5_ib_mmap_type {
+       MLX5_IB_MMAP_TYPE_MEMIC = 1,
+};
+
 #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)                                        \
        (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
 #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -135,7 +139,6 @@ struct mlx5_ib_ucontext {
        u32                     tdn;
 
        u64                     lib_caps;
-       DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
        u16                     devx_uid;
        /* For RoCE LAG TX affinity */
        atomic_t                tx_port_affinity;
@@ -556,6 +559,12 @@ enum mlx5_ib_mtt_access_flags {
        MLX5_IB_MTT_WRITE = (1 << 1),
 };
 
+struct mlx5_user_mmap_entry {
+       struct rdma_user_mmap_entry rdma_entry;
+       u8 mmap_flag;
+       u64 address;
+};
+
 struct mlx5_ib_dm {
        struct ib_dm            ibdm;
        phys_addr_t             dev_addr;
@@ -567,6 +576,7 @@ struct mlx5_ib_dm {
                } icm_dm;
                /* other dm types specific params should be added here */
        };
+       struct mlx5_user_mmap_entry mentry;
 };
 
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -1101,6 +1111,13 @@ to_mflow_act(struct ib_flow_action *ibact)
        return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
 }
 
+static inline struct mlx5_user_mmap_entry *
+to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+       return container_of(rdma_entry,
+               struct mlx5_user_mmap_entry, rdma_entry);
+}
+
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
                        struct ib_udata *udata, unsigned long virt,
                        struct mlx5_db *db);
index dd4843379f51de80f3d0219f13fec4005445abe3..91d64dd71a8a338a55ffc14e365fe42d651380f5 100644 (file)
@@ -6630,7 +6630,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
        /* vl15 buffers start just after the 4k buffers */
        vl15off = dd->physaddr + (dd->piobufbase >> 32) +
                  dd->piobcnt4k * dd->align4k;
-       dd->piovl15base = ioremap_nocache(vl15off,
+       dd->piovl15base = ioremap(vl15off,
                                          NUM_VL15_BUFS * dd->align4k);
        if (!dd->piovl15base) {
                ret = -ENOMEM;
index d4fd8a6cff7b9176fc2ba33ae6ed872c189326af..43c8ee1f46e042a37e74973db87f86ccc566e47a 100644 (file)
@@ -1759,7 +1759,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
                qib_userlen = dd->ureg_align * dd->cfgctxts;
 
        /* Sanity checks passed, now create the new mappings */
-       qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
+       qib_kregbase = ioremap(qib_physaddr, qib_kreglen);
        if (!qib_kregbase)
                goto bail;
 
@@ -1768,7 +1768,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
                goto bail_kregbase;
 
        if (qib_userlen) {
-               qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
+               qib_userbase = ioremap(qib_physaddr + dd->uregbase,
                                               qib_userlen);
                if (!qib_userbase)
                        goto bail_piobase;
index 864f2af171f70278106b58bb934ad4936da502be..3dc6ce03331905f9d21716ac6ae3a4dafdc85c65 100644 (file)
@@ -145,7 +145,7 @@ int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
        addr = pci_resource_start(pdev, 0);
        len = pci_resource_len(pdev, 0);
 
-       dd->kregbase = ioremap_nocache(addr, len);
+       dd->kregbase = ioremap(addr, len);
        if (!dd->kregbase)
                return -ENOMEM;
 
index f9a492ed900b91423a8de5dd91932a370a6b2a00..831ad578a7b29d2ba61a3ed475f2c7ffdcffc7a3 100644 (file)
@@ -389,7 +389,7 @@ void rxe_rcv(struct sk_buff *skb)
 
        calc_icrc = rxe_icrc_hdr(pkt, skb);
        calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
-                             payload_size(pkt));
+                             payload_size(pkt) + bth_pad(pkt));
        calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
        if (unlikely(calc_icrc != pack_icrc)) {
                if (skb->protocol == htons(ETH_P_IPV6))
index c5d9b558fa90a2f8e85af19a8c6a7d85e3e6fd9f..e5031172c0193ad3e2906210fe2e20f2da398a32 100644 (file)
@@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                        if (err)
                                return err;
                }
+               if (bth_pad(pkt)) {
+                       u8 *pad = payload_addr(pkt) + paylen;
+
+                       memset(pad, 0, bth_pad(pkt));
+                       crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
+               }
        }
        p = payload_addr(pkt) + paylen + bth_pad(pkt);
 
index 1cbfbd98eb221804e9424ecabbe267f6ade8c82f..c4a8195bf670945ef017837337598f3eb0cdc8e3 100644 (file)
@@ -732,6 +732,13 @@ static enum resp_states read_reply(struct rxe_qp *qp,
        if (err)
                pr_err("Failed copying memory\n");
 
+       if (bth_pad(&ack_pkt)) {
+               struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+               u8 *pad = payload_addr(&ack_pkt) + payload;
+
+               memset(pad, 0, bth_pad(&ack_pkt));
+               icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
+       }
        p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
        *p = ~icrc;
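
All three rxe hunks enforce the same rule: the ICRC covers the BTH pad bytes, so the transmit paths must zero the pad and fold it into the running CRC, matching the receive-side check earlier in this series. The pad itself is ordinary 4-byte alignment; a sketch of that arithmetic (the driver reads the 2-bit pad count from the header via bth_pad() rather than recomputing it):

    /* pad needed to bring an IB payload up to a 4-byte boundary */
    static inline unsigned int pad4(unsigned int len)
    {
            return (0U - len) & 3;          /* 5 -> 3, 8 -> 0, 10 -> 2 */
    }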
 
index a1a035270cabf0b0dac14542b5f879514c7ce2e1..b273e421e9103f9a4aa7652bda41f6c08a9dc7b6 100644 (file)
@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
        }
 }
 
-static void
-isert_wait4cmds(struct iscsi_conn *conn)
-{
-       isert_info("iscsi_conn %p\n", conn);
-
-       if (conn->sess) {
-               target_sess_cmd_list_set_waiting(conn->sess->se_sess);
-               target_wait_for_sess_cmds(conn->sess->se_sess);
-       }
-}
-
 /**
  * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 
        ib_drain_qp(isert_conn->qp);
        isert_put_unsol_pending_cmds(conn);
-       isert_wait4cmds(conn);
        isert_wait4logout(isert_conn);
 
        queue_work(isert_release_wq, &isert_conn->release_work);
index 62390e9e002362ce5fe0e4ec499591c0a46798d7..8ad7da989a0ec57f3cdfbcc2dc895d4d5792f954 100644 (file)
@@ -63,7 +63,7 @@ struct vnic_stats {
        };
 };
 
-#define VNIC_STAT(m)            { FIELD_SIZEOF(struct opa_vnic_stats, m),   \
+#define VNIC_STAT(m)            { sizeof_field(struct opa_vnic_stats, m),   \
                                  offsetof(struct opa_vnic_stats, m) }
 
 static struct vnic_stats vnic_gstrings_stats[] = {
index d7dd6fcf2db05a8841e60a4089dd71212d15c476..cb6e3a5f509c8a06ad03fc5bdf26d137189cbb9b 100644 (file)
@@ -224,13 +224,13 @@ static void __pass_event(struct evdev_client *client,
                 */
                client->tail = (client->head - 2) & (client->bufsize - 1);
 
-               client->buffer[client->tail].input_event_sec =
-                                               event->input_event_sec;
-               client->buffer[client->tail].input_event_usec =
-                                               event->input_event_usec;
-               client->buffer[client->tail].type = EV_SYN;
-               client->buffer[client->tail].code = SYN_DROPPED;
-               client->buffer[client->tail].value = 0;
+               client->buffer[client->tail] = (struct input_event) {
+                       .input_event_sec = event->input_event_sec,
+                       .input_event_usec = event->input_event_usec,
+                       .type = EV_SYN,
+                       .code = SYN_DROPPED,
+                       .value = 0,
+               };
 
                client->packet_head = client->tail;
        }
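
Assigning the SYN_DROPPED slot from one compound literal, instead of member by member, guarantees every member not named in the initializer starts at zero, and in practice (GCC/Clang) zeroes the struct padding too, so a reused ring slot cannot leak stale bytes. The same pattern is applied to uinput later in this section. Minimal illustration:

    struct input_event ev = (struct input_event) {
            .type = EV_SYN,
            .code = SYN_DROPPED,
            /* .value and all other unnamed members default to 0 */
    };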
@@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file)
        struct evdev_client *client;
        int error;
 
-       client = kzalloc(struct_size(client, buffer, bufsize),
-                        GFP_KERNEL | __GFP_NOWARN);
-       if (!client)
-               client = vzalloc(struct_size(client, buffer, bufsize));
+       client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
        if (!client)
                return -ENOMEM;
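
kvzalloc() packages exactly the fallback the removed lines open-coded: attempt a physically contiguous slab allocation first, then fall back to vmalloc. A rough equivalence (paraphrased; the real helper also sanitizes the GFP flags):

    static void *kvzalloc_like(size_t size, gfp_t flags)
    {
            void *p = kzalloc(size, flags | __GFP_NOWARN);

            return p ? p : vzalloc(size);
    }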
 
index 55086279d044e752e2e009639d36bb89e2462f4d..ee6c3234df3634800f257207b3970517d5e68384 100644 (file)
@@ -878,16 +878,18 @@ static int input_default_setkeycode(struct input_dev *dev,
                }
        }
 
-       __clear_bit(*old_keycode, dev->keybit);
-       __set_bit(ke->keycode, dev->keybit);
-
-       for (i = 0; i < dev->keycodemax; i++) {
-               if (input_fetch_keycode(dev, i) == *old_keycode) {
-                       __set_bit(*old_keycode, dev->keybit);
-                       break; /* Setting the bit twice is useless, so break */
+       if (*old_keycode <= KEY_MAX) {
+               __clear_bit(*old_keycode, dev->keybit);
+               for (i = 0; i < dev->keycodemax; i++) {
+                       if (input_fetch_keycode(dev, i) == *old_keycode) {
+                               __set_bit(*old_keycode, dev->keybit);
+                               /* Setting the bit twice is useless, so break */
+                               break;
+                       }
                }
        }
 
+       __set_bit(ke->keycode, dev->keybit);
        return 0;
 }
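
The KEY_MAX guard matters because dev->keybit only has room for KEY_CNT (KEY_MAX + 1) bits; an out-of-range old keycode must never be used as a bitmap index:

    /* from include/linux/input.h: the bitmap the code above indexes */
    unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];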
 
@@ -943,9 +945,13 @@ int input_set_keycode(struct input_dev *dev,
         * Simulate keyup event if keycode is not present
         * in the keymap anymore
         */
-       if (test_bit(EV_KEY, dev->evbit) &&
-           !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
-           __test_and_clear_bit(old_keycode, dev->key)) {
+       if (old_keycode > KEY_MAX) {
+               dev_warn(dev->dev.parent ?: &dev->dev,
+                        "%s: got too big old keycode %#x\n",
+                        __func__, old_keycode);
+       } else if (test_bit(EV_KEY, dev->evbit) &&
+                  !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+                  __test_and_clear_bit(old_keycode, dev->key)) {
                struct input_value vals[] =  {
                        { EV_KEY, old_keycode, 0 },
                        input_value_sync
index 53799527dc75b27b57a48639af7a8d71af134ffa..9f809aeb785c4927d60009d233610d313fc07e9d 100644 (file)
@@ -78,7 +78,13 @@ static void imx_sc_check_for_events(struct work_struct *work)
                return;
        }
 
-       state = (bool)msg.state;
+       /*
+        * The response from the SCU firmware is 4 bytes, but
+        * only the first byte carries the key state; the other
+        * 3 bytes may hold stale data, so take only the first
+        * byte as the key state.
+        */
+       state = (bool)(msg.state & 0xff);
 
        if (state ^ priv->keystate) {
                priv->keystate = state;
index f7414091d94e2ace58eb14a77b6138dbdc68489d..2fe9dcfe0a6fd8e21ae6e4ef39a106036f4a62b6 100644 (file)
@@ -107,7 +107,7 @@ static int pxa930_rotary_probe(struct platform_device *pdev)
        if (!r)
                return -ENOMEM;
 
-       r->mmio_base = ioremap_nocache(res->start, resource_size(res));
+       r->mmio_base = ioremap(res->start, resource_size(res));
        if (r->mmio_base == NULL) {
                dev_err(&pdev->dev, "failed to remap IO memory\n");
                err = -ENXIO;
index 27ad73f43451d41d64c0f150b22b6b5f144bdc5d..c155adebf96e4d21d3fdeaab81100a6e9468e3d1 100644 (file)
@@ -195,7 +195,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
        memcpy(&priv->pdata, dev_get_platdata(&pdev->dev), sizeof(priv->pdata));
        pdata = &priv->pdata;
 
-       priv->iomem_base = ioremap_nocache(res->start, resource_size(res));
+       priv->iomem_base = ioremap(res->start, resource_size(res));
        if (priv->iomem_base == NULL) {
                dev_err(&pdev->dev, "failed to remap I/O memory\n");
                error = -ENXIO;
index 83368f1e7c4eb714242c2a6dc1788196ca58e011..4650f4a949890f577af46898ab7967210b5a9486 100644 (file)
@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
        int retval = 0;
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+                                0x11, 0x40, 0x5601, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
                        __func__, retval);
@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
        }
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+                                0x44, 0x40, 0x0, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
                        __func__, retval);
@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
        }
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+                                0x22, 0x40, 0x0, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
                        __func__, retval);
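
A timeout of 0 in usb_control_msg() means wait indefinitely, so a wedged receiver could hang these setup requests forever; USB_CTRL_SET_TIMEOUT bounds each transfer instead:

    /* from include/linux/usb.h (for reference) */
    #define USB_CTRL_SET_TIMEOUT    5000    /* milliseconds */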
index 4d875f2ac13d6720422e03cedf8011919ef5d5e3..ee55f22dbca540ae97f1f06d173fed22adcc6392 100644 (file)
@@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev)
        return input_register_device(onkey->input);
 }
 
+static const struct of_device_id max77650_onkey_of_match[] = {
+       { .compatible = "maxim,max77650-onkey" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max77650_onkey_of_match);
+
 static struct platform_driver max77650_onkey_driver = {
        .driver = {
                .name = "max77650-onkey",
+               .of_match_table = max77650_onkey_of_match,
        },
        .probe = max77650_onkey_probe,
 };
index ecd762f93732678cbc6eee68c807478722c5f900..53ad25eaf1a289e80aa64cfb97e2818c1df0fcd9 100644 (file)
@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
 
        if (regs->enable_mask)
                rc = regmap_update_bits(vib->regmap, regs->enable_addr,
-                                       on ? regs->enable_mask : 0, val);
+                                       regs->enable_mask, on ? ~0 : 0);
 
        return rc;
 }
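
The old call passed the mask expression in the value slot, so switching the motor on wrote 0 into the enable bits. regmap_update_bits(map, reg, mask, val) applies the value through the mask, which is why the fix keeps the mask constant and varies the value. Roughly (illustrative expansion, not the regmap source):

    /* new = (old & ~mask) | (val & mask), i.e. with this fix: */
    new = (old & ~regs->enable_mask) |
          ((on ? ~0u : 0u) & regs->enable_mask);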
index fd253781be711d306a4f0f41ffa59bac1a284000..f2593133e5247a8fbc6cebfc296a2aedeb0c87ff 100644 (file)
@@ -74,12 +74,16 @@ static int uinput_dev_event(struct input_dev *dev,
        struct uinput_device    *udev = input_get_drvdata(dev);
        struct timespec64       ts;
 
-       udev->buff[udev->head].type = type;
-       udev->buff[udev->head].code = code;
-       udev->buff[udev->head].value = value;
        ktime_get_ts64(&ts);
-       udev->buff[udev->head].input_event_sec = ts.tv_sec;
-       udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+       udev->buff[udev->head] = (struct input_event) {
+               .input_event_sec = ts.tv_sec,
+               .input_event_usec = ts.tv_nsec / NSEC_PER_USEC,
+               .type = type,
+               .code = code,
+               .value = value,
+       };
+
        udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE;
 
        wake_up_interruptible(&udev->waitq);
@@ -689,13 +693,14 @@ static ssize_t uinput_read(struct file *file, char __user *buffer,
 static __poll_t uinput_poll(struct file *file, poll_table *wait)
 {
        struct uinput_device *udev = file->private_data;
+       __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */
 
        poll_wait(file, &udev->waitq, wait);
 
        if (udev->head != udev->tail)
-               return EPOLLIN | EPOLLRDNORM;
+               mask |= EPOLLIN | EPOLLRDNORM;
 
-       return EPOLLOUT | EPOLLWRNORM;
+       return mask;
 }
 
 static int uinput_release(struct inode *inode, struct file *file)
index 41acde60b60f487046b7fc241a785cfe21e09d93..3332b77eef2aecbe5748722d6ea40eae42677649 100644 (file)
@@ -167,7 +167,7 @@ static int pxa930_trkball_probe(struct platform_device *pdev)
                goto failed;
        }
 
-       trkball->mmio_base = ioremap_nocache(res->start, resource_size(res));
+       trkball->mmio_base = ioremap(res->start, resource_size(res));
        if (!trkball->mmio_base) {
                dev_err(&pdev->dev, "failed to ioremap registers\n");
                error = -ENXIO;
index 0bc01cfc2b518fd7e733a344237a5f3d98ffbd26..6b23e679606eeb9ba3412ba75abd47a90465b1ea 100644 (file)
 #define F54_NUM_TX_OFFSET       1
 #define F54_NUM_RX_OFFSET       0
 
+/*
+ * The SMBus protocol can read at most 32 bytes at a time;
+ * reading in 32-byte chunks is fine for I2C/SPI as well.
+ */
+#define F54_REPORT_DATA_SIZE   32
+
 /* F54 commands */
 #define F54_GET_REPORT          1
 #define F54_FORCE_CAL           2
@@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work)
        int report_size;
        u8 command;
        int error;
+       int i;
 
        report_size = rmi_f54_get_report_size(f54);
        if (report_size == 0) {
@@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work)
 
        rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
 
-       fifo[0] = 0;
-       fifo[1] = 0;
-       error = rmi_write_block(fn->rmi_dev,
-                               fn->fd.data_base_addr + F54_FIFO_OFFSET,
-                               fifo, sizeof(fifo));
-       if (error) {
-               dev_err(&fn->dev, "Failed to set fifo start offset\n");
-               goto abort;
-       }
+       for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) {
+               int size = min(F54_REPORT_DATA_SIZE, report_size - i);
+
+               fifo[0] = i & 0xff;
+               fifo[1] = i >> 8;
+               error = rmi_write_block(fn->rmi_dev,
+                                       fn->fd.data_base_addr + F54_FIFO_OFFSET,
+                                       fifo, sizeof(fifo));
+               if (error) {
+                       dev_err(&fn->dev, "Failed to set fifo start offset\n");
+                       goto abort;
+               }
 
-       error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
-                              F54_REPORT_DATA_OFFSET, f54->report_data,
-                              report_size);
-       if (error) {
-               dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
-                       __func__, report_size, error);
-               goto abort;
+               error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+                                      F54_REPORT_DATA_OFFSET,
+                                      f54->report_data + i, size);
+               if (error) {
+                       dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+                               __func__, size, error);
+                       goto abort;
+               }
        }
 
 abort:
index b313c579914f49948e0c3e97f0865ec178b459ca..2407ea43de59b7b5f386d0361c3cc49495df62d3 100644 (file)
@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
                /* prepare to write next block of bytes */
                cur_len -= SMB_MAX_COUNT;
                databuff += SMB_MAX_COUNT;
+               rmiaddr += SMB_MAX_COUNT;
        }
 exit:
        mutex_unlock(&rmi_smb->page_mutex);
@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
                /* prepare to read next block of bytes */
                cur_len -= SMB_MAX_COUNT;
                databuff += SMB_MAX_COUNT;
+               rmiaddr += SMB_MAX_COUNT;
        }
 
        retval = 0;
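
Without advancing rmiaddr, every chunk after the first was transferred at the starting register address. Worked example for a 100-byte read beginning at 0x0400, with the 32-byte SMB_MAX_COUNT assumed by the loop above:

    /* chunk:    0       1       2       3
     * rmiaddr:  0x0400  0x0420  0x0440  0x0460
     * length:   32      32      32      4
     */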
index 96f9b5397367fca2388b3186296611ae7287dec9..2f9775de3c5b93c779a1884724e67c3cc389cae0 100644 (file)
@@ -349,7 +349,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
 
        ps2port->port = serio;
        ps2port->padev = dev;
-       ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4);
+       ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
        spin_lock_init(&ps2port->lock);
 
        gscps2_reset(ps2port);
index 2ca586fb914fbc89253232f70fef726836a59320..e08b0ef078e8198474120972a656b09a0150ee09 100644 (file)
@@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        aiptek->inputdev = inputdev;
        aiptek->intf = intf;
-       aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
+       aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
        aiptek->inDelay = 0;
        aiptek->endDelay = 0;
        aiptek->previousJitterable = 0;
@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
        input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
 
        /* Verify that a device really has an endpoint */
-       if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&intf->dev,
                        "interface has %d endpoints, but must have minimum 1\n",
-                       intf->altsetting[0].desc.bNumEndpoints);
+                       intf->cur_altsetting->desc.bNumEndpoints);
                err = -EINVAL;
                goto fail3;
        }
-       endpoint = &intf->altsetting[0].endpoint[0].desc;
+       endpoint = &intf->cur_altsetting->endpoint[0].desc;
 
        /* Go set up our URB, which is called when the tablet receives
         * input.
index 35031228a6d076cf0dd91dab411f049ec679df6b..96d65575f75a321b4576dbc646deea424f167a2f 100644 (file)
@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
        }
 
        /* Sanity check that a device has an endpoint */
-       if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+       if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&usbinterface->dev,
                        "Invalid number of endpoints\n");
                error = -EINVAL;
                goto err_free_urb;
        }
 
-       /*
-        * The endpoint is always altsetting 0, we know this since we know
-        * this device only has one interrupt endpoint
-        */
-       endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+       endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
 
        /* Some debug */
        dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
        if (usb_endpoint_xfer_int(endpoint))
                dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n");
 
-       dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen);
+       dev_dbg(&usbinterface->dev, "interface extra len:%d\n",
+               usbinterface->cur_altsetting->extralen);
 
        /*
         * Find the HID descriptor so we can find out the size of the
@@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
        input_dev->dev.parent = &usbinterface->dev;
 
        /* Setup the URB, it will be posted later on open of input device */
-       endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
-
        usb_fill_int_urb(gtco->urbinfo,
                         udev,
                         usb_rcvintpipe(udev,
index a1f3a0cb197ec9b997e5bd3bf73b3871297df26d..38f087404f7af1c18614c507ffb105308f28743b 100644 (file)
@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
                return -ENODEV;
 
        /* Sanity check that the device has an endpoint */
-       if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&intf->dev, "Invalid number of endpoints\n");
                return -EINVAL;
        }
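
Several hunks in this section make the same substitution: altsetting[0] is merely the first descriptor in the table, while cur_altsetting is the setting the device is actually using, so endpoint sanity checks must look at the latter. The pattern, in outline:

    struct usb_host_interface *alt = intf->cur_altsetting;  /* not altsetting[0] */

    if (alt->desc.bNumEndpoints < 1)
            return -EINVAL;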
index 0af0fe8c40d7f19dae9c52e7e6dc1a20845f4ffe..742a7e96c1b57d73a565eab8eda5511e9a78e18a 100644 (file)
@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct device *hwmon;
+       struct thermal_zone_device *thermal;
        int error;
        u32 reg;
        bool ts_attached;
@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
        if (IS_ERR(hwmon))
                return PTR_ERR(hwmon);
 
-       devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+       thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+                                                      &sun4i_ts_tz_ops);
+       if (IS_ERR(thermal))
+               return PTR_ERR(thermal);
 
        writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
 
index 1dd47dda71cd65c543e040c97587898d422373aa..34d31c7ec8bad51f945f1f9c9380628400c48189 100644 (file)
@@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface,
        int error;
 
        /* Check if we really have the right interface. */
-       iface_desc = &interface->altsetting[0];
+       iface_desc = interface->cur_altsetting;
        if (iface_desc->desc.bInterfaceClass != 0xFF)
                return -ENODEV;
 
index c49afbea3458acadbbec159d251019409b8d96d8..2f9304d1db49635289e8d8db209a6cbf8141efaf 100644 (file)
@@ -6,13 +6,13 @@ config INTERCONNECT_QCOM
          Support for Qualcomm's Network-on-Chip interconnect hardware.
 
 config INTERCONNECT_QCOM_MSM8974
-       tristate "Qualcomm MSM8974 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on QCOM_SMD_RPM
-       select INTERCONNECT_QCOM_SMD_RPM
-       help
-         This is a driver for the Qualcomm Network-on-Chip on msm8974-based
-         platforms.
+       tristate "Qualcomm MSM8974 interconnect driver"
+       depends on INTERCONNECT_QCOM
+       depends on QCOM_SMD_RPM
+       select INTERCONNECT_QCOM_SMD_RPM
+       help
+        This is a driver for the Qualcomm Network-on-Chip on msm8974-based
+        platforms.
 
 config INTERCONNECT_QCOM_QCS404
        tristate "Qualcomm QCS404 interconnect driver"
index ce599a0c83d9583416a61f583c08742487606b60..bf8bd1aee358dd52fcfd99f8a1541c5f394463c6 100644 (file)
@@ -652,7 +652,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct icc_onecell_data *data;
        struct icc_provider *provider;
-       struct icc_node *node;
+       struct icc_node *node, *tmp;
        size_t num_nodes, i;
        int ret;
 
@@ -732,7 +732,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
        return 0;
 
 err_del_icc:
-       list_for_each_entry(node, &provider->nodes, node_list) {
+       list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
                icc_node_del(node);
                icc_node_destroy(node->id);
        }
@@ -748,9 +748,9 @@ static int msm8974_icc_remove(struct platform_device *pdev)
 {
        struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
        struct icc_provider *provider = &qp->provider;
-       struct icc_node *n;
+       struct icc_node *n, *tmp;
 
-       list_for_each_entry(n, &provider->nodes, node_list) {
+       list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
                icc_node_del(n);
                icc_node_destroy(n->id);
        }
index b4966d8f3348d269e90f2c69f69d167ba613f905..8e0735a87040029f7854f70390de5596f0f88860 100644 (file)
@@ -414,7 +414,7 @@ static int qnoc_probe(struct platform_device *pdev)
        struct icc_provider *provider;
        struct qcom_icc_node **qnodes;
        struct qcom_icc_provider *qp;
-       struct icc_node *node;
+       struct icc_node *node, *tmp;
        size_t num_nodes, i;
        int ret;
 
@@ -494,7 +494,7 @@ static int qnoc_probe(struct platform_device *pdev)
 
        return 0;
 err:
-       list_for_each_entry(node, &provider->nodes, node_list) {
+       list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) {
                icc_node_del(node);
                icc_node_destroy(node->id);
        }
@@ -508,9 +508,9 @@ static int qnoc_remove(struct platform_device *pdev)
 {
        struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
        struct icc_provider *provider = &qp->provider;
-       struct icc_node *n;
+       struct icc_node *n, *tmp;
 
-       list_for_each_entry(n, &provider->nodes, node_list) {
+       list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
                icc_node_del(n);
                icc_node_destroy(n->id);
        }
index 502a6c22b41eea9ded02b65a51b68aa028a78ca6..387267ee9648509fa39bac0c25c5fd6db1fe1c46 100644 (file)
@@ -868,9 +868,9 @@ static int qnoc_remove(struct platform_device *pdev)
 {
        struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
        struct icc_provider *provider = &qp->provider;
-       struct icc_node *n;
+       struct icc_node *n, *tmp;
 
-       list_for_each_entry(n, &provider->nodes, node_list) {
+       list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) {
                icc_node_del(n);
                icc_node_destroy(n->id);
        }
index 568c52317757ca2924598b69c7329dcfad940356..823cc4ef51fd4be150fbc70309b44094762bc203 100644 (file)
@@ -440,7 +440,7 @@ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
                return NULL;
        }
 
-       return (u8 __iomem *)ioremap_nocache(address, end);
+       return (u8 __iomem *)ioremap(address, end);
 }
 
 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 {
        struct pci_dev *pdev = iommu->dev;
-       u64 val = 0xabcd, val2 = 0;
+       u64 val = 0xabcd, val2 = 0, save_reg = 0;
 
        if (!iommu_feature(iommu, FEATURE_PC))
                return;
 
        amd_iommu_pc_present = true;
 
+       /* save the value to restore, if writable */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+               goto pc_false;
+
        /* Check if the performance counters can be written to */
        if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
            (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
-           (val != val2)) {
-               pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
-               amd_iommu_pc_present = false;
-               return;
-       }
+           (val != val2))
+               goto pc_false;
+
+       /* restore */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+               goto pc_false;
 
        pci_info(pdev, "IOMMU performance counters supported\n");
 
        val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
        iommu->max_banks = (u8) ((val >> 12) & 0x3f);
        iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+       return;
+
+pc_false:
+       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+       amd_iommu_pc_present = false;
+       return;
 }
 
 static ssize_t amd_iommu_show_cap(struct device *dev,
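The rework above turns the probe into a save/probe/restore sequence, so the 0xabcd test write no longer clobbers whatever the counter previously held, and funnels all failure paths through the single pc_false label. A sketch of the idiom, with read_reg()/write_reg() as hypothetical stand-ins for the iommu_pc_get_set_reg() pairs:

static bool counter_is_writable(void)
{
	u64 saved, readback, pattern = 0xabcd;

	if (read_reg(&saved))			/* remember current value */
		return false;
	if (write_reg(pattern) || read_reg(&readback) ||
	    readback != pattern)		/* probe with a test write */
		return false;
	return !write_reg(saved);		/* leave it as we found it */
}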
index 0cc702a70a9627a059a0ec01d6346d418c882aca..a2e96a5fd9a7b3a6d9e2bf35ea1a8d7bdc78f1f2 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/mm.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
@@ -44,7 +45,6 @@ struct iommu_dma_cookie {
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;
-       spinlock_t                      msi_lock;
 
        /* Domain for flush queue callback; NULL if flush queue not in use */
        struct iommu_domain             *fq_domain;
@@ -63,7 +63,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
 
        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
-               spin_lock_init(&cookie->msi_lock);
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
@@ -399,7 +398,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 }
 
 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
-               size_t size, dma_addr_t dma_limit, struct device *dev)
+               size_t size, u64 dma_limit, struct device *dev)
 {
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
@@ -424,7 +423,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
        dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
 
        if (domain->geometry.force_aperture)
-               dma_limit = min(dma_limit, domain->geometry.aperture_end);
+               dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
 
        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
@@ -477,7 +476,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-               size_t size, int prot, dma_addr_t dma_mask)
+               size_t size, int prot, u64 dma_mask)
 {
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1176,7 +1175,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                if (msi_page->phys == msi_addr)
                        return msi_page;
 
-       msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
+       msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return NULL;
 
@@ -1204,25 +1203,22 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
 {
        struct device *dev = msi_desc_to_dev(desc);
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-       struct iommu_dma_cookie *cookie;
        struct iommu_dma_msi_page *msi_page;
-       unsigned long flags;
+       static DEFINE_MUTEX(msi_prepare_lock); /* see below */
 
        if (!domain || !domain->iova_cookie) {
                desc->iommu_cookie = NULL;
                return 0;
        }
 
-       cookie = domain->iova_cookie;
-
        /*
-        * We disable IRQs to rule out a possible inversion against
-        * irq_desc_lock if, say, someone tries to retarget the affinity
-        * of an MSI from within an IPI handler.
+        * In fact the whole prepare operation should already be serialised by
+        * irq_domain_mutex further up the callchain, but that's pretty subtle
+        * on its own, so consider this locking as failsafe documentation...
         */
-       spin_lock_irqsave(&cookie->msi_lock, flags);
+       mutex_lock(&msi_prepare_lock);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
-       spin_unlock_irqrestore(&cookie->msi_lock, flags);
+       mutex_unlock(&msi_prepare_lock);
 
        msi_desc_set_iommu_cookie(desc, msi_page);
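Dropping the per-cookie spinlock in favour of a function-local static mutex keeps the serialisation (as the comment says, mostly as documentation) while allowing the msi_page allocation above to move from GFP_ATOMIC to GFP_KERNEL. A generic sketch of the pattern (struct thing and setup_thing() are hypothetical):

static int prepare_one(struct thing *t)
{
	static DEFINE_MUTEX(prepare_lock);	/* shared by every caller */
	int ret;

	mutex_lock(&prepare_lock);
	ret = setup_thing(t);			/* may sleep; GFP_KERNEL is fine */
	mutex_unlock(&prepare_lock);

	return ret;
}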
 
index 0c8d81f56a306800ca0d065716e632f48ad3eec3..932267f49f9a8c6529c120c938262a1d15f1f11f 100644 (file)
@@ -5163,7 +5163,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
 
        spin_lock_irqsave(&device_domain_lock, flags);
        info = dev->archdata.iommu;
-       if (info)
+       if (info && info != DEFER_DEVICE_DOMAIN_INFO
+           && info != DUMMY_DEVICE_DOMAIN_INFO)
                __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
@@ -5478,9 +5479,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
        int prot = 0;
        int ret;
 
-       if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
-               return -EINVAL;
-
        if (iommu_prot & IOMMU_READ)
                prot |= DMA_PTE_READ;
        if (iommu_prot & IOMMU_WRITE)
@@ -5523,8 +5521,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
        /* Cope with horrid API which requires us to unmap more than the
           size argument if it happens to be a large-page mapping. */
        BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
-       if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
-               return 0;
 
        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -5556,9 +5552,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
        int level = 0;
        u64 phys = 0;
 
-       if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
-               return 0;
-
        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
        if (pte)
                phys = dma_pte_addr(pte);
@@ -5632,8 +5625,10 @@ static int intel_iommu_add_device(struct device *dev)
 
        group = iommu_group_get_for_dev(dev);
 
-       if (IS_ERR(group))
-               return PTR_ERR(group);
+       if (IS_ERR(group)) {
+               ret = PTR_ERR(group);
+               goto unlink;
+       }
 
        iommu_group_put(group);
 
@@ -5659,7 +5654,8 @@ static int intel_iommu_add_device(struct device *dev)
                                if (!get_private_domain_for_dev(dev)) {
                                        dev_warn(dev,
                                                 "Failed to get a private domain.\n");
-                                       return -ENOMEM;
+                                       ret = -ENOMEM;
+                                       goto unlink;
                                }
 
                                dev_info(dev,
@@ -5674,6 +5670,10 @@ static int intel_iommu_add_device(struct device *dev)
        }
 
        return 0;
+
+unlink:
+       iommu_device_unlink(&iommu->iommu, dev);
+       return ret;
 }
 
 static void intel_iommu_remove_device(struct device *dev)
@@ -5736,8 +5736,8 @@ static void intel_iommu_get_resv_regions(struct device *device,
                struct pci_dev *pdev = to_pci_dev(device);
 
                if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
-                       reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
-                                                     IOMMU_RESV_DIRECT);
+                       reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
+                                                  IOMMU_RESV_DIRECT_RELAXABLE);
                        if (reg)
                                list_add_tail(&reg->list, head);
                }
@@ -5825,6 +5825,13 @@ static void intel_iommu_apply_resv_region(struct device *dev,
        WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
 }
 
+static struct iommu_group *intel_iommu_device_group(struct device *dev)
+{
+       if (dev_is_pci(dev))
+               return pci_device_group(dev);
+       return generic_device_group(dev);
+}
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 {
@@ -5997,7 +6004,7 @@ const struct iommu_ops intel_iommu_ops = {
        .get_resv_regions       = intel_iommu_get_resv_regions,
        .put_resv_regions       = intel_iommu_put_resv_regions,
        .apply_resv_region      = intel_iommu_apply_resv_region,
-       .device_group           = pci_device_group,
+       .device_group           = intel_iommu_device_group,
        .dev_has_feat           = intel_iommu_dev_has_feat,
        .dev_feat_enabled       = intel_iommu_dev_feat_enabled,
        .dev_enable_feat        = intel_iommu_dev_enable_feat,
index 9b159132405dc750111432bb5435abcae11040af..dca88f9fdf29a10b574ed5d123eabf4df7ac7a06 100644 (file)
@@ -104,11 +104,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 {
        struct qi_desc desc;
 
-       /*
-        * Do PASID granu IOTLB invalidation if page selective capability is
-        * not available.
-        */
-       if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+       if (pages == -1) {
                desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
                        QI_EIOTLB_DID(sdev->did) |
                        QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
index db7bfd4f2d20efe078581530bebba77b474239df..3ead597e1c5703c31feb5d3080efb69a33a65cee 100644 (file)
@@ -312,8 +312,8 @@ int iommu_insert_resv_region(struct iommu_resv_region *new,
        list_for_each_entry_safe(iter, tmp, regions, list) {
                phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
 
-               /* no merge needed on elements of different types than @nr */
-               if (iter->type != nr->type) {
+               /* no merge needed on elements of different types than @new */
+               if (iter->type != new->type) {
                        list_move_tail(&iter->list, &stack);
                        continue;
                }
@@ -751,6 +751,7 @@ err_put_group:
        mutex_unlock(&group->mutex);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
+       sysfs_remove_link(group->devices_kobj, device->name);
 err_free_name:
        kfree(device->name);
 err_remove_link:
@@ -2282,13 +2283,13 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
                goto out;
        }
 
-       iommu_group_create_direct_mappings(group, dev);
-
        /* Make the domain the default for this group */
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
        group->default_domain = domain;
 
+       iommu_group_create_direct_mappings(group, dev);
+
        dev_info(dev, "Using iommu %s mapping\n",
                 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
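The reordering in the last hunk is load-bearing: iommu_group_create_direct_mappings() works against the group's current default domain, so the direct mappings must be created after the newly allocated domain has been installed as that default, not into the old domain that is about to be freed.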
 
index 41c605b0058f9615c2dbdd83f1de2404a9b1d255..c7a914b9bbbc45d2b85571611ac39839dde55add 100644 (file)
@@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex);
 
 struct iova *alloc_iova_mem(void)
 {
-       return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+       return kmem_cache_zalloc(iova_cache, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(alloc_iova_mem);
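kmem_cache_zalloc() is kmem_cache_alloc() plus __GFP_ZERO, so every struct iova handed out here now starts fully zeroed instead of carrying stale field values from a previously freed object.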
 
index d246d74ec3a5b964cb3608e5b82f28c8e669ca16..23445ebfda5c1fea492de4ece16e7fd5bf32f78c 100644 (file)
@@ -298,7 +298,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
 
        /* Map internal tpci200 driver user space */
        tpci200->info->interface_regs =
-               ioremap_nocache(pci_resource_start(tpci200->info->pdev,
+               ioremap(pci_resource_start(tpci200->info->pdev,
                                           TPCI200_IP_INTERFACE_BAR),
                        TPCI200_IFACE_SIZE);
        if (!tpci200->info->interface_regs) {
@@ -541,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
                ret = -EBUSY;
                goto out_err_pci_request;
        }
-       tpci200->info->cfg_regs = ioremap_nocache(
+       tpci200->info->cfg_regs = ioremap(
                        pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
                        pci_resource_len(pdev, TPCI200_CFG_MEM_BAR));
        if (!tpci200->info->cfg_regs) {
index 9c2a4b5d30cfc0c3d7ba07ee15eccd7668e179d6..d480a514c983792082e4715c7a9990812db372ba 100644 (file)
@@ -276,7 +276,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
        ipoctal->board_id = ipoctal->dev->id_device;
 
        region = &ipoctal->dev->region[IPACK_IO_SPACE];
-       addr = devm_ioremap_nocache(&ipoctal->dev->dev,
+       addr = devm_ioremap(&ipoctal->dev->dev,
                                    region->start, region->size);
        if (!addr) {
                dev_err(&ipoctal->dev->dev,
@@ -292,7 +292,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 
        region = &ipoctal->dev->region[IPACK_INT_SPACE];
        ipoctal->int_space =
-               devm_ioremap_nocache(&ipoctal->dev->dev,
+               devm_ioremap(&ipoctal->dev->dev,
                                     region->start, region->size);
        if (!ipoctal->int_space) {
                dev_err(&ipoctal->dev->dev,
@@ -303,7 +303,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 
        region = &ipoctal->dev->region[IPACK_MEM8_SPACE];
        ipoctal->mem8_space =
-               devm_ioremap_nocache(&ipoctal->dev->dev,
+               devm_ioremap(&ipoctal->dev->dev,
                                     region->start, 0x8000);
        if (!ipoctal->mem8_space) {
                dev_err(&ipoctal->dev->dev,
index 697e6a8ccaaed8df91074d18e4bbac31a8fb2792..1006c694d9fb8f34ca8187a6ef73958d637910bb 100644 (file)
@@ -457,6 +457,12 @@ config IMX_IRQSTEER
        help
          Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
 
+config IMX_INTMUX
+       def_bool y if ARCH_MXC
+       select IRQ_DOMAIN
+       help
+         Support for the i.MX INTMUX interrupt multiplexer.
+
 config LS1X_IRQ
        bool "Loongson-1 Interrupt Controller"
        depends on MACH_LOONGSON32
@@ -490,6 +496,7 @@ config TI_SCI_INTA_IRQCHIP
 config SIFIVE_PLIC
        bool "SiFive Platform-Level Interrupt Controller"
        depends on RISCV
+       select IRQ_DOMAIN_HIERARCHY
        help
           This enables support for the PLIC chip found in SiFive (and
           potentially other) RISC-V systems.  The PLIC controls devices
@@ -499,4 +506,11 @@ config SIFIVE_PLIC
 
           If you don't know what to do here, say Y.
 
+config EXYNOS_IRQ_COMBINER
+       bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
+       depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST
+       help
+         Say yes here to add support for the IRQ combiner devices embedded
+         in Samsung Exynos chips.
+
 endmenu
index e806dda690ea8a24a0919baca41b4488d2249bc5..eae0d78cbf22c8528f2b2f6694df677a95692f9e 100644 (file)
@@ -9,7 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835)              += irq-bcm2835.o
 obj-$(CONFIG_ARCH_BCM2835)             += irq-bcm2836.o
 obj-$(CONFIG_DAVINCI_AINTC)            += irq-davinci-aintc.o
 obj-$(CONFIG_DAVINCI_CP_INTC)          += irq-davinci-cp-intc.o
-obj-$(CONFIG_ARCH_EXYNOS)              += exynos-combiner.o
+obj-$(CONFIG_EXYNOS_IRQ_COMBINER)      += exynos-combiner.o
 obj-$(CONFIG_FARADAY_FTINTC010)                += irq-ftintc010.o
 obj-$(CONFIG_ARCH_HIP04)               += irq-hip04.o
 obj-$(CONFIG_ARCH_LPC32XX)             += irq-lpc32xx.o
@@ -87,7 +87,7 @@ obj-$(CONFIG_MVEBU_SEI)                       += irq-mvebu-sei.o
 obj-$(CONFIG_LS_EXTIRQ)                        += irq-ls-extirq.o
 obj-$(CONFIG_LS_SCFG_MSI)              += irq-ls-scfg-msi.o
 obj-$(CONFIG_EZNPS_GIC)                        += irq-eznps.o
-obj-$(CONFIG_ARCH_ASPEED)              += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
+obj-$(CONFIG_ARCH_ASPEED)              += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
 obj-$(CONFIG_STM32_EXTI)               += irq-stm32-exti.o
 obj-$(CONFIG_QCOM_IRQ_COMBINER)                += qcom-irq-combiner.o
 obj-$(CONFIG_IRQ_UNIPHIER_AIDET)       += irq-uniphier-aidet.o
@@ -100,6 +100,7 @@ obj-$(CONFIG_CSKY_MPINTC)           += irq-csky-mpintc.o
 obj-$(CONFIG_CSKY_APB_INTC)            += irq-csky-apb-intc.o
 obj-$(CONFIG_SIFIVE_PLIC)              += irq-sifive-plic.o
 obj-$(CONFIG_IMX_IRQSTEER)             += irq-imx-irqsteer.o
+obj-$(CONFIG_IMX_INTMUX)               += irq-imx-intmux.o
 obj-$(CONFIG_MADERA_IRQ)               += irq-madera.o
 obj-$(CONFIG_LS1X_IRQ)                 += irq-ls1x.o
 obj-$(CONFIG_TI_SCI_INTR_IRQCHIP)      += irq-ti-sci-intr.o
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
new file mode 100644 (file)
index 0000000..c90a334
--- /dev/null
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller
+ * Copyright 2019 IBM Corporation
+ *
+ * Eddie James <eajames@linux.ibm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#define ASPEED_SCU_IC_REG              0x018
+#define ASPEED_SCU_IC_SHIFT            0
+#define ASPEED_SCU_IC_ENABLE           GENMASK(6, ASPEED_SCU_IC_SHIFT)
+#define ASPEED_SCU_IC_NUM_IRQS         7
+#define ASPEED_SCU_IC_STATUS_SHIFT     16
+
+#define ASPEED_AST2600_SCU_IC0_REG     0x560
+#define ASPEED_AST2600_SCU_IC0_SHIFT   0
+#define ASPEED_AST2600_SCU_IC0_ENABLE  \
+       GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT)
+#define ASPEED_AST2600_SCU_IC0_NUM_IRQS        6
+
+#define ASPEED_AST2600_SCU_IC1_REG     0x570
+#define ASPEED_AST2600_SCU_IC1_SHIFT   4
+#define ASPEED_AST2600_SCU_IC1_ENABLE  \
+       GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
+#define ASPEED_AST2600_SCU_IC1_NUM_IRQS        2
+
+struct aspeed_scu_ic {
+       unsigned long irq_enable;
+       unsigned long irq_shift;
+       unsigned int num_irqs;
+       unsigned int reg;
+       struct regmap *scu;
+       struct irq_domain *irq_domain;
+};
+
+static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+{
+       unsigned int irq;
+       unsigned int sts;
+       unsigned long bit;
+       unsigned long enabled;
+       unsigned long max;
+       unsigned long status;
+       struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
+
+       chained_irq_enter(chip, desc);
+
+       /*
+        * The SCU IC has just one register to control its operation and read
+        * status. The interrupt enable bits occupy the lower 16 bits of the
+        * register, while the interrupt status bits occupy the upper 16 bits.
+        * The status bit for a given interrupt is always 16 bits shifted from
+        * the enable bit for the same interrupt.
+        * Therefore, perform the IRQ operations in the enable bit space by
+        * shifting the status down to get the mapping and then back up to
+        * clear the bit.
+        */
+       regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+       enabled = sts & scu_ic->irq_enable;
+       status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
+
+       bit = scu_ic->irq_shift;
+       max = scu_ic->num_irqs + bit;
+
+       for_each_set_bit_from(bit, &status, max) {
+               irq = irq_find_mapping(scu_ic->irq_domain,
+                                      bit - scu_ic->irq_shift);
+               generic_handle_irq(irq);
+
+               regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
+                                  BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+       }
+
+       chained_irq_exit(chip, desc);
+}
+
+static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+{
+       struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+       unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
+               (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+       /*
+        * Status bits are cleared by writing 1, so they must be covered by
+        * the update mask and written as 0; otherwise the mask operation
+        * would inadvertently ack pending sources.
+        */
+       regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
+}
+
+static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+{
+       struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+       unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+       unsigned int mask = bit |
+               (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+
+       /*
+        * Status bits are cleared by writing 1, so they must be covered by
+        * the update mask and written as 0; otherwise the unmask operation
+        * would inadvertently ack pending sources.
+        */
+       regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
+}
+
+static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
+                                         const struct cpumask *dest,
+                                         bool force)
+{
+       return -EINVAL;
+}
+
+static struct irq_chip aspeed_scu_ic_chip = {
+       .name                   = "aspeed-scu-ic",
+       .irq_mask               = aspeed_scu_ic_irq_mask,
+       .irq_unmask             = aspeed_scu_ic_irq_unmask,
+       .irq_set_affinity       = aspeed_scu_ic_irq_set_affinity,
+};
+
+static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq,
+                            irq_hw_number_t hwirq)
+{
+       irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
+       irq_set_chip_data(irq, domain->host_data);
+
+       return 0;
+}
+
+static const struct irq_domain_ops aspeed_scu_ic_domain_ops = {
+       .map = aspeed_scu_ic_map,
+};
+
+static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
+                                       struct device_node *node)
+{
+       int irq;
+       int rc = 0;
+
+       if (!node->parent) {
+               rc = -ENODEV;
+               goto err;
+       }
+
+       scu_ic->scu = syscon_node_to_regmap(node->parent);
+       if (IS_ERR(scu_ic->scu)) {
+               rc = PTR_ERR(scu_ic->scu);
+               goto err;
+       }
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (!irq) {
+               /* irq_of_parse_and_map() returns 0 on failure, never a negative errno */
+               rc = -EINVAL;
+               goto err;
+       }
+
+       scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs,
+                                                  &aspeed_scu_ic_domain_ops,
+                                                  scu_ic);
+       if (!scu_ic->irq_domain) {
+               rc = -ENOMEM;
+               goto err;
+       }
+
+       irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler,
+                                        scu_ic);
+
+       return 0;
+
+err:
+       kfree(scu_ic);
+
+       return rc;
+}
+
+static int __init aspeed_scu_ic_of_init(struct device_node *node,
+                                       struct device_node *parent)
+{
+       struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+       if (!scu_ic)
+               return -ENOMEM;
+
+       scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE;
+       scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT;
+       scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS;
+       scu_ic->reg = ASPEED_SCU_IC_REG;
+
+       return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node,
+                                                struct device_node *parent)
+{
+       struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+       if (!scu_ic)
+               return -ENOMEM;
+
+       scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE;
+       scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT;
+       scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS;
+       scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG;
+
+       return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
+                                                struct device_node *parent)
+{
+       struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+
+       if (!scu_ic)
+               return -ENOMEM;
+
+       scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE;
+       scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT;
+       scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS;
+       scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG;
+
+       return aspeed_scu_ic_of_init_common(scu_ic, node);
+}
+
+IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
+               aspeed_ast2600_scu_ic0_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
+               aspeed_ast2600_scu_ic1_of_init);
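A condensed sketch of the register layout the new driver's comments describe: enable bits sit in the low half, their write-one-to-clear status twins sit 16 bits above, so acknowledging one source means writing a lone 1 into the status half while the rest of the masked bits go out as harmless zeroes (ack_source() is a hypothetical helper, not part of the driver):

#define STATUS_SHIFT	16

static void ack_source(struct regmap *map, unsigned int reg,
		       unsigned int enables, unsigned int bit)
{
	/*
	 * Confine the update to the W1C status bits; the 0s written to
	 * the other status positions leave pending sources untouched.
	 */
	regmap_update_bits(map, reg, enables << STATUS_SHIFT,
			   BIT(bit + STATUS_SHIFT));
}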
index e05673bcd52bdad6adff75c48140baaeea1a79c1..f71758632f8dfd15979ed88d9fdf03a8ef071097 100644 (file)
@@ -106,6 +106,7 @@ struct its_node {
        u64                     typer;
        u64                     cbaser_save;
        u32                     ctlr_save;
+       u32                     mpidr;
        struct list_head        its_device_list;
        u64                     flags;
        unsigned long           list_nr;
@@ -116,12 +117,22 @@ struct its_node {
 };
 
 #define is_v4(its)             (!!((its)->typer & GITS_TYPER_VLPIS))
+#define is_v4_1(its)           (!!((its)->typer & GITS_TYPER_VMAPP))
 #define device_ids(its)                (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
 
 #define ITS_ITT_ALIGN          SZ_256
 
 /* The maximum number of VPEID bits supported by VLPI commands */
-#define ITS_MAX_VPEID_BITS     (16)
+#define ITS_MAX_VPEID_BITS                                             \
+       ({                                                              \
+               int nvpeid = 16;                                        \
+               if (gic_rdists->has_rvpeid &&                           \
+                   gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)          \
+                       nvpeid = 1 + (gic_rdists->gicd_typer2 &         \
+                                     GICD_TYPER2_VID);                 \
+                                                                       \
+               nvpeid;                                                 \
+       })
 #define ITS_MAX_VPEID          (1 << (ITS_MAX_VPEID_BITS))
 
 /* Convert page order to size in bytes */
@@ -216,11 +227,27 @@ static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
        return &its_dev->event_map.vlpi_maps[event];
 }
 
-static struct its_collection *irq_to_col(struct irq_data *d)
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+{
+       if (irqd_is_forwarded_to_vcpu(d)) {
+               struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+               u32 event = its_get_event_id(d);
+
+               return dev_event_to_vlpi_map(its_dev, event);
+       }
+
+       return NULL;
+}
+
+static int irq_to_cpuid(struct irq_data *d)
 {
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       struct its_vlpi_map *map = get_vlpi_map(d);
 
-       return dev_event_to_col(its_dev, its_get_event_id(d));
+       if (map)
+               return map->vpe->col_idx;
+
+       return its_dev->event_map.col_map[its_get_event_id(d)];
 }
 
 static struct its_collection *valid_col(struct its_collection *col)
@@ -322,6 +349,10 @@ struct its_cmd_desc {
                        u16 seq_num;
                        u16 its_list;
                } its_vmovp_cmd;
+
+               struct {
+                       struct its_vpe *vpe;
+               } its_invdb_cmd;
        };
 };
 
@@ -438,6 +469,38 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
        its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
 }
 
+static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
+{
+       its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
+}
+
+static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
+{
+       its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
+}
+
+static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
+{
+       its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
+}
+
+static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
+                                       u32 vpe_db_lpi)
+{
+       its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
+                                       u32 vpe_db_lpi)
+{
+       its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
+}
+
+static void its_encode_db(struct its_cmd_block *cmd, bool db)
+{
+       its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
        /* Let's fixup BE commands */
@@ -621,19 +684,45 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
                                           struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
 {
-       unsigned long vpt_addr;
+       unsigned long vpt_addr, vconf_addr;
        u64 target;
-
-       vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
-       target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+       bool alloc;
 
        its_encode_cmd(cmd, GITS_CMD_VMAPP);
        its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
        its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+
+       if (!desc->its_vmapp_cmd.valid) {
+               if (is_v4_1(its)) {
+                       alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+                       its_encode_alloc(cmd, alloc);
+               }
+
+               goto out;
+       }
+
+       vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+       target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
+
        its_encode_target(cmd, target);
        its_encode_vpt_addr(cmd, vpt_addr);
        its_encode_vpt_size(cmd, LPI_NRBITS - 1);
 
+       if (!is_v4_1(its))
+               goto out;
+
+       vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
+
+       alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
+       its_encode_alloc(cmd, alloc);
+
+       /* We can only signal PTZ when alloc==1. Why do we have two bits? */
+       its_encode_ptz(cmd, alloc);
+       its_encode_vconf_addr(cmd, vconf_addr);
+       its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
+
+out:
        its_fixup_cmd(cmd);
 
        return valid_vpe(its, desc->its_vmapp_cmd.vpe);
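The vmapp_count handling above is reference counting across redistributor mappings: the first VMAPP (atomic_fetch_inc() reading 0) is the one that may set ALLOC and PTZ, and only the unmapping VMAPP that drops the count back to zero tells the ITS the vPE's configuration tables can be released.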
@@ -645,7 +734,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
 {
        u32 db;
 
-       if (desc->its_vmapti_cmd.db_enabled)
+       if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
                db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
        else
                db = 1023;
@@ -668,7 +757,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
 {
        u32 db;
 
-       if (desc->its_vmovi_cmd.db_enabled)
+       if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
                db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
        else
                db = 1023;
@@ -698,6 +787,11 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
        its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
        its_encode_target(cmd, target);
 
+       if (is_v4_1(its)) {
+               its_encode_db(cmd, true);
+               its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
+       }
+
        its_fixup_cmd(cmd);
 
        return valid_vpe(its, desc->its_vmovp_cmd.vpe);
@@ -757,6 +851,21 @@ static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
        return valid_vpe(its, map->vpe);
 }
 
+static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
+                                          struct its_cmd_block *cmd,
+                                          struct its_cmd_desc *desc)
+{
+       if (WARN_ON(!is_v4_1(its)))
+               return NULL;
+
+       its_encode_cmd(cmd, GITS_CMD_INVDB);
+       its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
+
+       its_fixup_cmd(cmd);
+
+       return valid_vpe(its, desc->its_invdb_cmd.vpe);
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
                                 struct its_cmd_block *ptr)
 {
@@ -1165,20 +1274,17 @@ static void its_send_vclear(struct its_device *dev, u32 event_id)
        its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
 }
 
-/*
- * irqchip functions - assumes MSI, mostly.
- */
-static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
+static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
 {
-       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
-       u32 event = its_get_event_id(d);
-
-       if (!irqd_is_forwarded_to_vcpu(d))
-               return NULL;
+       struct its_cmd_desc desc;
 
-       return dev_event_to_vlpi_map(its_dev, event);
+       desc.its_invdb_cmd.vpe = vpe;
+       its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
 }
 
+/*
+ * irqchip functions - assumes MSI, mostly.
+ */
 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
 {
        struct its_vlpi_map *map = get_vlpi_map(d);
@@ -1221,13 +1327,25 @@ static void wait_for_syncr(void __iomem *rdbase)
 
 static void direct_lpi_inv(struct irq_data *d)
 {
-       struct its_collection *col;
+       struct its_vlpi_map *map = get_vlpi_map(d);
        void __iomem *rdbase;
+       u64 val;
+
+       if (map) {
+               struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+               WARN_ON(!is_v4_1(its_dev->its));
+
+               val  = GICR_INVLPIR_V;
+               val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
+               val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
+       } else {
+               val = d->hwirq;
+       }
 
        /* Target the redistributor this LPI is currently routed to */
-       col = irq_to_col(d);
-       rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
-       gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
+       rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+       gic_write_lpir(val, rdbase + GICR_INVLPIR);
 
        wait_for_syncr(rdbase);
 }
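For the vLPI case, the single register write above packs three fields, e.g. (illustrative values) val = GICR_INVLPIR_V | FIELD_PREP(GICR_INVLPIR_VPEID, 5) | FIELD_PREP(GICR_INVLPIR_INTID, 8192); for a plain physical LPI it degenerates to the bare d->hwirq, exactly as the old code wrote.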
@@ -1237,7 +1355,8 @@ static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 
        lpi_write_config(d, clr, set);
-       if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
+       if (gic_rdists->has_direct_lpi &&
+           (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
                direct_lpi_inv(d);
        else if (!irqd_is_forwarded_to_vcpu(d))
                its_send_inv(its_dev, its_get_event_id(d));
@@ -1251,6 +1370,13 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
        u32 event = its_get_event_id(d);
        struct its_vlpi_map *map;
 
+       /*
+        * GICv4.1 does away with the per-LPI nonsense, nothing to do
+        * here.
+        */
+       if (is_v4_1(its_dev->its))
+               return;
+
        map = dev_event_to_vlpi_map(its_dev, event);
 
        if (map->db_enabled == enable)
@@ -2090,6 +2216,65 @@ static bool its_parse_indirect_baser(struct its_node *its,
        return indirect;
 }
 
+static u32 compute_common_aff(u64 val)
+{
+       u32 aff, clpiaff;
+
+       aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
+       clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
+
+       return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
+}
+
+static u32 compute_its_aff(struct its_node *its)
+{
+       u64 val;
+       u32 svpet;
+
+       /*
+        * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
+        * the resulting affinity. We then use that to see if this matches
+        * our own affinity.
+        */
+       svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
+       val  = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
+       val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
+       return compute_common_aff(val);
+}
+
+static struct its_node *find_sibling_its(struct its_node *cur_its)
+{
+       struct its_node *its;
+       u32 aff;
+
+       if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
+               return NULL;
+
+       aff = compute_its_aff(cur_its);
+
+       list_for_each_entry(its, &its_nodes, entry) {
+               u64 baser;
+
+               if (!is_v4_1(its) || its == cur_its)
+                       continue;
+
+               if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+                       continue;
+
+               if (aff != compute_its_aff(its))
+                       continue;
+
+               /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+               baser = its->tables[2].val;
+               if (!(baser & GITS_BASER_VALID))
+                       continue;
+
+               return its;
+       }
+
+       return NULL;
+}
+
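A worked example of the affinity masking above (illustrative CommonLPIAff values): clpiaff == 2 means "common at Aff2", so GENMASK(31, 0) >> 16 covers the Aff1.Aff0 bytes and compute_common_aff() keeps only Aff3.Aff2, meaning two ITSes or redistributors may share a vPE table iff those two bytes agree; clpiaff == 0 masks everything away, so every component matches.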
 static void its_free_tables(struct its_node *its)
 {
        int i;
@@ -2132,6 +2317,17 @@ static int its_alloc_tables(struct its_node *its)
                        break;
 
                case GITS_BASER_TYPE_VCPU:
+                       if (is_v4_1(its)) {
+                               struct its_node *sibling;
+
+                               WARN_ON(i != 2);
+                               if ((sibling = find_sibling_its(its))) {
+                                       *baser = sibling->tables[2];
+                                       its_write_baser(its, baser, baser->val);
+                                       continue;
+                               }
+                       }
+
                        indirect = its_parse_indirect_baser(its, baser,
                                                            psz, &order,
                                                            ITS_MAX_VPEID_BITS);
@@ -2153,6 +2349,220 @@ static int its_alloc_tables(struct its_node *its)
        return 0;
 }
 
+static u64 inherit_vpe_l1_table_from_its(void)
+{
+       struct its_node *its;
+       u64 val;
+       u32 aff;
+
+       val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+       aff = compute_common_aff(val);
+
+       list_for_each_entry(its, &its_nodes, entry) {
+               u64 baser, addr;
+
+               if (!is_v4_1(its))
+                       continue;
+
+               if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+                       continue;
+
+               if (aff != compute_its_aff(its))
+                       continue;
+
+               /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+               baser = its->tables[2].val;
+               if (!(baser & GITS_BASER_VALID))
+                       continue;
+
+               /* We have a winner! */
+               val  = GICR_VPROPBASER_4_1_VALID;
+               if (baser & GITS_BASER_INDIRECT)
+                       val |= GICR_VPROPBASER_4_1_INDIRECT;
+               val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
+                                 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
+               switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
+               case GIC_PAGE_SIZE_64K:
+                       addr = GITS_BASER_ADDR_48_to_52(baser);
+                       break;
+               default:
+                       addr = baser & GENMASK_ULL(47, 12);
+                       break;
+               }
+               val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
+               val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+                                 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+               val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+                                 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+               val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
+
+               return val;
+       }
+
+       return 0;
+}
+
+static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
+{
+       u32 aff;
+       u64 val;
+       int cpu;
+
+       val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
+       aff = compute_common_aff(val);
+
+       for_each_possible_cpu(cpu) {
+               void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+               u32 tmp;
+
+               if (!base || cpu == smp_processor_id())
+                       continue;
+
+               val = gic_read_typer(base + GICR_TYPER);
+               tmp = compute_common_aff(val);
+               if (tmp != aff)
+                       continue;
+
+               /*
+                * At this point, we have a victim. This particular CPU
+                * has already booted, and has an affinity that matches
+                * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
+                * Make sure we don't write the Z bit in that case.
+                */
+               val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+               val &= ~GICR_VPROPBASER_4_1_Z;
+
+               *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
+
+               return val;
+       }
+
+       return 0;
+}
+
+static int allocate_vpe_l1_table(void)
+{
+       void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+       u64 val, gpsz, npg, pa;
+       unsigned int psz = SZ_64K;
+       unsigned int np, epp, esz;
+       struct page *page;
+
+       if (!gic_rdists->has_rvpeid)
+               return 0;
+
+       /*
+        * If VPENDBASER.Valid is set, disable any previously programmed
+        * VPE by setting PendingLast while clearing Valid. This has the
+        * effect of making sure no doorbell will be generated and we can
+        * then safely clear VPROPBASER.Valid.
+        */
+       if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
+               gits_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+                                     vlpi_base + GICR_VPENDBASER);
+
+       /*
+        * If we can inherit the configuration from another RD, let's do
+        * so. Otherwise, we have to go through the allocation process. We
+        * assume that all RDs have the exact same requirements, as
+        * nothing will work otherwise.
+        */
+       val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
+       if (val & GICR_VPROPBASER_4_1_VALID)
+               goto out;
+
+       gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL);
+       if (!gic_data_rdist()->vpe_table_mask)
+               return -ENOMEM;
+
+       val = inherit_vpe_l1_table_from_its();
+       if (val & GICR_VPROPBASER_4_1_VALID)
+               goto out;
+
+       /* First probe the page size */
+       val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
+       gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+       val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
+       gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+       esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
+
+       switch (gpsz) {
+       default:
+               gpsz = GIC_PAGE_SIZE_4K;
+               /* fall through */
+       case GIC_PAGE_SIZE_4K:
+               psz = SZ_4K;
+               break;
+       case GIC_PAGE_SIZE_16K:
+               psz = SZ_16K;
+               break;
+       case GIC_PAGE_SIZE_64K:
+               psz = SZ_64K;
+               break;
+       }
+
+       /*
+        * Start populating the register from scratch, including RO fields
+        * (which we want to print in debug cases...)
+        */
+       val = 0;
+       val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
+       val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
+
+       /* How many entries per GIC page? */
+       esz++;
+       epp = psz / (esz * SZ_8);
+
+       /*
+        * If we need more than just a single L1 page, flag the table
+        * as indirect and compute the number of required L1 pages.
+        */
+       if (epp < ITS_MAX_VPEID) {
+               int nl2;
+
+               val |= GICR_VPROPBASER_4_1_INDIRECT;
+
+               /* Number of L2 pages required to cover the VPEID space */
+               nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
+
+               /* Number of L1 pages to point to the L2 pages */
+               npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
+       } else {
+               npg = 1;
+       }
+
+       val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg);
+
+       /* Right, that's the number of CPU pages we need for L1 */
+       np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
+
+       pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
+                np, npg, psz, epp, esz);
+       page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE));
+       if (!page)
+               return -ENOMEM;
+
+       gic_data_rdist()->vpe_l1_page = page;
+       pa = virt_to_phys(page_address(page));
+       WARN_ON(!IS_ALIGNED(pa, psz));
+
+       val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
+       val |= GICR_VPROPBASER_RaWb;
+       val |= GICR_VPROPBASER_InnerShareable;
+       val |= GICR_VPROPBASER_4_1_Z;
+       val |= GICR_VPROPBASER_4_1_VALID;
+
+out:
+       gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+       cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
+
+       pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
+                smp_processor_id(), val,
+                cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
+
+       return 0;
+}
+
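To make the sizing arithmetic concrete (illustrative values, not required by the spec): with 64kB GIC pages (psz = SZ_64K) and an ENTRY_SIZE field of 1, each vPE entry is (1 + 1) * 8 = 16 bytes, giving epp = 65536 / 16 = 4096 entries per page. A 16-bit VPEID space needs 65536 entries, so epp < ITS_MAX_VPEID forces the indirect layout: nl2 = DIV_ROUND_UP(65536, 4096) = 16 L2 pages, then npg = DIV_ROUND_UP(16 * 8, 65536) = 1, i.e. a single 64kB L1 page.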
 static int its_alloc_collections(struct its_node *its)
 {
        int i;
@@ -2244,7 +2654,7 @@ static int __init allocate_lpi_tables(void)
        return 0;
 }
 
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
 {
        u32 count = 1000000;    /* 1s! */
        bool clean;
@@ -2252,6 +2662,8 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
 
        val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
        val &= ~GICR_VPENDBASER_Valid;
+       val &= ~clr;
+       val |= set;
        gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
 
        do {
@@ -2264,6 +2676,11 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
                }
        } while (!clean && count);
 
+       if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+               pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+               val |= GICR_VPENDBASER_PendingLast;
+       }
+
        return val;
 }
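The new clr/set parameters let callers fold extra VPENDBASER bit updates into the same write that drops Valid; the GICv4.1 deschedule path later in this patch relies on that, e.g. its_clear_vpend_valid(vlpi_base, GICR_VPENDBASER_PendingLast, GICR_VPENDBASER_4_1_DB) to request a doorbell while clearing PendingLast.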
 
@@ -2352,7 +2769,7 @@ static void its_cpu_init_lpis(void)
        val |= GICR_CTLR_ENABLE_LPIS;
        writel_relaxed(val, rbase + GICR_CTLR);
 
-       if (gic_rdists->has_vlpis) {
+       if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
                void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
 
                /*
@@ -2372,10 +2789,20 @@ static void its_cpu_init_lpis(void)
                 * ancient programming gets left in and has possibility of
                 * corrupting memory.
                 */
-               val = its_clear_vpend_valid(vlpi_base);
+               val = its_clear_vpend_valid(vlpi_base, 0, 0);
                WARN_ON(val & GICR_VPENDBASER_Dirty);
        }
 
+       if (allocate_vpe_l1_table()) {
+               /*
+                * If the allocation has failed, we're in massive trouble.
+                * Disable direct injection, and pray that no VM was
+                * already running...
+                */
+               gic_rdists->has_rvpeid = false;
+               gic_rdists->has_vlpis = false;
+       }
+
        /* Make sure the GIC has seen the above */
        dsb(sy);
 out:
@@ -2859,7 +3286,7 @@ static const struct irq_domain_ops its_domain_ops = {
 /*
  * This is insane.
  *
- * If a GICv4 doesn't implement Direct LPIs (which is extremely
+ * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
  * likely), the only way to perform an invalidate is to use a fake
  * device to issue an INV command, implying that the LPI has first
  * been mapped to some event on that device. Since this is not exactly
@@ -2867,9 +3294,20 @@ static const struct irq_domain_ops its_domain_ops = {
  * only issue an UNMAP if we're short on available slots.
  *
  * Broken by design(tm).
+ *
+ * GICv4.1, on the other hand, mandates that we're able to invalidate
+ * by writing to a MMIO register. It doesn't implement the whole of
+ * DirectLPI, but that's good enough. And most of the time, we don't
+ * even have to invalidate anything, as the redistributor can be told
+ * whether to generate a doorbell or not (we thus leave it enabled,
+ * always).
  */
 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
 {
+       /* GICv4.1 doesn't use a proxy, so nothing to do here */
+       if (gic_rdists->has_rvpeid)
+               return;
+
        /* Already unmapped? */
        if (vpe->vpe_proxy_event == -1)
                return;
@@ -2892,6 +3330,10 @@ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
 
 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
 {
+       /* GICv4.1 doesn't use a proxy, so nothing to do here */
+       if (gic_rdists->has_rvpeid)
+               return;
+
        if (!gic_rdists->has_direct_lpi) {
                unsigned long flags;
 
@@ -2903,6 +3345,10 @@ static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
 
 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
 {
+       /* GICv4.1 doesn't use a proxy, so nothing to do here */
+       if (gic_rdists->has_rvpeid)
+               return;
+
        /* Already mapped? */
        if (vpe->vpe_proxy_event != -1)
                return;
@@ -2925,6 +3371,10 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
        unsigned long flags;
        struct its_collection *target_col;
 
+       /* GICv4.1 doesn't use a proxy, so nothing to do here */
+       if (gic_rdists->has_rvpeid)
+               return;
+
        if (gic_rdists->has_direct_lpi) {
                void __iomem *rdbase;
 
@@ -2951,7 +3401,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
                                bool force)
 {
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-       int cpu = cpumask_first(mask_val);
+       int from, cpu = cpumask_first(mask_val);
 
        /*
         * Changing affinity is mega expensive, so let's be as lazy as
@@ -2959,14 +3409,24 @@ static int its_vpe_set_affinity(struct irq_data *d,
         * into the proxy device, we need to move the doorbell
         * interrupt to its new location.
         */
-       if (vpe->col_idx != cpu) {
-               int from = vpe->col_idx;
+       if (vpe->col_idx == cpu)
+               goto out;
 
-               vpe->col_idx = cpu;
-               its_send_vmovp(vpe);
-               its_vpe_db_proxy_move(vpe, from, cpu);
-       }
+       from = vpe->col_idx;
+       vpe->col_idx = cpu;
 
+       /*
+        * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
+        * is sharing its VPE table with the current one.
+        */
+       if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
+           cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+               goto out;
+
+       its_send_vmovp(vpe);
+       its_vpe_db_proxy_move(vpe, from, cpu);
+
+out:
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK_DONE;
@@ -3009,16 +3469,10 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
        void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
        u64 val;
 
-       val = its_clear_vpend_valid(vlpi_base);
+       val = its_clear_vpend_valid(vlpi_base, 0, 0);
 
-       if (unlikely(val & GICR_VPENDBASER_Dirty)) {
-               pr_err_ratelimited("ITS virtual pending table not cleaning\n");
-               vpe->idai = false;
-               vpe->pending_last = true;
-       } else {
-               vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
-               vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
-       }
+       vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+       vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
 }
 
 static void its_vpe_invall(struct its_vpe *vpe)
@@ -3151,6 +3605,139 @@ static struct irq_chip its_vpe_irq_chip = {
        .irq_set_vcpu_affinity  = its_vpe_set_vcpu_affinity,
 };
 
+static struct its_node *find_4_1_its(void)
+{
+       static struct its_node *its = NULL;
+
+       if (!its) {
+               list_for_each_entry(its, &its_nodes, entry) {
+                       if (is_v4_1(its))
+                               return its;
+               }
+
+               /* Oops? */
+               its = NULL;
+       }
+
+       return its;
+}
+
+static void its_vpe_4_1_send_inv(struct irq_data *d)
+{
+       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+       struct its_node *its;
+
+       /*
+        * GICv4.1 wants doorbells to be invalidated using the
+        * INVDB command in order to be broadcast to all RDs. Send
+        * it to the first valid ITS, and let the HW do its magic.
+        */
+       its = find_4_1_its();
+       if (its)
+               its_send_invdb(its, vpe);
+}
+
+static void its_vpe_4_1_mask_irq(struct irq_data *d)
+{
+       lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+       its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_unmask_irq(struct irq_data *d)
+{
+       lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+       its_vpe_4_1_send_inv(d);
+}
+
+static void its_vpe_4_1_schedule(struct its_vpe *vpe,
+                                struct its_cmd_info *info)
+{
+       void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+       u64 val = 0;
+
+       /* Schedule the VPE */
+       val |= GICR_VPENDBASER_Valid;
+       val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
+       val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
+       val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
+
+       gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
+                                  struct its_cmd_info *info)
+{
+       void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+       u64 val;
+
+       if (info->req_db) {
+               /*
+                * vPE is going to block: make the vPE non-resident with
+                * PendingLast clear and DB set. The GIC guarantees that if
+                * we read back PendingLast as clear, then a doorbell will be
+                * delivered when an interrupt comes.
+                */
+               val = its_clear_vpend_valid(vlpi_base,
+                                           GICR_VPENDBASER_PendingLast,
+                                           GICR_VPENDBASER_4_1_DB);
+               vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+       } else {
+               /*
+                * We're not blocking, so just make the vPE non-resident
+                * with PendingLast set, indicating that we'll be back.
+                */
+               val = its_clear_vpend_valid(vlpi_base,
+                                           0,
+                                           GICR_VPENDBASER_PendingLast);
+               vpe->pending_last = true;
+       }
+}
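
The two branches above are the GICv4.1 "make non-resident" flavours: a blocking vPE clears PendingLast and requests a doorbell, while a non-blocking one simply records PendingLast so the vPE is known to be coming back. The toy model below mirrors how the clr/set parameters are applied after Valid is dropped; it omits the real helper's register read-back and Dirty polling, and the bit positions are made up:

/* Toy model of the two "make non-resident" flavours; illustrative bits. */
#include <stdint.h>
#include <stdio.h>

#define VPENDBASER_VALID        (1ULL << 63)
#define VPENDBASER_PENDING_LAST (1ULL << 62)
#define VPENDBASER_DB           (1ULL << 61)

/* drop Valid, then clear 'clr' bits and set 'set' bits */
static uint64_t clear_vpend_valid(uint64_t reg, uint64_t clr, uint64_t set)
{
        reg &= ~VPENDBASER_VALID;
        reg &= ~clr;
        reg |= set;
        return reg;
}

int main(void)
{
        uint64_t reg = VPENDBASER_VALID | VPENDBASER_PENDING_LAST;

        /* blocking vPE: ask for a doorbell, let HW report PendingLast */
        uint64_t blocked = clear_vpend_valid(reg, VPENDBASER_PENDING_LAST,
                                             VPENDBASER_DB);
        /* non-blocking vPE: just record that we will be back */
        uint64_t parked = clear_vpend_valid(reg, 0, VPENDBASER_PENDING_LAST);

        printf("blocked: db=%d pending_last=%d\n",
               !!(blocked & VPENDBASER_DB),
               !!(blocked & VPENDBASER_PENDING_LAST));
        printf("parked:  pending_last=%d\n",
               !!(parked & VPENDBASER_PENDING_LAST));
        return 0;
}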
+
+static void its_vpe_4_1_invall(struct its_vpe *vpe)
+{
+       void __iomem *rdbase;
+       u64 val;
+
+       val  = GICR_INVALLR_V;
+       val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
+
+       /* Target the redistributor this vPE is currently known on */
+       rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+       gic_write_lpir(val, rdbase + GICR_INVALLR);
+}
+
+static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+       struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+       struct its_cmd_info *info = vcpu_info;
+
+       switch (info->cmd_type) {
+       case SCHEDULE_VPE:
+               its_vpe_4_1_schedule(vpe, info);
+               return 0;
+
+       case DESCHEDULE_VPE:
+               its_vpe_4_1_deschedule(vpe, info);
+               return 0;
+
+       case INVALL_VPE:
+               its_vpe_4_1_invall(vpe);
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static struct irq_chip its_vpe_4_1_irq_chip = {
+       .name                   = "GICv4.1-vpe",
+       .irq_mask               = its_vpe_4_1_mask_irq,
+       .irq_unmask             = its_vpe_4_1_unmask_irq,
+       .irq_eoi                = irq_chip_eoi_parent,
+       .irq_set_affinity       = its_vpe_set_affinity,
+       .irq_set_vcpu_affinity  = its_vpe_4_1_set_vcpu_affinity,
+};
+
 static int its_vpe_id_alloc(void)
 {
        return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
@@ -3186,7 +3773,10 @@ static int its_vpe_init(struct its_vpe *vpe)
 
        vpe->vpe_id = vpe_id;
        vpe->vpt_page = vpt_page;
-       vpe->vpe_proxy_event = -1;
+       if (gic_rdists->has_rvpeid)
+               atomic_set(&vpe->vmapp_count, 0);
+       else
+               vpe->vpe_proxy_event = -1;
 
        return 0;
 }
@@ -3228,6 +3818,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                    unsigned int nr_irqs, void *args)
 {
+       struct irq_chip *irqchip = &its_vpe_irq_chip;
        struct its_vm *vm = args;
        unsigned long *bitmap;
        struct page *vprop_page;
@@ -3255,6 +3846,9 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
        vm->nr_db_lpis = nr_ids;
        vm->vprop_page = vprop_page;
 
+       if (gic_rdists->has_rvpeid)
+               irqchip = &its_vpe_4_1_irq_chip;
+
        for (i = 0; i < nr_irqs; i++) {
                vm->vpes[i]->vpe_db_lpi = base + i;
                err = its_vpe_init(vm->vpes[i]);
@@ -3265,7 +3859,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
                if (err)
                        break;
                irq_domain_set_hwirq_and_chip(domain, virq + i, i,
-                                             &its_vpe_irq_chip, vm->vpes[i]);
+                                             irqchip, vm->vpes[i]);
                set_bit(i, bitmap);
        }
 
@@ -3778,6 +4372,14 @@ static int __init its_probe_one(struct resource *res,
                } else {
                        pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
                }
+
+               if (is_v4_1(its)) {
+                       u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+                       its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
+
+                       pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
+                               &res->start, its->mpidr, svpet);
+               }
        }
 
        its->numa_node = numa_node;
@@ -4138,6 +4740,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
        bool has_v4 = false;
        int err;
 
+       gic_rdists = rdists;
+
        its_parent = parent_domain;
        of_node = to_of_node(handle);
        if (of_node)
@@ -4150,8 +4754,6 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
                return -ENXIO;
        }
 
-       gic_rdists = rdists;
-
        err = allocate_lpi_tables();
        if (err)
                return err;
index d6218012097b42901e2d727c2b4ca207574cff1c..286f98222878128758da9fc81ba18e5e2fe3e01c 100644 (file)
@@ -858,8 +858,21 @@ static int __gic_update_rdist_properties(struct redist_region *region,
                                         void __iomem *ptr)
 {
        u64 typer = gic_read_typer(ptr + GICR_TYPER);
+
        gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
-       gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
+
+       /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
+       gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+       gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+                                          gic_data.rdists.has_rvpeid);
+
+       /* Detect nonsensical configurations */
+       if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+               gic_data.rdists.has_direct_lpi = false;
+               gic_data.rdists.has_vlpis = false;
+               gic_data.rdists.has_rvpeid = false;
+       }
+
        gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
 
        return 1;
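
A standalone model of the sanitization above, under the same rules: RVPEID implies some form of DirectLPI, and RVPEID without VLPIS is treated as broken and disables all three features. Field names mirror the kernel's, but this is not the kernel code:

/* Standalone model of per-redistributor feature sanitization. */
#include <stdbool.h>
#include <stdio.h>

struct rdists {
        bool has_vlpis;
        bool has_rvpeid;
        bool has_direct_lpi;
};

static void update_props(struct rdists *r, bool typer_vlpis,
                         bool typer_rvpeid, bool typer_direct_lpi)
{
        r->has_vlpis &= typer_vlpis;
        r->has_rvpeid &= typer_rvpeid;
        /* RVPEID implies some form of DirectLPI */
        r->has_direct_lpi &= (typer_direct_lpi || r->has_rvpeid);

        /* RVPEID without VLPIS makes no sense: disable the lot */
        if (r->has_rvpeid && !r->has_vlpis) {
                r->has_vlpis = false;
                r->has_rvpeid = false;
                r->has_direct_lpi = false;
        }
}

int main(void)
{
        struct rdists r = { true, true, true }; /* optimistic defaults */

        update_props(&r, false, true, false);   /* broken: RVPEID, no VLPIS */
        printf("vlpis=%d rvpeid=%d direct_lpi=%d\n",
               r.has_vlpis, r.has_rvpeid, r.has_direct_lpi);
        return 0;
}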
@@ -872,9 +885,10 @@ static void gic_update_rdist_properties(void)
        if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
                gic_data.ppi_nr = 0;
        pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
-       pr_info("%sVLPI support, %sdirect LPI support\n",
+       pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
                !gic_data.rdists.has_vlpis ? "no " : "",
-               !gic_data.rdists.has_direct_lpi ? "no " : "");
+               !gic_data.rdists.has_direct_lpi ? "no " : "",
+               !gic_data.rdists.has_rvpeid ? "no " : "");
 }
 
 /* Check whether it's single security state view */
@@ -1562,10 +1576,14 @@ static int __init gic_init_bases(void __iomem *dist_base,
 
        pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
        pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
+
+       gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
+
        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
                                                 &gic_data);
        irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
        gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+       gic_data.rdists.has_rvpeid = true;
        gic_data.rdists.has_vlpis = true;
        gic_data.rdists.has_direct_lpi = true;
 
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
new file mode 100644 (file)
index 0000000..c27577c
--- /dev/null
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 NXP
+
+/*                     INTMUX Block Diagram
+ *
+ *                               ________________
+ * interrupt source #  0  +---->|                |
+ *                        |     |                |
+ * interrupt source #  1  +++-->|                |
+ *            ...         | |   |   channel # 0  |--------->interrupt out # 0
+ *            ...         | |   |                |
+ *            ...         | |   |                |
+ * interrupt source # X-1 +++-->|________________|
+ *                        | | |
+ *                        | | |
+ *                        | | |  ________________
+ *                        +---->|                |
+ *                        | | | |                |
+ *                        | +-->|                |
+ *                        | | | |   channel # 1  |--------->interrupt out # 1
+ *                        | | +>|                |
+ *                        | | | |                |
+ *                        | | | |________________|
+ *                        | | |
+ *                        | | |
+ *                        | | |       ...
+ *                        | | |       ...
+ *                        | | |
+ *                        | | |  ________________
+ *                        +---->|                |
+ *                          | | |                |
+ *                          +-->|                |
+ *                            | |   channel # N  |--------->interrupt out # N
+ *                            +>|                |
+ *                              |                |
+ *                              |________________|
+ *
+ *
+ * N: Interrupt Channel Instance Number (N=7)
+ * X: Interrupt Source Number for each channel (X=32)
+ *
+ * The INTMUX interrupt multiplexer has 8 channels; each channel receives 32
+ * interrupt sources and generates 1 interrupt output.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#define CHANIER(n)     (0x10 + (0x40 * n))
+#define CHANIPR(n)     (0x20 + (0x40 * n))
+
+#define CHAN_MAX_NUM           0x8
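
Each INTMUX channel owns a 0x40-byte register window, with the interrupt-enable register at offset 0x10 and the pending register at 0x20 inside it. A quick standalone check of the offsets the two macros above produce:

/* Quick check of the per-channel register stride; offsets come
 * straight from the CHANIER()/CHANIPR() macros above. */
#include <stdio.h>

#define CHANIER(n)      (0x10 + (0x40 * (n)))
#define CHANIPR(n)      (0x20 + (0x40 * (n)))

int main(void)
{
        for (int n = 0; n < 3; n++)
                printf("chan %d: IER=0x%03x IPR=0x%03x\n",
                       n, CHANIER(n), CHANIPR(n));
        /* chan 0: IER=0x010 IPR=0x020
         * chan 1: IER=0x050 IPR=0x060
         * chan 2: IER=0x090 IPR=0x0a0 */
        return 0;
}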
+
+struct intmux_irqchip_data {
+       int                     chanidx;
+       int                     irq;
+       struct irq_domain       *domain;
+};
+
+struct intmux_data {
+       raw_spinlock_t                  lock;
+       void __iomem                    *regs;
+       struct clk                      *ipg_clk;
+       int                             channum;
+       struct intmux_irqchip_data      irqchip_data[];
+};
+
+static void imx_intmux_irq_mask(struct irq_data *d)
+{
+       struct intmux_irqchip_data *irqchip_data = d->chip_data;
+       int idx = irqchip_data->chanidx;
+       struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+                                               irqchip_data[idx]);
+       unsigned long flags;
+       void __iomem *reg;
+       u32 val;
+
+       raw_spin_lock_irqsave(&data->lock, flags);
+       reg = data->regs + CHANIER(idx);
+       val = readl_relaxed(reg);
+       /* disable the interrupt source of this channel */
+       val &= ~BIT(d->hwirq);
+       writel_relaxed(val, reg);
+       raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void imx_intmux_irq_unmask(struct irq_data *d)
+{
+       struct intmux_irqchip_data *irqchip_data = d->chip_data;
+       int idx = irqchip_data->chanidx;
+       struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+                                               irqchip_data[idx]);
+       unsigned long flags;
+       void __iomem *reg;
+       u32 val;
+
+       raw_spin_lock_irqsave(&data->lock, flags);
+       reg = data->regs + CHANIER(idx);
+       val = readl_relaxed(reg);
+       /* enable the interrupt source of this channel */
+       val |= BIT(d->hwirq);
+       writel_relaxed(val, reg);
+       raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static struct irq_chip imx_intmux_irq_chip = {
+       .name           = "intmux",
+       .irq_mask       = imx_intmux_irq_mask,
+       .irq_unmask     = imx_intmux_irq_unmask,
+};
+
+static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
+                             irq_hw_number_t hwirq)
+{
+       irq_set_chip_data(irq, h->host_data);
+       irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
+
+       return 0;
+}
+
+static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node,
+                               const u32 *intspec, unsigned int intsize,
+                               unsigned long *out_hwirq, unsigned int *out_type)
+{
+       struct intmux_irqchip_data *irqchip_data = d->host_data;
+       int idx = irqchip_data->chanidx;
+       struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+                                               irqchip_data[idx]);
+
+       /*
+        * Two cells are needed in the interrupt specifier:
+        * the 1st cell: hw interrupt number
+        * the 2nd cell: channel index
+        */
+       if (WARN_ON(intsize != 2))
+               return -EINVAL;
+
+       if (WARN_ON(intspec[1] >= data->channum))
+               return -EINVAL;
+
+       *out_hwirq = intspec[0];
+       *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+       return 0;
+}
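
In other words, a consumer describes an INTMUX interrupt with two cells, e.g. "interrupts = <5 2>" for source 5 on channel 2; the second cell is validated against the channel count and only the first becomes the hwirq. A small standalone model of that translation (types and bounds are illustrative):

/* Userspace model of the two-cell INTMUX specifier translation. */
#include <stdio.h>

#define CHAN_NUM 8

static int xlate(const unsigned int *spec, unsigned int size,
                 unsigned long *out_hwirq)
{
        if (size != 2 || spec[1] >= CHAN_NUM)
                return -1;
        *out_hwirq = spec[0];
        return 0;
}

int main(void)
{
        unsigned int spec[2] = { 5, 2 };        /* <hwirq channel> */
        unsigned long hwirq;

        if (!xlate(spec, 2, &hwirq))
                printf("hwirq %lu on channel %u\n", hwirq, spec[1]);
        return 0;
}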
+
+static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+                                enum irq_domain_bus_token bus_token)
+{
+       struct intmux_irqchip_data *irqchip_data = d->host_data;
+
+       /* Not for us */
+       if (fwspec->fwnode != d->fwnode)
+               return false;
+
+       return irqchip_data->chanidx == fwspec->param[1];
+}
+
+static const struct irq_domain_ops imx_intmux_domain_ops = {
+       .map            = imx_intmux_irq_map,
+       .xlate          = imx_intmux_irq_xlate,
+       .select         = imx_intmux_irq_select,
+};
+
+static void imx_intmux_irq_handler(struct irq_desc *desc)
+{
+       struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc);
+       int idx = irqchip_data->chanidx;
+       struct intmux_data *data = container_of(irqchip_data, struct intmux_data,
+                                               irqchip_data[idx]);
+       unsigned long irqstat;
+       int pos, virq;
+
+       chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+       /* read the interrupt source pending status of this channel */
+       irqstat = readl_relaxed(data->regs + CHANIPR(idx));
+
+       for_each_set_bit(pos, &irqstat, 32) {
+               virq = irq_find_mapping(irqchip_data->domain, pos);
+               if (virq)
+                       generic_handle_irq(virq);
+       }
+
+       chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int imx_intmux_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct irq_domain *domain;
+       struct intmux_data *data;
+       int channum;
+       int i, ret;
+
+       channum = platform_irq_count(pdev);
+       if (channum == -EPROBE_DEFER) {
+               return -EPROBE_DEFER;
+       } else if (channum > CHAN_MAX_NUM) {
+               dev_err(&pdev->dev, "supports up to %d multiplex channels\n",
+                       CHAN_MAX_NUM);
+               return -EINVAL;
+       }
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data) +
+                           channum * sizeof(data->irqchip_data[0]), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(data->regs)) {
+               dev_err(&pdev->dev, "failed to initialize reg\n");
+               return PTR_ERR(data->regs);
+       }
+
+       data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(data->ipg_clk)) {
+               ret = PTR_ERR(data->ipg_clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
+               return ret;
+       }
+
+       data->channum = channum;
+       raw_spin_lock_init(&data->lock);
+
+       ret = clk_prepare_enable(data->ipg_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+               return ret;
+       }
+
+       for (i = 0; i < channum; i++) {
+               data->irqchip_data[i].chanidx = i;
+
+               data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
+               if (data->irqchip_data[i].irq <= 0) {
+                       ret = -EINVAL;
+                       dev_err(&pdev->dev, "failed to get irq\n");
+                       goto out;
+               }
+
+               domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops,
+                                              &data->irqchip_data[i]);
+               if (!domain) {
+                       ret = -ENOMEM;
+                       dev_err(&pdev->dev, "failed to create IRQ domain\n");
+                       goto out;
+               }
+               data->irqchip_data[i].domain = domain;
+
+               /* disable all interrupt sources of this channel first */
+               writel_relaxed(0, data->regs + CHANIER(i));
+
+               irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+                                                imx_intmux_irq_handler,
+                                                &data->irqchip_data[i]);
+       }
+
+       platform_set_drvdata(pdev, data);
+
+       return 0;
+out:
+       clk_disable_unprepare(data->ipg_clk);
+       return ret;
+}
+
+static int imx_intmux_remove(struct platform_device *pdev)
+{
+       struct intmux_data *data = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < data->channum; i++) {
+               /* disable all interrupt sources of this channel */
+               writel_relaxed(0, data->regs + CHANIER(i));
+
+               irq_set_chained_handler_and_data(data->irqchip_data[i].irq,
+                                                NULL, NULL);
+
+               irq_domain_remove(data->irqchip_data[i].domain);
+       }
+
+       clk_disable_unprepare(data->ipg_clk);
+
+       return 0;
+}
+
+static const struct of_device_id imx_intmux_id[] = {
+       { .compatible = "fsl,imx-intmux", },
+       { /* sentinel */ },
+};
+
+static struct platform_driver imx_intmux_driver = {
+       .driver = {
+               .name = "imx-intmux",
+               .of_match_table = imx_intmux_id,
+       },
+       .probe = imx_intmux_probe,
+       .remove = imx_intmux_remove,
+};
+builtin_platform_driver(imx_intmux_driver);
index 01d18b39069ebee06224a451a5c1d5bee00533cf..c5589ee0dfb3f3b7613083ec4e88161632145621 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/delay.h>
 
 #include <asm/io.h>
-#include <asm/mach-jz4740/irq.h>
 
 struct ingenic_intc_data {
        void __iomem *base;
@@ -50,7 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data)
                while (pending) {
                        int bit = __fls(pending);
 
-                       irq = irq_find_mapping(domain, bit + (i * 32));
+                       irq = irq_linear_revmap(domain, bit + (i * 32));
                        generic_handle_irq(irq);
                        pending &= ~BIT(bit);
                }
@@ -97,8 +96,7 @@ static int __init ingenic_intc_of_init(struct device_node *node,
                goto out_unmap_irq;
        }
 
-       domain = irq_domain_add_legacy(node, num_chips * 32,
-                                      JZ4740_IRQ_BASE, 0,
+       domain = irq_domain_add_linear(node, num_chips * 32,
                                       &irq_generic_chip_ops, NULL);
        if (!domain) {
                err = -ENOMEM;
index 3f09f658e8e29e483f18074052b8a64559e97c1c..6b566bba263bdec3637aa7a8687cf2816e053971 100644 (file)
@@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = {
                .name           = "Hisilicon MBIGEN-V2",
                .of_match_table = mbigen_of_match,
                .acpi_match_table = ACPI_PTR(mbigen_acpi_match),
+               .suppress_bind_attrs = true,
        },
        .probe                  = mbigen_device_probe,
 };
index 829084b568fa8bb2c0c8e48be53a5e99cf7f4900..ccc7f823911bd3a12e4baa6eb1a8aeabbcaa455a 100644 (file)
 #define REG_PIN_47_SEL 0x08
 #define REG_FILTER_SEL 0x0c
 
+/* used for A1-like chips */
+#define REG_PIN_A1_SEL 0x04
+
 /*
  * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
  * bits 24 to 31. Tests on the actual HW show that these bits are
  * stuck at 0. Bits 8 to 15 are responsive and have the expected
  * effect.
  */
-#define REG_EDGE_POL_EDGE(x)   BIT(x)
-#define REG_EDGE_POL_LOW(x)    BIT(16 + (x))
-#define REG_BOTH_EDGE(x)       BIT(8 + (x))
-#define REG_EDGE_POL_MASK(x)    (      \
-               REG_EDGE_POL_EDGE(x) |  \
-               REG_EDGE_POL_LOW(x)  |  \
-               REG_BOTH_EDGE(x))
+#define REG_EDGE_POL_EDGE(params, x)   BIT((params)->edge_single_offset + (x))
+#define REG_EDGE_POL_LOW(params, x)    BIT((params)->pol_low_offset + (x))
+#define REG_BOTH_EDGE(params, x)       BIT((params)->edge_both_offset + (x))
+#define REG_EDGE_POL_MASK(params, x)    (      \
+               REG_EDGE_POL_EDGE(params, x) |  \
+               REG_EDGE_POL_LOW(params, x)  |  \
+               REG_BOTH_EDGE(params, x))
 #define REG_PIN_SEL_SHIFT(x)   (((x) % 4) * 8)
 #define REG_FILTER_SEL_SHIFT(x)        ((x) * 4)
 
+struct meson_gpio_irq_controller;
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+                                   unsigned int channel, unsigned long hwirq);
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl);
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+                                     unsigned int channel,
+                                     unsigned long hwirq);
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl);
+
+struct irq_ctl_ops {
+       void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl,
+                                unsigned int channel, unsigned long hwirq);
+       void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl);
+};
+
 struct meson_gpio_irq_params {
        unsigned int nr_hwirq;
        bool support_edge_both;
+       unsigned int edge_both_offset;
+       unsigned int edge_single_offset;
+       unsigned int pol_low_offset;
+       unsigned int pin_sel_mask;
+       struct irq_ctl_ops ops;
 };
 
+#define INIT_MESON_COMMON(irqs, init, sel)                     \
+       .nr_hwirq = irqs,                                       \
+       .ops = {                                                \
+               .gpio_irq_init = init,                          \
+               .gpio_irq_sel_pin = sel,                        \
+       },
+
+#define INIT_MESON8_COMMON_DATA(irqs)                          \
+       INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy,      \
+                         meson8_gpio_irq_sel_pin)              \
+       .edge_single_offset = 0,                                \
+       .pol_low_offset = 16,                                   \
+       .pin_sel_mask = 0xff,                                   \
+
+#define INIT_MESON_A1_COMMON_DATA(irqs)                                \
+       INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init,         \
+                         meson_a1_gpio_irq_sel_pin)            \
+       .support_edge_both = true,                              \
+       .edge_both_offset = 16,                                 \
+       .edge_single_offset = 8,                                \
+       .pol_low_offset = 0,                                    \
+       .pin_sel_mask = 0x7f,                                   \
+
 static const struct meson_gpio_irq_params meson8_params = {
-       .nr_hwirq = 134,
+       INIT_MESON8_COMMON_DATA(134)
 };
 
 static const struct meson_gpio_irq_params meson8b_params = {
-       .nr_hwirq = 119,
+       INIT_MESON8_COMMON_DATA(119)
 };
 
 static const struct meson_gpio_irq_params gxbb_params = {
-       .nr_hwirq = 133,
+       INIT_MESON8_COMMON_DATA(133)
 };
 
 static const struct meson_gpio_irq_params gxl_params = {
-       .nr_hwirq = 110,
+       INIT_MESON8_COMMON_DATA(110)
 };
 
 static const struct meson_gpio_irq_params axg_params = {
-       .nr_hwirq = 100,
+       INIT_MESON8_COMMON_DATA(100)
 };
 
 static const struct meson_gpio_irq_params sm1_params = {
-       .nr_hwirq = 100,
+       INIT_MESON8_COMMON_DATA(100)
        .support_edge_both = true,
+       .edge_both_offset = 8,
+};
+
+static const struct meson_gpio_irq_params a1_params = {
+       INIT_MESON_A1_COMMON_DATA(62)
 };
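
For reference, the macro stack above is just a designated-initializer fragment; hand-expanding INIT_MESON8_COMMON_DATA(134) yields roughly the struct below (derived from the macros in this hunk, shown only for illustration):

static const struct meson_gpio_irq_params meson8_params_expanded = {
        .nr_hwirq               = 134,
        .ops = {
                .gpio_irq_init          = meson_gpio_irq_init_dummy,
                .gpio_irq_sel_pin       = meson8_gpio_irq_sel_pin,
        },
        .edge_single_offset     = 0,
        .pol_low_offset         = 16,
        .pin_sel_mask           = 0xff,
};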
 
 static const struct of_device_id meson_irq_gpio_matches[] = {
@@ -78,6 +129,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
        { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
        { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
        { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
+       { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
        { }
 };
 
@@ -100,9 +152,43 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
        writel_relaxed(tmp, ctl->base + reg);
 }
 
-static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+{
+}
+
+static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+                                   unsigned int channel, unsigned long hwirq)
+{
+       unsigned int reg_offset;
+       unsigned int bit_offset;
+
+       reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+       bit_offset = REG_PIN_SEL_SHIFT(channel);
+
+       meson_gpio_irq_update_bits(ctl, reg_offset,
+                                  ctl->params->pin_sel_mask << bit_offset,
+                                  hwirq << bit_offset);
+}
+
+static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
+                                     unsigned int channel,
+                                     unsigned long hwirq)
 {
-       return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+       unsigned int reg_offset;
+       unsigned int bit_offset;
+
+       bit_offset = ((channel % 2) == 0) ? 0 : 16;
+       reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2);
+
+       meson_gpio_irq_update_bits(ctl, reg_offset,
+                                  ctl->params->pin_sel_mask << bit_offset,
+                                  hwirq << bit_offset);
+}
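
On A1, two channels share each 32-bit select register: even channels use the low halfword, odd channels the high one, and every pair of channels advances the register by 4 bytes. A standalone check of that arithmetic (REG_PIN_A1_SEL taken from the define earlier in this file; the 7-bit field width comes from the 0x7f pin_sel_mask):

/* Standalone check of the A1 channel-to-register arithmetic. */
#include <stdio.h>

#define REG_PIN_A1_SEL 0x04

int main(void)
{
        for (unsigned int ch = 0; ch < 4; ch++) {
                unsigned int bit = (ch % 2) ? 16 : 0;
                unsigned int reg = REG_PIN_A1_SEL + ((ch / 2) << 2);

                printf("channel %u -> reg 0x%02x, bits %u..%u\n",
                       ch, reg, bit, bit + 6);  /* 7-bit pin_sel_mask */
        }
        return 0;
}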
+
+/* For A1 and later chips there is a switch to enable/disable the irq */
+static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl)
+{
+       meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31));
 }
 
 static int
@@ -110,7 +196,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
                               unsigned long  hwirq,
                               u32 **channel_hwirq)
 {
-       unsigned int reg, idx;
+       unsigned int idx;
 
        spin_lock(&ctl->lock);
 
@@ -129,10 +215,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
         * Setup the mux of the channel to route the signal of the pad
         * to the appropriate input of the GIC
         */
-       reg = meson_gpio_irq_channel_to_reg(idx);
-       meson_gpio_irq_update_bits(ctl, reg,
-                                  0xff << REG_PIN_SEL_SHIFT(idx),
-                                  hwirq << REG_PIN_SEL_SHIFT(idx));
+       ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq);
 
        /*
         * Get the hwirq number assigned to this channel through
@@ -173,7 +256,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
 {
        u32 val = 0;
        unsigned int idx;
+       const struct meson_gpio_irq_params *params;
 
+       params = ctl->params;
        idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
 
        /*
@@ -190,22 +275,22 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
         * precedence over the other edge/polarity settings
         */
        if (type == IRQ_TYPE_EDGE_BOTH) {
-               if (!ctl->params->support_edge_both)
+               if (!params->support_edge_both)
                        return -EINVAL;
 
-               val |= REG_BOTH_EDGE(idx);
+               val |= REG_BOTH_EDGE(params, idx);
        } else {
                if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
-                       val |= REG_EDGE_POL_EDGE(idx);
+                       val |= REG_EDGE_POL_EDGE(params, idx);
 
                if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
-                       val |= REG_EDGE_POL_LOW(idx);
+                       val |= REG_EDGE_POL_LOW(params, idx);
        }
 
        spin_lock(&ctl->lock);
 
        meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
-                                  REG_EDGE_POL_MASK(idx), val);
+                                  REG_EDGE_POL_MASK(params, idx), val);
 
        spin_unlock(&ctl->lock);
 
@@ -371,6 +456,8 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node,
                return ret;
        }
 
+       ctl->params->ops.gpio_irq_init(ctl);
+
        return 0;
 }
 
index f3985469c2211c7fea4b40469a47c1e27711a510..d70507133c1ded9d2831c20ce7725026d1bff376 100644 (file)
@@ -716,7 +716,7 @@ static int __init gic_of_init(struct device_node *node,
                __sync();
        }
 
-       mips_gic_base = ioremap_nocache(gic_base, gic_len);
+       mips_gic_base = ioremap(gic_base, gic_len);
 
        gicconfig = read_gic_config();
        gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
index a166d30deea267d84f6f7951f3cb0dd771eb2ea1..f747e2209ea995c29803c5c8dbf55a1fe46a47d6 100644 (file)
@@ -45,17 +45,6 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
        handle_IRQ(irq, regs);
 }
 
-static int nvic_irq_domain_translate(struct irq_domain *d,
-                                    struct irq_fwspec *fwspec,
-                                    unsigned long *hwirq, unsigned int *type)
-{
-       if (WARN_ON(fwspec->param_count < 1))
-               return -EINVAL;
-       *hwirq = fwspec->param[0];
-       *type = IRQ_TYPE_NONE;
-       return 0;
-}
-
 static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
 {
@@ -64,7 +53,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
        unsigned int type = IRQ_TYPE_NONE;
        struct irq_fwspec *fwspec = arg;
 
-       ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type);
+       ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;
 
@@ -75,7 +64,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 }
 
 static const struct irq_domain_ops nvic_irq_domain_ops = {
-       .translate = nvic_irq_domain_translate,
+       .translate = irq_domain_translate_onecell,
        .alloc = nvic_irq_domain_alloc,
        .free = irq_domain_free_irqs_top,
 };
index f82bc60a67931cd70a6b9baf2d30e44de716b28d..6e5e3172796bca3ba1598192dec69d125ca91f93 100644 (file)
@@ -460,7 +460,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
                        goto err0;
                }
 
-               i->iomem = devm_ioremap_nocache(dev, io[k]->start,
+               i->iomem = devm_ioremap(dev, io[k]->start,
                                                resource_size(io[k]));
                if (!i->iomem) {
                        dev_err(dev, "failed to remap IOMEM\n");
index 8df547d2d935309c2bdf5f60d018e0b64dc686ac..aa4af886e43ae28563b82ee42a2a4c4249f43ec7 100644 (file)
@@ -154,15 +154,37 @@ static struct irq_chip plic_chip = {
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
 {
-       irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
-       irq_set_chip_data(irq, NULL);
+       irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
+                           handle_fasteoi_irq, NULL, NULL);
        irq_set_noprobe(irq);
        return 0;
 }
 
+static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+                                unsigned int nr_irqs, void *arg)
+{
+       int i, ret;
+       irq_hw_number_t hwirq;
+       unsigned int type;
+       struct irq_fwspec *fwspec = arg;
+
+       ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < nr_irqs; i++) {
+               ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
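
irq_domain_translate_onecell() matches the per-driver nvic_irq_domain_translate() removed from irq-nvic.c earlier in this diff: the first cell becomes the hwirq and the type is left as IRQ_TYPE_NONE. A userspace model for clarity:

/* Userspace model of the onecell fwspec translation. */
#include <stdio.h>

#define IRQ_TYPE_NONE 0

static int translate_onecell(const unsigned int *param,
                             unsigned int param_count,
                             unsigned long *out_hwirq,
                             unsigned int *out_type)
{
        if (param_count < 1)
                return -1;
        *out_hwirq = param[0];
        *out_type = IRQ_TYPE_NONE;
        return 0;
}

int main(void)
{
        unsigned int param[1] = { 9 };
        unsigned long hwirq;
        unsigned int type;

        if (!translate_onecell(param, 1, &hwirq, &type))
                printf("hwirq=%lu type=%u\n", hwirq, type);
        return 0;
}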
+
 static const struct irq_domain_ops plic_irqdomain_ops = {
-       .map            = plic_irqdomain_map,
-       .xlate          = irq_domain_xlate_onecell,
+       .translate      = irq_domain_translate_onecell,
+       .alloc          = plic_irq_domain_alloc,
+       .free           = irq_domain_free_irqs_top,
 };
 
 static struct irq_domain *plic_irqdomain;
@@ -256,7 +278,7 @@ static int __init plic_init(struct device_node *node,
                 * Skip contexts other than external interrupts for our
                 * privilege level.
                 */
-               if (parent.args[0] != IRQ_EXT)
+               if (parent.args[0] != RV_IRQ_EXT)
                        continue;
 
                hartid = plic_find_hart_id(parent.np);
index b7e0ae1af8fa5d57a68ffdbd8423b69222dc53e5..e8922fa03379620f4ee15896d224bd81535a589f 100644 (file)
@@ -493,16 +493,17 @@ static int as3645a_parse_node(struct as3645a *flash,
                switch (id) {
                case AS_LED_FLASH:
                        flash->flash_node = child;
+                       fwnode_handle_get(child);
                        break;
                case AS_LED_INDICATOR:
                        flash->indicator_node = child;
+                       fwnode_handle_get(child);
                        break;
                default:
                        dev_warn(&flash->client->dev,
                                 "unknown LED %u encountered, ignoring\n", id);
                        break;
                }
-               fwnode_handle_get(child);
        }
 
        if (!flash->flash_node) {
index a5c73f3d5f797dc63b2ee03c738a213a9f8529fb..2bf74595610f5c607173ef2597fe95c41e99ce5d 100644 (file)
@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
                struct gpio_led led = {};
                const char *state = NULL;
 
+               /*
+                * Acquire gpiod from DT with an uninitialized label, which
+                * will be updated after the LED class device is registered.
+                * Only then is the final LED name known.
+                */
                led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
                                                             GPIOD_ASIS,
-                                                            led.name);
+                                                            NULL);
                if (IS_ERR(led.gpiod)) {
                        fwnode_handle_put(child);
                        return ERR_CAST(led.gpiod);
@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
                        fwnode_handle_put(child);
                        return ERR_PTR(ret);
                }
+               /* Set gpiod label to match the corresponding LED name. */
+               gpiod_set_consumer_name(led_dat->gpiod,
+                                       led_dat->cdev.dev->kobj.name);
                priv->num_leds++;
        }
 
index 0507c6575c0896853b47ba735c2fef332141b600..491268bb34a7bcf0b5b7ec2d519424cc5f6ee54a 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // TI LM3532 LED driver
 // Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// http://www.ti.com/lit/ds/symlink/lm3532.pdf
 
 #include <linux/i2c.h>
 #include <linux/leds.h>
@@ -623,7 +624,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
 
                led->num_leds = fwnode_property_count_u32(child, "led-sources");
                if (led->num_leds > LM3532_MAX_LED_STRINGS) {
-                       dev_err(&priv->client->dev, "To many LED string defined\n");
+                       dev_err(&priv->client->dev, "Too many LED strings defined\n");
                        continue;
                }
 
index 4c2d0b3c6dadcc1492d2d25d5dc3a9f8d4480bde..a0d4b725c917801c31ba0f1beee5a5a366c944af 100644 (file)
@@ -135,9 +135,16 @@ err_node_put:
        return rv;
 }
 
+static const struct of_device_id max77650_led_of_match[] = {
+       { .compatible = "maxim,max77650-led" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max77650_led_of_match);
+
 static struct platform_driver max77650_led_driver = {
        .driver = {
                .name = "max77650-led",
+               .of_match_table = max77650_led_of_match,
        },
        .probe = max77650_led_probe,
 };
index db5af83f0ceccba97c0030f7a7b9ada92bf00126..b6447c1721b4dfc5d28a43e13e4ce0e31777fd93 100644 (file)
@@ -21,7 +21,6 @@ static void rb532_led_set(struct led_classdev *cdev,
 {
        if (brightness)
                set_latch_u5(LO_ULED, 0);
-
        else
                set_latch_u5(0, LO_ULED);
 }
index 718729c89440e6c8386959b1f25ba3588b65483b..3abcafe46278c5a33783a8533bc9e9cd043c7bee 100644 (file)
@@ -455,7 +455,7 @@ static void __exit pattern_trig_exit(void)
 module_init(pattern_trig_init);
 module_exit(pattern_trig_exit);
 
-MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com");
-MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org");
+MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>");
 MODULE_DESCRIPTION("LED Pattern trigger");
 MODULE_LICENSE("GPL v2");
index 9534503b69d95fd29df423cc7a33ab638977b10f..47b67c6bff7a4a4ff3ad2a0aff709eb1e3af9a0c 100644 (file)
@@ -46,7 +46,7 @@ TRACE_EVENT(pblk_chunk_reset,
        TP_STRUCT__entry(
                __string(name, name)
                __field(u64, ppa)
-               __field(int, state);
+               __field(int, state)
        ),
 
        TP_fast_assign(
@@ -72,7 +72,7 @@ TRACE_EVENT(pblk_chunk_state,
        TP_STRUCT__entry(
                __string(name, name)
                __field(u64, ppa)
-               __field(int, state);
+               __field(int, state)
        ),
 
        TP_fast_assign(
@@ -98,7 +98,7 @@ TRACE_EVENT(pblk_line_state,
        TP_STRUCT__entry(
                __string(name, name)
                __field(int, line)
-               __field(int, state);
+               __field(int, state)
        ),
 
        TP_fast_assign(
@@ -121,7 +121,7 @@ TRACE_EVENT(pblk_state,
 
        TP_STRUCT__entry(
                __string(name, name)
-               __field(int, state);
+               __field(int, state)
        ),
 
        TP_fast_assign(
index 9198c1b480d98fb823d44a4247b91184ea5a3305..adf26a21fcd10fd30df72663fa46e5d6c4f15140 100644 (file)
@@ -301,6 +301,7 @@ struct cached_dev {
        struct block_device     *bdev;
 
        struct cache_sb         sb;
+       struct cache_sb_disk    *sb_disk;
        struct bio              sb_bio;
        struct bio_vec          sb_bv[1];
        struct closure          sb_write;
@@ -403,6 +404,7 @@ enum alloc_reserve {
 struct cache {
        struct cache_set        *set;
        struct cache_sb         sb;
+       struct cache_sb_disk    *sb_disk;
        struct bio              sb_bio;
        struct bio_vec          sb_bv[1];
 
index cffcdc9feefbd3a193f01fc6a45333d19c59a2de..4385303836d8e7cfe873bd497e90ed978ec7c50d 100644 (file)
@@ -1257,6 +1257,11 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
+                *
+                * Don't worry even if 'out' is allocated from the mempool; it
+                * can still be swapped here, because state->pool is a page
+                * mempool created by mempool_init_page_pool(), which allocates
+                * pages with alloc_pages() directly.
                 */
 
                out->magic      = b->set->data->magic;
index 14d6c33b0957ede67b4621b41f655a1057a00ec7..fa872df4e7703fb65b229c2ddcac9dbadad6593c 100644 (file)
@@ -734,34 +734,32 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
 
        i = 0;
        btree_cache_used = c->btree_cache_used;
-       list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
+       list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
                if (nr <= 0)
                        goto out;
 
-               if (++i > 3 &&
-                   !mca_reap(b, 0, false)) {
+               if (!mca_reap(b, 0, false)) {
                        mca_data_free(b);
                        rw_unlock(true, b);
                        freed++;
                }
                nr--;
+               i++;
        }
 
-       for (;  (nr--) && i < btree_cache_used; i++) {
-               if (list_empty(&c->btree_cache))
+       list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+               if (nr <= 0 || i >= btree_cache_used)
                        goto out;
 
-               b = list_first_entry(&c->btree_cache, struct btree, list);
-               list_rotate_left(&c->btree_cache);
-
-               if (!b->accessed &&
-                   !mca_reap(b, 0, false)) {
+               if (!mca_reap(b, 0, false)) {
                        mca_bucket_free(b);
                        mca_data_free(b);
                        rw_unlock(true, b);
                        freed++;
-               } else
-                       b->accessed = 0;
+               }
+
+               nr--;
+               i++;
        }
 out:
        mutex_unlock(&c->bucket_lock);
@@ -1069,7 +1067,6 @@ retry:
        BUG_ON(!b->written);
 
        b->parent = parent;
-       b->accessed = 1;
 
        for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
                prefetch(b->keys.set[i].tree);
@@ -1160,7 +1157,6 @@ retry:
                goto retry;
        }
 
-       b->accessed = 1;
        b->parent = parent;
        bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
 
index 76cfd121a48618d15f8981f955f62fd5ade80adf..f4dcca4493913fa91a36560aa4fb9aa21e4d9f86 100644 (file)
@@ -121,8 +121,6 @@ struct btree {
        /* Key/pointer for this btree node */
        BKEY_PADDED(key);
 
-       /* Single bit - set when accessed, cleared by shrinker */
-       unsigned long           accessed;
        unsigned long           seq;
        struct rw_semaphore     lock;
        struct cache_set        *c;
index be2a2a2016032da538a5b4d30842d829334b4120..33ddc5269e8dc702f10817687fcdcc9c1a224953 100644 (file)
@@ -417,10 +417,14 @@ err:
 
 /* Journalling */
 
+#define nr_to_fifo_front(p, front_p, mask)     (((p) - (front_p)) & (mask))
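
nr_to_fifo_front() computes how far a journal pin entry sits behind the fifo's front pointer, modulo the ring size; the mask passed in below is c->journal.pin.mask, i.e. the fifo size minus one, so the subtraction wraps correctly. A standalone check with a toy ring:

/* Standalone check of the circular-distance macro above. */
#include <stdio.h>

#define nr_to_fifo_front(p, front_p, mask)      (((p) - (front_p)) & (mask))

int main(void)
{
        int ring[8];                    /* fifo of size 8, mask 0x7 */
        int *front = &ring[6];

        /* entry two slots ahead, wrapping past the end of the ring */
        printf("%lu\n", nr_to_fifo_front(&ring[0], front, 0x7UL)); /* 2 */
        printf("%lu\n", nr_to_fifo_front(&ring[6], front, 0x7UL)); /* 0 */
        return 0;
}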
+
 static void btree_flush_write(struct cache_set *c)
 {
        struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
-       unsigned int i, n;
+       unsigned int i, nr, ref_nr;
+       atomic_t *fifo_front_p, *now_fifo_front_p;
+       size_t mask;
 
        if (c->journal.btree_flushing)
                return;
@@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
        c->journal.btree_flushing = true;
        spin_unlock(&c->journal.flush_write_lock);
 
+       /* get the oldest journal entry and check its refcount */
+       spin_lock(&c->journal.lock);
+       fifo_front_p = &fifo_front(&c->journal.pin);
+       ref_nr = atomic_read(fifo_front_p);
+       if (ref_nr <= 0) {
+               /*
+                * do nothing if no btree node references
+                * the oldest journal entry
+                */
+               spin_unlock(&c->journal.lock);
+               goto out;
+       }
+       spin_unlock(&c->journal.lock);
+
+       mask = c->journal.pin.mask;
+       nr = 0;
        atomic_long_inc(&c->flush_write);
        memset(btree_nodes, 0, sizeof(btree_nodes));
-       n = 0;
 
        mutex_lock(&c->bucket_lock);
        list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+               /*
+                * It is safe to get now_fifo_front_p without holding
+                * c->journal.lock here, because we don't need an exactly
+                * accurate value; we only check whether the front pointer
+                * of c->journal.pin has changed.
+                */
+               now_fifo_front_p = &fifo_front(&c->journal.pin);
+               /*
+                * If the oldest journal entry is reclaimed and the front
+                * pointer of c->journal.pin changes, it is unnecessary
+                * to scan c->btree_cache anymore; just quit the loop and
+                * flush out what we have already.
+                */
+               if (now_fifo_front_p != fifo_front_p)
+                       break;
+               /*
+                * quit this loop if all matching btree nodes are
+                * scanned and recorded in btree_nodes[] already.
+                */
+               ref_nr = atomic_read(fifo_front_p);
+               if (nr >= ref_nr)
+                       break;
+
                if (btree_node_journal_flush(b))
                        pr_err("BUG: flush_write bit should not be set here!");
 
@@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
                        continue;
                }
 
+               /*
+                * Only select the btree node which exactly references
+                * the oldest journal entry.
+                *
+                * If the journal entry pointed by fifo_front_p is
+                * reclaimed in parallel, don't worry:
+                * - the list_for_each_xxx loop will quit when checking
+                *   the next now_fifo_front_p.
+                * - If there are matched nodes recorded in btree_nodes[],
+                *   they are clean now (this is why and how the oldest
+                *   journal entry can be reclaimed). These selected nodes
+                *   will be ignored and skipped in the following for-loop.
+                */
+               if (nr_to_fifo_front(btree_current_write(b)->journal,
+                                    fifo_front_p,
+                                    mask) != 0) {
+                       mutex_unlock(&b->write_lock);
+                       continue;
+               }
+
                set_btree_node_journal_flush(b);
 
                mutex_unlock(&b->write_lock);
 
-               btree_nodes[n++] = b;
-               if (n == BTREE_FLUSH_NR)
+               btree_nodes[nr++] = b;
+               /*
+                * To avoid holding c->bucket_lock for too long, scan
+                * for at most BTREE_FLUSH_NR matching btree nodes. If
+                * more btree nodes reference the oldest journal entry,
+                * try to flush them the next time btree_flush_write()
+                * is called.
+                */
+               if (nr == BTREE_FLUSH_NR)
                        break;
        }
        mutex_unlock(&c->bucket_lock);
 
-       for (i = 0; i < n; i++) {
+       for (i = 0; i < nr; i++) {
                b = btree_nodes[i];
                if (!b) {
                        pr_err("BUG: btree_nodes[%d] is NULL", i);
@@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
                mutex_unlock(&b->write_lock);
        }
 
+out:
        spin_lock(&c->journal.flush_write_lock);
        c->journal.btree_flushing = false;
        spin_unlock(&c->journal.flush_write_lock);
index 77e9869345e70c5274f965c27a0c22ce2ae132be..3dea1d5acd5c395effae275a45b364ba70a16b04 100644 (file)
@@ -15,7 +15,6 @@
 #include "writeback.h"
 
 #include <linux/blkdev.h>
-#include <linux/buffer_head.h>
 #include <linux/debugfs.h>
 #include <linux/genhd.h>
 #include <linux/idr.h>
@@ -60,17 +59,18 @@ struct workqueue_struct *bch_journal_wq;
 /* Superblock */
 
 static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
-                             struct page **res)
+                             struct cache_sb_disk **res)
 {
        const char *err;
-       struct cache_sb *s;
-       struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
+       struct cache_sb_disk *s;
+       struct page *page;
        unsigned int i;
 
-       if (!bh)
+       page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+                                  SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
+       if (IS_ERR(page))
                return "IO error";
-
-       s = (struct cache_sb *) bh->b_data;
+       s = page_address(page) + offset_in_page(SB_OFFSET);
 
        sb->offset              = le64_to_cpu(s->offset);
        sb->version             = le64_to_cpu(s->version);
@@ -188,12 +188,10 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
        }
 
        sb->last_mount = (u32)ktime_get_real_seconds();
-       err = NULL;
-
-       get_page(bh->b_page);
-       *res = bh->b_page;
+       *res = s;
+       return NULL;
 err:
-       put_bh(bh);
+       put_page(page);
        return err;
 }
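
The rework above drops buffer heads and reads the superblock through the block device's page cache: the byte offset is split into a page index for read_cache_page_gfp() and an offset within the returned page. A userspace model of that address split, assuming bcache's 4 KiB superblock offset (SB_SECTOR 8 × 512-byte sectors):

/* Userspace model of the page-cache addressing used above. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define SB_OFFSET       4096UL  /* superblock lives at byte 4096 */

int main(void)
{
        unsigned long index = SB_OFFSET >> PAGE_SHIFT;      /* which page */
        unsigned long off   = SB_OFFSET & (PAGE_SIZE - 1);  /* where in it */

        printf("page index %lu, offset in page %lu\n", index, off);
        return 0;
}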
 
@@ -207,15 +205,15 @@ static void write_bdev_super_endio(struct bio *bio)
        closure_put(&dc->sb_write);
 }
 
-static void __write_super(struct cache_sb *sb, struct bio *bio)
+static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
+               struct bio *bio)
 {
-       struct cache_sb *out = page_address(bio_first_page_all(bio));
        unsigned int i;
 
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
        bio->bi_iter.bi_sector  = SB_SECTOR;
-       bio->bi_iter.bi_size    = SB_SIZE;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
-       bch_bio_map(bio, NULL);
+       __bio_add_page(bio, virt_to_page(out), SB_SIZE,
+                       offset_in_page(out));
 
        out->offset             = cpu_to_le64(sb->offset);
        out->version            = cpu_to_le64(sb->version);
@@ -257,14 +255,14 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
        down(&dc->sb_write_mutex);
        closure_init(cl, parent);
 
-       bio_reset(bio);
+       bio_init(bio, dc->sb_bv, 1);
        bio_set_dev(bio, dc->bdev);
        bio->bi_end_io  = write_bdev_super_endio;
        bio->bi_private = dc;
 
        closure_get(cl);
        /* I/O request sent to backing device */
-       __write_super(&dc->sb, bio);
+       __write_super(&dc->sb, dc->sb_disk, bio);
 
        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
@@ -306,13 +304,13 @@ void bcache_write_super(struct cache_set *c)
 
                SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
 
-               bio_reset(bio);
+               bio_init(bio, ca->sb_bv, 1);
                bio_set_dev(bio, ca->bdev);
                bio->bi_end_io  = write_super_endio;
                bio->bi_private = ca;
 
                closure_get(cl);
-               __write_super(&ca->sb, bio);
+               __write_super(&ca->sb, ca->sb_disk, bio);
        }
 
        closure_return_with_destructor(cl, bcache_write_super_unlock);
@@ -1275,6 +1273,9 @@ static void cached_dev_free(struct closure *cl)
 
        mutex_unlock(&bch_register_lock);
 
+       if (dc->sb_disk)
+               put_page(virt_to_page(dc->sb_disk));
+
        if (!IS_ERR_OR_NULL(dc->bdev))
                blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 
@@ -1350,7 +1351,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
 
 /* Cached device - bcache superblock */
 
-static int register_bdev(struct cache_sb *sb, struct page *sb_page,
+static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
                                 struct block_device *bdev,
                                 struct cached_dev *dc)
 {
@@ -1362,11 +1363,7 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page,
        memcpy(&dc->sb, sb, sizeof(struct cache_sb));
        dc->bdev = bdev;
        dc->bdev->bd_holder = dc;
-
-       bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
-       bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
-       get_page(sb_page);
-
+       dc->sb_disk = sb_disk;
 
        if (cached_dev_init(dc, sb->block_size << 9))
                goto err;
@@ -2136,8 +2133,8 @@ void bch_cache_release(struct kobject *kobj)
        for (i = 0; i < RESERVE_NR; i++)
                free_fifo(&ca->free[i]);
 
-       if (ca->sb_bio.bi_inline_vecs[0].bv_page)
-               put_page(bio_first_page_all(&ca->sb_bio));
+       if (ca->sb_disk)
+               put_page(virt_to_page(ca->sb_disk));
 
        if (!IS_ERR_OR_NULL(ca->bdev))
                blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -2259,7 +2256,7 @@ err_free:
        return ret;
 }
 
-static int register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
                                struct block_device *bdev, struct cache *ca)
 {
        const char *err = NULL; /* must be set for any error case */
@@ -2269,10 +2266,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
        memcpy(&ca->sb, sb, sizeof(struct cache_sb));
        ca->bdev = bdev;
        ca->bdev->bd_holder = ca;
-
-       bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
-       bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
-       get_page(sb_page);
+       ca->sb_disk = sb_disk;
 
        if (blk_queue_discard(bdev_get_queue(bdev)))
                ca->discard = CACHE_DISCARD(&ca->sb);
@@ -2372,29 +2366,35 @@ static bool bch_is_open(struct block_device *bdev)
 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                               const char *buffer, size_t size)
 {
-       ssize_t ret = -EINVAL;
-       const char *err = "cannot allocate memory";
+       const char *err;
        char *path = NULL;
-       struct cache_sb *sb = NULL;
-       struct block_device *bdev = NULL;
-       struct page *sb_page = NULL;
+       struct cache_sb *sb;
+       struct cache_sb_disk *sb_disk;
+       struct block_device *bdev;
+       ssize_t ret;
 
+       ret = -EBUSY;
+       err = "failed to reference bcache module";
        if (!try_module_get(THIS_MODULE))
-               return -EBUSY;
+               goto out;
 
        /* For latest state of bcache_is_reboot */
        smp_mb();
+       err = "bcache is in reboot";
        if (bcache_is_reboot)
-               return -EBUSY;
+               goto out_module_put;
 
+       ret = -ENOMEM;
+       err = "cannot allocate memory";
        path = kstrndup(buffer, size, GFP_KERNEL);
        if (!path)
-               goto err;
+               goto out_module_put;
 
        sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
        if (!sb)
-               goto err;
+               goto out_free_path;
 
+       ret = -EINVAL;
        err = "failed to open device";
        bdev = blkdev_get_by_path(strim(path),
                                  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
@@ -2411,57 +2411,63 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                        if (!IS_ERR(bdev))
                                bdput(bdev);
                        if (attr == &ksysfs_register_quiet)
-                               goto quiet_out;
+                               goto done;
                }
-               goto err;
+               goto out_free_sb;
        }
 
        err = "failed to set blocksize";
        if (set_blocksize(bdev, 4096))
-               goto err_close;
+               goto out_blkdev_put;
 
-       err = read_super(sb, bdev, &sb_page);
+       err = read_super(sb, bdev, &sb_disk);
        if (err)
-               goto err_close;
+               goto out_blkdev_put;
 
        err = "failed to register device";
        if (SB_IS_BDEV(sb)) {
                struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
 
                if (!dc)
-                       goto err_close;
+                       goto out_put_sb_page;
 
                mutex_lock(&bch_register_lock);
-               ret = register_bdev(sb, sb_page, bdev, dc);
+               ret = register_bdev(sb, sb_disk, bdev, dc);
                mutex_unlock(&bch_register_lock);
                /* blkdev_put() will be called in cached_dev_free() */
                if (ret < 0)
-                       goto err;
+                       goto out_free_sb;
        } else {
                struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
 
                if (!ca)
-                       goto err_close;
+                       goto out_put_sb_page;
 
                /* blkdev_put() will be called in bch_cache_release() */
-               if (register_cache(sb, sb_page, bdev, ca) != 0)
-                       goto err;
+               if (register_cache(sb, sb_disk, bdev, ca) != 0)
+                       goto out_free_sb;
        }
-quiet_out:
-       ret = size;
-out:
-       if (sb_page)
-               put_page(sb_page);
+
+done:
        kfree(sb);
        kfree(path);
        module_put(THIS_MODULE);
-       return ret;
+       return size;
 
-err_close:
-       blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
-       pr_info("error %s: %s", path, err);
-       goto out;
+out_put_sb_page:
+       put_page(virt_to_page(sb_disk));
+out_blkdev_put:
+       blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+out_free_sb:
+       kfree(sb);
+out_free_path:
+       kfree(path);
+       path = NULL;
+out_module_put:
+       module_put(THIS_MODULE);
+out:
+       pr_info("error %s: %s", path ? path : "", err);
+       return ret;
 }
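
The rewrite above replaces bcache's single catch-all error label with one unwind label per acquired resource, so each failure releases exactly what was set up before it and the success path no longer shares the error exit. A minimal userspace sketch of the same idiom, with all names and resources invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int register_thing(const char *buffer)
{
	int ret = -ENOMEM;
	const char *err = "cannot allocate memory";
	char *path, *sb;

	path = strdup(buffer);
	if (!path)
		goto out;

	sb = malloc(64);
	if (!sb)
		goto out_free_path;

	ret = -EINVAL;
	err = "failed to open device";
	if (path[0] != '/')
		goto out_free_sb;

	/* Success: release everything in the reverse order of acquisition. */
	free(sb);
	free(path);
	return 0;

out_free_sb:
	free(sb);
out_free_path:
	free(path);
out:
	fprintf(stderr, "error: %s\n", err);
	return ret;
}

int main(void)
{
	return register_thing("not-a-device") ? 1 : 0;
}
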
 
 
index 08c552e5e41b2f6c96f0142db37df659ab96fa9f..c05b121104561a4cd777b40cdb97ba14838f7ff2 100644 (file)
@@ -67,23 +67,34 @@ struct superblock_disk {
  * To save constantly doing look ups on disk we keep an in core copy of the
  * on-disk bitmap, the region_map.
  *
- * To further reduce metadata I/O overhead we use a second bitmap, the dmap
- * (dirty bitmap), which tracks the dirty words, i.e. longs, of the region_map.
+ * In order to track which regions are hydrated during a metadata transaction,
+ * we use a second set of bitmaps, the dmap (dirty bitmap), which includes two
+ * bitmaps, namely dirty_regions and dirty_words. The dirty_regions bitmap
+ * tracks the regions that got hydrated during the current metadata
+ * transaction. The dirty_words bitmap tracks the dirty words, i.e. longs, of
+ * the dirty_regions bitmap.
+ *
+ * This allows us to precisely track the regions that were hydrated during the
+ * current metadata transaction and update the metadata accordingly, when we
+ * commit the current transaction. This is important because dm-clone should
+ * only commit the metadata of regions that were properly flushed to the
+ * destination device beforehand. Otherwise, in case of a crash, we could end
+ * up with a corrupted dm-clone device.
  *
  * When a region finishes hydrating dm-clone calls
  * dm_clone_set_region_hydrated(), or for discard requests
  * dm_clone_cond_set_range(), which sets the corresponding bits in region_map
  * and dmap.
  *
- * During a metadata commit we scan the dmap for dirty region_map words (longs)
- * and update accordingly the on-disk metadata. Thus, we don't have to flush to
- * disk the whole region_map. We can just flush the dirty region_map words.
+ * During a metadata commit we scan dmap->dirty_words and dmap->dirty_regions
+ * and update the on-disk metadata accordingly. Thus, we don't have to flush to
+ * disk the whole region_map. We can just flush the dirty region_map bits.
  *
- * We use a dirty bitmap, which is smaller than the original region_map, to
- * reduce the amount of memory accesses during a metadata commit. As dm-bitset
- * accesses the on-disk bitmap in 64-bit word granularity, there is no
- * significant benefit in tracking the dirty region_map bits with a smaller
- * granularity.
+ * We use the helper dmap->dirty_words bitmap, which is smaller than the
+ * original region_map, to reduce the number of memory accesses during a
+ * metadata commit. Moreover, as dm-bitset also accesses the on-disk bitmap in
+ * 64-bit word granularity, the dirty_words bitmap helps us avoid useless disk
+ * accesses.
  *
 * We could directly update the on-disk bitmap, when dm-clone calls either
 * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but this
@@ -92,12 +103,13 @@ struct superblock_disk {
  * e.g., in a hooked overwrite bio's completion routine, and further reduce the
  * I/O completion latency.
  *
- * We maintain two dirty bitmaps. During a metadata commit we atomically swap
- * the currently used dmap with the unused one. This allows the metadata update
- * functions to run concurrently with an ongoing commit.
+ * We maintain two dirty bitmap sets. During a metadata commit we atomically
+ * swap the currently used dmap with the unused one. This allows the metadata
+ * update functions to run concurrently with an ongoing commit.
  */
 struct dirty_map {
        unsigned long *dirty_words;
+       unsigned long *dirty_regions;
        unsigned int changed;
 };
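
To make the relationship between the two bitmaps concrete, here is a small userspace sketch of the bookkeeping described above; the sizes and helper names are illustrative, not taken from dm-clone:

#include <limits.h>
#include <stdio.h>

#define NR_REGIONS    128
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define NR_WORDS      ((NR_REGIONS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long dirty_regions[NR_WORDS]; /* one bit per region */
static unsigned long dirty_words[1];          /* one bit per word above */

static void set_region_dirty(unsigned long region)
{
	unsigned long word = region / BITS_PER_LONG;

	dirty_regions[word] |= 1UL << (region % BITS_PER_LONG);
	dirty_words[word / BITS_PER_LONG] |= 1UL << (word % BITS_PER_LONG);
}

int main(void)
{
	set_region_dirty(3);
	set_region_dirty(70);
	/*
	 * A commit scans dirty_words first, visits only the dirty words of
	 * dirty_regions, writes those bits out, and then clears them.
	 */
	printf("dirty_words = %#lx\n", dirty_words[0]);
	return 0;
}
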
 
@@ -115,6 +127,9 @@ struct dm_clone_metadata {
        struct dirty_map dmap[2];
        struct dirty_map *current_dmap;
 
+       /* Protected by lock */
+       struct dirty_map *committing_dmap;
+
        /*
         * In core copy of the on-disk bitmap to save constantly doing look ups
         * on disk.
@@ -461,34 +476,53 @@ static size_t bitmap_size(unsigned long nr_bits)
        return BITS_TO_LONGS(nr_bits) * sizeof(long);
 }
 
-static int dirty_map_init(struct dm_clone_metadata *cmd)
+static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
+                           unsigned long nr_regions)
 {
-       cmd->dmap[0].changed = 0;
-       cmd->dmap[0].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL);
+       dmap->changed = 0;
 
-       if (!cmd->dmap[0].dirty_words) {
-               DMERR("Failed to allocate dirty bitmap");
+       dmap->dirty_words = kvzalloc(bitmap_size(nr_words), GFP_KERNEL);
+       if (!dmap->dirty_words)
+               return -ENOMEM;
+
+       dmap->dirty_regions = kvzalloc(bitmap_size(nr_regions), GFP_KERNEL);
+       if (!dmap->dirty_regions) {
+               kvfree(dmap->dirty_words);
                return -ENOMEM;
        }
 
-       cmd->dmap[1].changed = 0;
-       cmd->dmap[1].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL);
+       return 0;
+}
+
+static void __dirty_map_exit(struct dirty_map *dmap)
+{
+       kvfree(dmap->dirty_words);
+       kvfree(dmap->dirty_regions);
+}
+
+static int dirty_map_init(struct dm_clone_metadata *cmd)
+{
+       if (__dirty_map_init(&cmd->dmap[0], cmd->nr_words, cmd->nr_regions)) {
+               DMERR("Failed to allocate dirty bitmap");
+               return -ENOMEM;
+       }
 
-       if (!cmd->dmap[1].dirty_words) {
+       if (__dirty_map_init(&cmd->dmap[1], cmd->nr_words, cmd->nr_regions)) {
                DMERR("Failed to allocate dirty bitmap");
-               kvfree(cmd->dmap[0].dirty_words);
+               __dirty_map_exit(&cmd->dmap[0]);
                return -ENOMEM;
        }
 
        cmd->current_dmap = &cmd->dmap[0];
+       cmd->committing_dmap = NULL;
 
        return 0;
 }
 
 static void dirty_map_exit(struct dm_clone_metadata *cmd)
 {
-       kvfree(cmd->dmap[0].dirty_words);
-       kvfree(cmd->dmap[1].dirty_words);
+       __dirty_map_exit(&cmd->dmap[0]);
+       __dirty_map_exit(&cmd->dmap[1]);
 }
 
 static int __load_bitset_in_core(struct dm_clone_metadata *cmd)
@@ -633,21 +667,23 @@ unsigned long dm_clone_find_next_unhydrated_region(struct dm_clone_metadata *cmd
        return find_next_zero_bit(cmd->region_map, cmd->nr_regions, start);
 }
 
-static int __update_metadata_word(struct dm_clone_metadata *cmd, unsigned long word)
+static int __update_metadata_word(struct dm_clone_metadata *cmd,
+                                 unsigned long *dirty_regions,
+                                 unsigned long word)
 {
        int r;
        unsigned long index = word * BITS_PER_LONG;
        unsigned long max_index = min(cmd->nr_regions, (word + 1) * BITS_PER_LONG);
 
        while (index < max_index) {
-               if (test_bit(index, cmd->region_map)) {
+               if (test_bit(index, dirty_regions)) {
                        r = dm_bitset_set_bit(&cmd->bitset_info, cmd->bitset_root,
                                              index, &cmd->bitset_root);
-
                        if (r) {
                                DMERR("dm_bitset_set_bit failed");
                                return r;
                        }
+                       __clear_bit(index, dirty_regions);
                }
                index++;
        }
@@ -721,7 +757,7 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
                if (word == cmd->nr_words)
                        break;
 
-               r = __update_metadata_word(cmd, word);
+               r = __update_metadata_word(cmd, dmap->dirty_regions, word);
 
                if (r)
                        return r;
@@ -743,15 +779,17 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
        return 0;
 }
 
-int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
+int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
 {
-       int r = -EPERM;
+       int r = 0;
        struct dirty_map *dmap, *next_dmap;
 
        down_write(&cmd->lock);
 
-       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm))
+       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+               r = -EPERM;
                goto out;
+       }
 
        /* Get current dirty bitmap */
        dmap = cmd->current_dmap;
@@ -763,7 +801,7 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
         * The last commit failed, so we don't have a clean dirty-bitmap to
         * use.
         */
-       if (WARN_ON(next_dmap->changed)) {
+       if (WARN_ON(next_dmap->changed || cmd->committing_dmap)) {
                r = -EINVAL;
                goto out;
        }
@@ -773,11 +811,33 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
        cmd->current_dmap = next_dmap;
        spin_unlock_irq(&cmd->bitmap_lock);
 
-       /*
-        * No one is accessing the old dirty bitmap anymore, so we can flush
-        * it.
-        */
-       r = __flush_dmap(cmd, dmap);
+       /* Set old dirty bitmap as currently committing */
+       cmd->committing_dmap = dmap;
+out:
+       up_write(&cmd->lock);
+
+       return r;
+}
+
+int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
+{
+       int r = -EPERM;
+
+       down_write(&cmd->lock);
+
+       if (cmd->fail_io || dm_bm_is_read_only(cmd->bm))
+               goto out;
+
+       if (WARN_ON(!cmd->committing_dmap)) {
+               r = -EINVAL;
+               goto out;
+       }
+
+       r = __flush_dmap(cmd, cmd->committing_dmap);
+       if (!r) {
+               /* Clear committing dmap */
+               cmd->committing_dmap = NULL;
+       }
 out:
        up_write(&cmd->lock);
 
@@ -802,6 +862,7 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
        dmap = cmd->current_dmap;
 
        __set_bit(word, dmap->dirty_words);
+       __set_bit(region_nr, dmap->dirty_regions);
        __set_bit(region_nr, cmd->region_map);
        dmap->changed = 1;
 
@@ -830,6 +891,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
                if (!test_bit(region_nr, cmd->region_map)) {
                        word = region_nr / BITS_PER_LONG;
                        __set_bit(word, dmap->dirty_words);
+                       __set_bit(region_nr, dmap->dirty_regions);
                        __set_bit(region_nr, cmd->region_map);
                        dmap->changed = 1;
                }
index 3fe50a781c1161a1050707331fa2c28169a8a917..14af1ebd853fd2048acc445e0c8b8e8a978b28c5 100644 (file)
@@ -75,7 +75,23 @@ void dm_clone_metadata_close(struct dm_clone_metadata *cmd);
 
 /*
  * Commit dm-clone metadata to disk.
+ *
+ * We use a two phase commit:
+ *
+ * 1. dm_clone_metadata_pre_commit(): Prepare the current transaction for
+ *    committing. After this is called, all subsequent metadata updates, done
+ *    through either dm_clone_set_region_hydrated() or
+ *    dm_clone_cond_set_range(), will be part of the **next** transaction.
+ *
+ * 2. dm_clone_metadata_commit(): Actually commit the current transaction to
+ *    disk and start a new transaction.
+ *
+ * This allows dm-clone to flush the destination device after step (1) to
+ * ensure that all freshly hydrated regions, for which we are updating the
+ * metadata, are properly written to non-volatile storage and won't be lost in
+ * case of a crash.
  */
+int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd);
 int dm_clone_metadata_commit(struct dm_clone_metadata *cmd);
 
 /*
@@ -112,6 +128,7 @@ int dm_clone_metadata_abort(struct dm_clone_metadata *cmd);
  * Switches metadata to a read only mode. Once read-only mode has been entered
  * the following functions will return -EPERM:
  *
+ *   dm_clone_metadata_pre_commit()
  *   dm_clone_metadata_commit()
  *   dm_clone_set_region_hydrated()
  *   dm_clone_cond_set_range()
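
The split exists so the caller can order a destination-device flush between the two steps. A stub sketch of the required sequence, with placeholder bodies rather than the real dm-clone calls:

#include <stdio.h>

/* Step 1: freeze the running transaction; later updates join the next one. */
static int pre_commit(void)  { puts("pre-commit: swap in a clean dirty map"); return 0; }
/* Between the steps: make the hydrated data durable on the destination. */
static int flush_dest(void)  { puts("flush destination device"); return 0; }
/* Step 2: write out the frozen transaction's metadata. */
static int commit(void)      { puts("commit metadata to disk"); return 0; }

int main(void)
{
	/*
	 * The ordering is the whole point: committing metadata for a region
	 * whose data has not reached non-volatile storage could corrupt the
	 * clone after a crash.
	 */
	if (pre_commit() || flush_dest() || commit())
		return 1;
	return 0;
}
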
index b3d89072d21c6a26461dd9a60d32eaccf68200a5..d1e1b5b56b1bbb4ad205889930cc1db9d9bad02e 100644 (file)
@@ -86,6 +86,12 @@ struct clone {
 
        struct dm_clone_metadata *cmd;
 
+       /*
+        * bio used to flush the destination device, before committing the
+        * metadata.
+        */
+       struct bio flush_bio;
+
        /* Region hydration hash table */
        struct hash_table_bucket *ht;
 
@@ -1108,10 +1114,13 @@ static bool need_commit_due_to_time(struct clone *clone)
 /*
  * A non-zero return indicates read-only or fail mode.
  */
-static int commit_metadata(struct clone *clone)
+static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
 {
        int r = 0;
 
+       if (dest_dev_flushed)
+               *dest_dev_flushed = false;
+
        mutex_lock(&clone->commit_lock);
 
        if (!dm_clone_changed_this_transaction(clone->cmd))
@@ -1122,8 +1131,26 @@ static int commit_metadata(struct clone *clone)
                goto out;
        }
 
-       r = dm_clone_metadata_commit(clone->cmd);
+       r = dm_clone_metadata_pre_commit(clone->cmd);
+       if (unlikely(r)) {
+               __metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
+               goto out;
+       }
 
+       bio_reset(&clone->flush_bio);
+       bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
+       clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+       r = submit_bio_wait(&clone->flush_bio);
+       if (unlikely(r)) {
+               __metadata_operation_failed(clone, "flush destination device", r);
+               goto out;
+       }
+
+       if (dest_dev_flushed)
+               *dest_dev_flushed = true;
+
+       r = dm_clone_metadata_commit(clone->cmd);
        if (unlikely(r)) {
                __metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
                goto out;
@@ -1194,6 +1221,7 @@ static void process_deferred_bios(struct clone *clone)
 static void process_deferred_flush_bios(struct clone *clone)
 {
        struct bio *bio;
+       bool dest_dev_flushed;
        struct bio_list bios = BIO_EMPTY_LIST;
        struct bio_list bio_completions = BIO_EMPTY_LIST;
 
@@ -1213,7 +1241,7 @@ static void process_deferred_flush_bios(struct clone *clone)
            !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
                return;
 
-       if (commit_metadata(clone)) {
+       if (commit_metadata(clone, &dest_dev_flushed)) {
                bio_list_merge(&bios, &bio_completions);
 
                while ((bio = bio_list_pop(&bios)))
@@ -1227,8 +1255,17 @@ static void process_deferred_flush_bios(struct clone *clone)
        while ((bio = bio_list_pop(&bio_completions)))
                bio_endio(bio);
 
-       while ((bio = bio_list_pop(&bios)))
-               generic_make_request(bio);
+       while ((bio = bio_list_pop(&bios))) {
+               if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
+                       /* We just flushed the destination device as part of
+                        * the metadata commit, so there is no reason to send
+                        * another flush.
+                        */
+                       bio_endio(bio);
+               } else {
+                       generic_make_request(bio);
+               }
+       }
 }
 
 static void do_worker(struct work_struct *work)
@@ -1400,7 +1437,7 @@ static void clone_status(struct dm_target *ti, status_type_t type,
 
                /* Commit to ensure statistics aren't out-of-date */
                if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
-                       (void) commit_metadata(clone);
+                       (void) commit_metadata(clone, NULL);
 
                r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);
 
@@ -1834,6 +1871,7 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        bio_list_init(&clone->deferred_flush_completions);
        clone->hydration_offset = 0;
        atomic_set(&clone->hydrations_in_flight, 0);
+       bio_init(&clone->flush_bio, NULL, 0);
 
        clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
        if (!clone->wq) {
@@ -1907,6 +1945,7 @@ static void clone_dtr(struct dm_target *ti)
        struct clone *clone = ti->private;
 
        mutex_destroy(&clone->commit_lock);
+       bio_uninit(&clone->flush_bio);
 
        for (i = 0; i < clone->nr_ctr_args; i++)
                kfree(clone->ctr_args[i]);
@@ -1961,7 +2000,7 @@ static void clone_postsuspend(struct dm_target *ti)
        wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
        flush_workqueue(clone->wq);
 
-       (void) commit_metadata(clone);
+       (void) commit_metadata(clone, NULL);
 }
 
 static void clone_resume(struct dm_target *ti)
index dbcc1e41cd57dd5a669466a6907a70f78a16014f..e0c32793c24872a07cd504202447fcd102056369 100644 (file)
@@ -599,45 +599,10 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
        return pgpath;
 }
 
-static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
-{
-       struct pgpath *pgpath;
-       unsigned long flags;
-
-       /* Do we need to select a new pgpath? */
-       /*
-        * FIXME: currently only switching path if no path (due to failure, etc)
-        * - which negates the point of using a path selector
-        */
-       pgpath = READ_ONCE(m->current_pgpath);
-       if (!pgpath)
-               pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
-
-       if (!pgpath) {
-               if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-                       /* Queue for the daemon to resubmit */
-                       spin_lock_irqsave(&m->lock, flags);
-                       bio_list_add(&m->queued_bios, bio);
-                       spin_unlock_irqrestore(&m->lock, flags);
-                       queue_work(kmultipathd, &m->process_queued_bios);
-
-                       return ERR_PTR(-EAGAIN);
-               }
-               return NULL;
-       }
-
-       return pgpath;
-}
-
 static int __multipath_map_bio(struct multipath *m, struct bio *bio,
                               struct dm_mpath_io *mpio)
 {
-       struct pgpath *pgpath;
-
-       if (!m->hw_handler_name)
-               pgpath = __map_bio_fast(m, bio);
-       else
-               pgpath = __map_bio(m, bio);
+       struct pgpath *pgpath = __map_bio(m, bio);
 
        if (IS_ERR(pgpath))
                return DM_MAPIO_SUBMITTED;
index 3c50c4e4da8f39ee56703f9e0abc2461f8b96659..963d3774c93e287432966b658088914b67f10da3 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/dm-bufio.h>
 
 #define DM_MSG_PREFIX "persistent snapshot"
-#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32       /* 16KB */
+#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U      /* 16KB */
 
 #define DM_PREFETCH_CHUNKS             12
 
index 4c68a7b93d5edab9192a3ce9c24fcdc79b7e1acf..b88d6d701f5bb26bec60314164b4cd07926fab51 100644 (file)
@@ -188,6 +188,15 @@ struct dm_pool_metadata {
        unsigned long flags;
        sector_t data_block_size;
 
+       /*
+        * Pre-commit callback.
+        *
+        * This allows the thin provisioning target to run a callback before
+        * the metadata are committed.
+        */
+       dm_pool_pre_commit_fn pre_commit_fn;
+       void *pre_commit_context;
+
        /*
         * We reserve a section of the metadata for commit overhead.
         * All reported space does *not* include this.
@@ -826,6 +835,14 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        if (unlikely(!pmd->in_service))
                return 0;
 
+       if (pmd->pre_commit_fn) {
+               r = pmd->pre_commit_fn(pmd->pre_commit_context);
+               if (r < 0) {
+                       DMERR("pre-commit callback failed");
+                       return r;
+               }
+       }
+
        r = __write_changed_details(pmd);
        if (r < 0)
                return r;
@@ -892,6 +909,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
        pmd->in_service = false;
        pmd->bdev = bdev;
        pmd->data_block_size = data_block_size;
+       pmd->pre_commit_fn = NULL;
+       pmd->pre_commit_context = NULL;
 
        r = __create_persistent_data_objects(pmd, format_device);
        if (r) {
@@ -2044,6 +2063,16 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
        return r;
 }
 
+void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
+                                         dm_pool_pre_commit_fn fn,
+                                         void *context)
+{
+       pmd_write_lock_in_core(pmd);
+       pmd->pre_commit_fn = fn;
+       pmd->pre_commit_context = context;
+       pmd_write_unlock(pmd);
+}
+
 int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
 {
        int r = -EINVAL;
index f6be0d733c20267f569b72ab14314d0565425e80..7ef56bd2a7e33974469196486bdf05c9b4bedda3 100644 (file)
@@ -230,6 +230,13 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
  */
 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd);
 
+/* Pre-commit callback */
+typedef int (*dm_pool_pre_commit_fn)(void *context);
+
+void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
+                                         dm_pool_pre_commit_fn fn,
+                                         void *context);
+
 /*----------------------------------------------------------------*/
 
 #endif
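
Reduced to its core, the new hook is a classic optional pre-commit callback. A self-contained sketch of the pattern, with every structure and name invented for illustration rather than taken from dm-thin:

#include <stdio.h>

typedef int (*pre_commit_fn)(void *context);

struct pool_metadata {
	pre_commit_fn pre_commit_fn;
	void *pre_commit_context;
};

static void register_pre_commit(struct pool_metadata *pmd,
				pre_commit_fn fn, void *context)
{
	pmd->pre_commit_fn = fn;
	pmd->pre_commit_context = context;
}

static int commit_transaction(struct pool_metadata *pmd)
{
	if (pmd->pre_commit_fn) {
		int r = pmd->pre_commit_fn(pmd->pre_commit_context);

		if (r < 0)
			return r; /* abort; the old metadata stays valid */
	}
	puts("metadata committed");
	return 0;
}

static int flush_data_dev(void *context)
{
	(void)context;
	puts("flushing data device before the metadata commit");
	return 0;
}

int main(void)
{
	struct pool_metadata pmd = { 0 };

	register_pre_commit(&pmd, flush_data_dev, NULL);
	return commit_transaction(&pmd);
}
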
index 5a2c494cb55288e782d0fb4702d4e9efb8cab63a..57626c27a54bb5bc66be7c3832dc05059b2851d7 100644 (file)
@@ -328,6 +328,7 @@ struct pool_c {
        dm_block_t low_water_blocks;
        struct pool_features requested_pf; /* Features requested during table load */
        struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
+       struct bio flush_bio;
 };
 
 /*
@@ -2383,8 +2384,16 @@ static void process_deferred_bios(struct pool *pool)
        while ((bio = bio_list_pop(&bio_completions)))
                bio_endio(bio);
 
-       while ((bio = bio_list_pop(&bios)))
-               generic_make_request(bio);
+       while ((bio = bio_list_pop(&bios))) {
+               /*
+                * The data device was flushed as part of metadata commit,
+                * so complete redundant flushes immediately.
+                */
+               if (bio->bi_opf & REQ_PREFLUSH)
+                       bio_endio(bio);
+               else
+                       generic_make_request(bio);
+       }
 }
 
 static void do_worker(struct work_struct *ws)
@@ -3115,6 +3124,7 @@ static void pool_dtr(struct dm_target *ti)
        __pool_dec(pt->pool);
        dm_put_device(ti, pt->metadata_dev);
        dm_put_device(ti, pt->data_dev);
+       bio_uninit(&pt->flush_bio);
        kfree(pt);
 
        mutex_unlock(&dm_thin_pool_table.mutex);
@@ -3180,6 +3190,29 @@ static void metadata_low_callback(void *context)
        dm_table_event(pool->ti->table);
 }
 
+/*
+ * We need to flush the data device **before** committing the metadata.
+ *
+ * This ensures that the data blocks of any newly inserted mappings are
+ * properly written to non-volatile storage and won't be lost in case of a
+ * crash.
+ *
+ * Failure to do so can result in data corruption in the case of internal or
+ * external snapshots and in the case of newly provisioned blocks, when block
+ * zeroing is enabled.
+ */
+static int metadata_pre_commit_callback(void *context)
+{
+       struct pool_c *pt = context;
+       struct bio *flush_bio = &pt->flush_bio;
+
+       bio_reset(flush_bio);
+       bio_set_dev(flush_bio, pt->data_dev->bdev);
+       flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+       return submit_bio_wait(flush_bio);
+}
+
 static sector_t get_dev_size(struct block_device *bdev)
 {
        return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
@@ -3348,6 +3381,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        pt->data_dev = data_dev;
        pt->low_water_blocks = low_water_blocks;
        pt->adjusted_pf = pt->requested_pf = pf;
+       bio_init(&pt->flush_bio, NULL, 0);
        ti->num_flush_bios = 1;
 
        /*
@@ -3374,6 +3408,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        if (r)
                goto out_flags_changed;
 
+       dm_pool_register_pre_commit_callback(pt->pool->pmd,
+                                            metadata_pre_commit_callback,
+                                            pt);
+
        pt->callbacks.congested_fn = pool_is_congested;
        dm_table_add_target_callbacks(ti->table, &pt->callbacks);
 
index 3ad18246fcb3c1aba6cd98b69a57b7d40bcf099e..e230052c2107783c2d2410103c8d444eb1ded34f 100644 (file)
@@ -1019,8 +1019,6 @@ void md_bitmap_unplug(struct bitmap *bitmap)
        /* look at each page to see if there are any set bits that need to be
         * flushed out to disk */
        for (i = 0; i < bitmap->storage.file_pages; i++) {
-               if (!bitmap->storage.filemap)
-                       return;
                dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
                need_write = test_and_clear_page_attr(bitmap, i,
                                                      BITMAP_PAGE_NEEDWRITE);
@@ -1338,7 +1336,8 @@ void md_bitmap_daemon_work(struct mddev *mddev)
                                   BITMAP_PAGE_DIRTY))
                        /* bitmap_unplug will handle the rest */
                        break;
-               if (test_and_clear_page_attr(bitmap, j,
+               if (bitmap->storage.filemap &&
+                   test_and_clear_page_attr(bitmap, j,
                                             BITMAP_PAGE_NEEDWRITE)) {
                        write_page(bitmap, bitmap->storage.filemap[j], 0);
                }
@@ -1790,8 +1789,8 @@ void md_bitmap_destroy(struct mddev *mddev)
                return;
 
        md_bitmap_wait_behind_writes(mddev);
-       mempool_destroy(mddev->wb_info_pool);
-       mddev->wb_info_pool = NULL;
+       if (!mddev->serialize_policy)
+               mddev_destroy_serial_pool(mddev, NULL, true);
 
        mutex_lock(&mddev->bitmap_info.mutex);
        spin_lock(&mddev->lock);
@@ -1908,7 +1907,7 @@ int md_bitmap_load(struct mddev *mddev)
                goto out;
 
        rdev_for_each(rdev, mddev)
-               mddev_create_wb_pool(mddev, rdev, true);
+               mddev_create_serial_pool(mddev, rdev, true);
 
        if (mddev_is_clustered(mddev))
                md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
@@ -2475,16 +2474,16 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
        if (backlog > COUNTER_MAX)
                return -EINVAL;
        mddev->bitmap_info.max_write_behind = backlog;
-       if (!backlog && mddev->wb_info_pool) {
-               /* wb_info_pool is not needed if backlog is zero */
-               mempool_destroy(mddev->wb_info_pool);
-               mddev->wb_info_pool = NULL;
-       } else if (backlog && !mddev->wb_info_pool) {
-               /* wb_info_pool is needed since backlog is not zero */
+       if (!backlog && mddev->serial_info_pool) {
+               /* serial_info_pool is not needed if backlog is zero */
+               if (!mddev->serialize_policy)
+                       mddev_destroy_serial_pool(mddev, NULL, false);
+       } else if (backlog && !mddev->serial_info_pool) {
+               /* serial_info_pool is needed since backlog is not zero */
                struct md_rdev *rdev;
 
                rdev_for_each(rdev, mddev)
-                       mddev_create_wb_pool(mddev, rdev, false);
+                       mddev_create_serial_pool(mddev, rdev, false);
        }
        if (old_mwb != backlog)
                md_bitmap_update_sb(mddev->bitmap);
index 805b33e274967f7320eb2b05e1f5958512981500..4824d50526fabbace9e6f9ad270b65bfbbe734d6 100644 (file)
@@ -125,74 +125,165 @@ static inline int speed_max(struct mddev *mddev)
                mddev->sync_speed_max : sysctl_speed_limit_max;
 }
 
-static int rdev_init_wb(struct md_rdev *rdev)
+static void rdev_uninit_serial(struct md_rdev *rdev)
 {
-       if (rdev->bdev->bd_queue->nr_hw_queues == 1)
+       if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
+               return;
+
+       kvfree(rdev->serial);
+       rdev->serial = NULL;
+}
+
+static void rdevs_uninit_serial(struct mddev *mddev)
+{
+       struct md_rdev *rdev;
+
+       rdev_for_each(rdev, mddev)
+               rdev_uninit_serial(rdev);
+}
+
+static int rdev_init_serial(struct md_rdev *rdev)
+{
+       /* serial_nums equals BARRIER_BUCKETS_NR */
+       int i, serial_nums = 1 << (PAGE_SHIFT - ilog2(sizeof(atomic_t)));
+       struct serial_in_rdev *serial = NULL;
+
+       if (test_bit(CollisionCheck, &rdev->flags))
                return 0;
 
-       spin_lock_init(&rdev->wb_list_lock);
-       INIT_LIST_HEAD(&rdev->wb_list);
-       init_waitqueue_head(&rdev->wb_io_wait);
-       set_bit(WBCollisionCheck, &rdev->flags);
+       serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
+                         GFP_KERNEL);
+       if (!serial)
+               return -ENOMEM;
 
-       return 1;
+       for (i = 0; i < serial_nums; i++) {
+               struct serial_in_rdev *serial_tmp = &serial[i];
+
+               spin_lock_init(&serial_tmp->serial_lock);
+               serial_tmp->serial_rb = RB_ROOT_CACHED;
+               init_waitqueue_head(&serial_tmp->serial_io_wait);
+       }
+
+       rdev->serial = serial;
+       set_bit(CollisionCheck, &rdev->flags);
+
+       return 0;
+}
+
+static int rdevs_init_serial(struct mddev *mddev)
+{
+       struct md_rdev *rdev;
+       int ret = 0;
+
+       rdev_for_each(rdev, mddev) {
+               ret = rdev_init_serial(rdev);
+               if (ret)
+                       break;
+       }
+
+       /* Free all resources if the pool does not exist */
+       if (ret && !mddev->serial_info_pool)
+               rdevs_uninit_serial(mddev);
+
+       return ret;
 }
 
 /*
- * Create wb_info_pool if rdev is the first multi-queue device flaged
- * with writemostly, also write-behind mode is enabled.
+ * An rdev needs serialization enabled if it meets both conditions:
+ * 1. it is a multi-queue device flagged with writemostly.
+ * 2. write-behind mode is enabled.
  */
-void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
-                         bool is_suspend)
+static int rdev_need_serial(struct md_rdev *rdev)
 {
-       if (mddev->bitmap_info.max_write_behind == 0)
-               return;
+       return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
+               rdev->bdev->bd_queue->nr_hw_queues != 1 &&
+               test_bit(WriteMostly, &rdev->flags));
+}
+
+/*
+ * Init resources for the rdev(s), then create the serial_info_pool if:
+ * 1. rdev is the first device for which rdev_need_serial() returns true.
+ * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
+ */
+void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+                             bool is_suspend)
+{
+       int ret = 0;
 
-       if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev))
+       if (rdev && !rdev_need_serial(rdev) &&
+           !test_bit(CollisionCheck, &rdev->flags))
                return;
 
-       if (mddev->wb_info_pool == NULL) {
+       if (!is_suspend)
+               mddev_suspend(mddev);
+
+       if (!rdev)
+               ret = rdevs_init_serial(mddev);
+       else
+               ret = rdev_init_serial(rdev);
+       if (ret)
+               goto abort;
+
+       if (mddev->serial_info_pool == NULL) {
                unsigned int noio_flag;
 
-               if (!is_suspend)
-                       mddev_suspend(mddev);
                noio_flag = memalloc_noio_save();
-               mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS,
-                                                       sizeof(struct wb_info));
+               mddev->serial_info_pool =
+                       mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+                                               sizeof(struct serial_info));
                memalloc_noio_restore(noio_flag);
-               if (!mddev->wb_info_pool)
-                       pr_err("can't alloc memory pool for writemostly\n");
-               if (!is_suspend)
-                       mddev_resume(mddev);
+               if (!mddev->serial_info_pool) {
+                       rdevs_uninit_serial(mddev);
+                       pr_err("can't alloc memory pool for serialization\n");
+               }
        }
+
+abort:
+       if (!is_suspend)
+               mddev_resume(mddev);
 }
-EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
 
 /*
- * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck.
+ * Free resources from the rdev(s), and destroy the serial_info_pool when:
+ * 1. rdev is the last device flagged with CollisionCheck.
+ * 2. the bitmap is destroyed while the serialize policy is not enabled.
+ * 3. the policy is being disabled and no rdev needs the pool any more.
  */
-static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev)
+void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+                              bool is_suspend)
 {
-       if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags))
+       if (rdev && !test_bit(CollisionCheck, &rdev->flags))
                return;
 
-       if (mddev->wb_info_pool) {
+       if (mddev->serial_info_pool) {
                struct md_rdev *temp;
-               int num = 0;
+               int num = 0; /* used to track if other rdevs need the pool */
 
-               /*
-                * Check if other rdevs need wb_info_pool.
-                */
-               rdev_for_each(temp, mddev)
-                       if (temp != rdev &&
-                           test_bit(WBCollisionCheck, &temp->flags))
+               if (!is_suspend)
+                       mddev_suspend(mddev);
+               rdev_for_each(temp, mddev) {
+                       if (!rdev) {
+                               if (!mddev->serialize_policy ||
+                                   !rdev_need_serial(temp))
+                                       rdev_uninit_serial(temp);
+                               else
+                                       num++;
+                       } else if (temp != rdev &&
+                                  test_bit(CollisionCheck, &temp->flags))
                                num++;
-               if (!num) {
-                       mddev_suspend(rdev->mddev);
-                       mempool_destroy(mddev->wb_info_pool);
-                       mddev->wb_info_pool = NULL;
-                       mddev_resume(rdev->mddev);
                }
+
+               if (rdev)
+                       rdev_uninit_serial(rdev);
+
+               if (num)
+                       pr_info("The mempool could be used by other devices\n");
+               else {
+                       mempool_destroy(mddev->serial_info_pool);
+                       mddev->serial_info_pool = NULL;
+               }
+               if (!is_suspend)
+                       mddev_resume(mddev);
        }
 }
 
@@ -1159,6 +1250,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
        /* not spare disk, or LEVEL_MULTIPATH */
        if (sb->level == LEVEL_MULTIPATH ||
                (rdev->desc_nr >= 0 &&
+                rdev->desc_nr < MD_SB_DISKS &&
                 sb->disks[rdev->desc_nr].state &
                 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
                spare_disk = false;
@@ -2336,7 +2428,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
        pr_debug("md: bind<%s>\n", b);
 
        if (mddev->raid_disks)
-               mddev_create_wb_pool(mddev, rdev, false);
+               mddev_create_serial_pool(mddev, rdev, false);
 
        if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
                goto fail;
@@ -2374,7 +2466,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
        bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
        list_del_rcu(&rdev->same_set);
        pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
-       mddev_destroy_wb_pool(rdev->mddev, rdev);
+       mddev_destroy_serial_pool(rdev->mddev, rdev, false);
        rdev->mddev = NULL;
        sysfs_remove_link(&rdev->kobj, "block");
        sysfs_put(rdev->sysfs_state);
@@ -2887,10 +2979,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
                }
        } else if (cmd_match(buf, "writemostly")) {
                set_bit(WriteMostly, &rdev->flags);
-               mddev_create_wb_pool(rdev->mddev, rdev, false);
+               mddev_create_serial_pool(rdev->mddev, rdev, false);
                err = 0;
        } else if (cmd_match(buf, "-writemostly")) {
-               mddev_destroy_wb_pool(rdev->mddev, rdev);
+               mddev_destroy_serial_pool(rdev->mddev, rdev, false);
                clear_bit(WriteMostly, &rdev->flags);
                err = 0;
        } else if (cmd_match(buf, "blocked")) {
@@ -5276,6 +5368,57 @@ static struct md_sysfs_entry md_fail_last_dev =
 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
        fail_last_dev_store);
 
+static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
+{
+       if (mddev->pers == NULL || (mddev->pers->level != 1))
+               return sprintf(page, "n/a\n");
+       else
+               return sprintf(page, "%d\n", mddev->serialize_policy);
+}
+
+/*
+ * Setting serialize_policy to true enforces that write IO is not reordered
+ * for raid1.
+ */
+static ssize_t
+serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
+{
+       int err;
+       bool value;
+
+       err = kstrtobool(buf, &value);
+       if (err)
+               return err;
+
+       if (value == mddev->serialize_policy)
+               return len;
+
+       err = mddev_lock(mddev);
+       if (err)
+               return err;
+       if (mddev->pers == NULL || (mddev->pers->level != 1)) {
+               pr_err("md: serialize_policy is only effective for raid1\n");
+               err = -EINVAL;
+               goto unlock;
+       }
+
+       mddev_suspend(mddev);
+       if (value)
+               mddev_create_serial_pool(mddev, NULL, true);
+       else
+               mddev_destroy_serial_pool(mddev, NULL, true);
+       mddev->serialize_policy = value;
+       mddev_resume(mddev);
+unlock:
+       mddev_unlock(mddev);
+       return err ?: len;
+}
+
+static struct md_sysfs_entry md_serialize_policy =
+__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
+       serialize_policy_store);
+
 static struct attribute *md_default_attrs[] = {
        &md_level.attr,
        &md_layout.attr,
@@ -5293,6 +5436,7 @@ static struct attribute *md_default_attrs[] = {
        &max_corr_read_errors.attr,
        &md_consistency_policy.attr,
        &md_fail_last_dev.attr,
+       &md_serialize_policy.attr,
        NULL,
 };
 
@@ -5768,18 +5912,18 @@ int md_run(struct mddev *mddev)
                goto bitmap_abort;
 
        if (mddev->bitmap_info.max_write_behind > 0) {
-               bool creat_pool = false;
+               bool create_pool = false;
 
                rdev_for_each(rdev, mddev) {
                        if (test_bit(WriteMostly, &rdev->flags) &&
-                           rdev_init_wb(rdev))
-                               creat_pool = true;
-               }
-               if (creat_pool && mddev->wb_info_pool == NULL) {
-                       mddev->wb_info_pool =
-                               mempool_create_kmalloc_pool(NR_WB_INFOS,
-                                                   sizeof(struct wb_info));
-                       if (!mddev->wb_info_pool) {
+                           rdev_init_serial(rdev))
+                               create_pool = true;
+               }
+               if (create_pool && mddev->serial_info_pool == NULL) {
+                       mddev->serial_info_pool =
+                               mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+                                                   sizeof(struct serial_info));
+                       if (!mddev->serial_info_pool) {
                                err = -ENOMEM;
                                goto bitmap_abort;
                        }
@@ -6024,8 +6168,9 @@ static void __md_stop_writes(struct mddev *mddev)
                        mddev->in_sync = 1;
                md_update_sb(mddev, 1);
        }
-       mempool_destroy(mddev->wb_info_pool);
-       mddev->wb_info_pool = NULL;
+       /* disable policy to guarantee rdevs free resources for serialization */
+       mddev->serialize_policy = 0;
+       mddev_destroy_serial_pool(mddev, NULL, true);
 }
 
 void md_stop_writes(struct mddev *mddev)
index 5f86f8adb0a48278bc003707d5d468cf441b1f5a..acd681939112fe1440e2df53465d10348aa50758 100644 (file)
  * be retried.
  */
 #define        MD_FAILFAST     (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+
+/*
+ * The struct embedded in rdev is used to serialize IO.
+ */
+struct serial_in_rdev {
+       struct rb_root_cached serial_rb;
+       spinlock_t serial_lock;
+       wait_queue_head_t serial_io_wait;
+};
+
 /*
  * MD's 'extended' device
  */
@@ -110,12 +120,7 @@ struct md_rdev {
                                           * in superblock.
                                           */
 
-       /*
-        * The members for check collision of write behind IOs.
-        */
-       struct list_head wb_list;
-       spinlock_t wb_list_lock;
-       wait_queue_head_t wb_io_wait;
+       struct serial_in_rdev *serial;  /* used for raid1 io serialization */
 
        struct work_struct del_work;    /* used for delayed sysfs removal */
 
@@ -201,9 +206,9 @@ enum flag_bits {
                                 * it didn't fail, so don't use FailFast
                                 * any more for metadata
                                 */
-       WBCollisionCheck,       /*
-                                * multiqueue device should check if there
-                                * is collision between write behind bios.
+       CollisionCheck,         /*
+                                * check if there is a collision between
+                                * raid1 serial bios.
                                 */
 };
 
@@ -263,12 +268,13 @@ enum mddev_sb_flags {
        MD_SB_NEED_REWRITE,     /* metadata write needs to be repeated */
 };
 
-#define NR_WB_INFOS    8
-/* record current range of write behind IOs */
-struct wb_info {
-       sector_t lo;
-       sector_t hi;
-       struct list_head list;
+#define NR_SERIAL_INFOS                8
+/* record current range of serialize IOs */
+struct serial_info {
+       struct rb_node node;
+       sector_t start;         /* start sector of rb node */
+       sector_t last;          /* end sector of rb node */
+       sector_t _subtree_last; /* highest sector in subtree of rb node */
 };
 
 struct mddev {
@@ -487,13 +493,14 @@ struct mddev {
                                          */
        struct work_struct flush_work;
        struct work_struct event_work;  /* used by dm to report failure event */
-       mempool_t *wb_info_pool;
+       mempool_t *serial_info_pool;
        void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
        struct md_cluster_info          *cluster_info;
        unsigned int                    good_device_nr; /* good device num within cluster raid */
 
        bool    has_superblocks:1;
        bool    fail_last_dev:1;
+       bool    serialize_policy:1;
 };
 
 enum recovery_flags {
@@ -737,8 +744,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 extern void md_reload_sb(struct mddev *mddev, int raid_disk);
 extern void md_update_sb(struct mddev *mddev, int force);
 extern void md_kick_rdev_from_array(struct md_rdev * rdev);
-extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
-                                bool is_suspend);
+extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+                                    bool is_suspend);
+extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+                                     bool is_suspend);
 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
 
index 21ea537bd55e9984f7cfe5b908a3d6bcad9038e9..eff04fa23dfad46d7d43dee24cde0a1fd90f2f68 100644 (file)
@@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
        struct btree_node *right = r->n;
        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
-       unsigned threshold = 2 * merge_threshold(left) + 1;
+       /*
+        * Ensure the number of entries in each child will be greater
+        * than or equal to (max_entries / 3 + 1), so no matter which
+        * child is used for removal, the number will still be no
+        * less than (max_entries / 3).
+        */
+       unsigned int threshold = 2 * (merge_threshold(left) + 1);
 
        if (nr_left + nr_right < threshold) {
                /*
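
A quick numeric check of the new threshold, using an assumed node capacity: with max_entries = 126, merge_threshold is 42, the old threshold was 2 * 42 + 1 = 85 and the new one is 86, so two children holding 86 entries in total are redistributed to 43 = max_entries / 3 + 1 each instead of being merged, and either child can then lose an entry without dropping below max_entries / 3.

#include <assert.h>

int main(void)
{
	unsigned int max_entries = 126;                     /* assumed capacity */
	unsigned int merge_threshold = max_entries / 3;     /* 42 */
	unsigned int threshold = 2 * (merge_threshold + 1); /* 86; was 85 */

	/* After redistribution each child holds at least threshold / 2. */
	assert(threshold / 2 == max_entries / 3 + 1);
	return 0;
}
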
index b7c20979bd19a68205dbbdfe5fd05abf5e3ef6c3..322386ff5d225dfb463c68627a885252f76e42f4 100644 (file)
@@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        char b[BDEVNAME_SIZE];
        char b2[BDEVNAME_SIZE];
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
-       unsigned short blksize = 512;
+       unsigned blksize = 512;
 
        *private_conf = ERR_PTR(-ENOMEM);
        if (!conf)
index a409ab6f30bc33375561d4cebc20c5e20f9435ba..cd810e19508619db468fc777028e9620a6da055c 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
+#include <linux/interval_tree_generic.h>
 
 #include <trace/events/block.h>
 
@@ -50,55 +51,71 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
 
 #include "raid1-10.c"
 
-static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
+                    START, LAST, static inline, raid1_rb);
+
+static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
+                               struct serial_info *si, int idx)
 {
-       struct wb_info *wi, *temp_wi;
        unsigned long flags;
        int ret = 0;
-       struct mddev *mddev = rdev->mddev;
-
-       wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);
-
-       spin_lock_irqsave(&rdev->wb_list_lock, flags);
-       list_for_each_entry(temp_wi, &rdev->wb_list, list) {
-               /* collision happened */
-               if (hi > temp_wi->lo && lo < temp_wi->hi) {
-                       ret = -EBUSY;
-                       break;
-               }
+       sector_t lo = r1_bio->sector;
+       sector_t hi = lo + r1_bio->sectors;
+       struct serial_in_rdev *serial = &rdev->serial[idx];
+
+       spin_lock_irqsave(&serial->serial_lock, flags);
+       /* collision happened */
+       if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
+               ret = -EBUSY;
+       else {
+               si->start = lo;
+               si->last = hi;
+               raid1_rb_insert(si, &serial->serial_rb);
        }
-
-       if (!ret) {
-               wi->lo = lo;
-               wi->hi = hi;
-               list_add(&wi->list, &rdev->wb_list);
-       } else
-               mempool_free(wi, mddev->wb_info_pool);
-       spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
+       spin_unlock_irqrestore(&serial->serial_lock, flags);
 
        return ret;
 }
 
-static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+       struct mddev *mddev = rdev->mddev;
+       struct serial_info *si;
+       int idx = sector_to_idx(r1_bio->sector);
+       struct serial_in_rdev *serial = &rdev->serial[idx];
+
+       if (WARN_ON(!mddev->serial_info_pool))
+               return;
+       si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
+       wait_event(serial->serial_io_wait,
+                  check_and_add_serial(rdev, r1_bio, si, idx) == 0);
+}
+
+static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
 {
-       struct wb_info *wi;
+       struct serial_info *si;
        unsigned long flags;
        int found = 0;
        struct mddev *mddev = rdev->mddev;
-
-       spin_lock_irqsave(&rdev->wb_list_lock, flags);
-       list_for_each_entry(wi, &rdev->wb_list, list)
-               if (hi == wi->hi && lo == wi->lo) {
-                       list_del(&wi->list);
-                       mempool_free(wi, mddev->wb_info_pool);
+       int idx = sector_to_idx(lo);
+       struct serial_in_rdev *serial = &rdev->serial[idx];
+
+       spin_lock_irqsave(&serial->serial_lock, flags);
+       for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
+            si; si = raid1_rb_iter_next(si, lo, hi)) {
+               if (si->start == lo && si->last == hi) {
+                       raid1_rb_remove(si, &serial->serial_rb);
+                       mempool_free(si, mddev->serial_info_pool);
                        found = 1;
                        break;
                }
-
+       }
        if (!found)
-               WARN(1, "The write behind IO is not recorded\n");
-       spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
-       wake_up(&rdev->wb_io_wait);
+               WARN(1, "The write IO is not recorded for serialization\n");
+       spin_unlock_irqrestore(&serial->serial_lock, flags);
+       wake_up(&serial->serial_io_wait);
 }
 
 /*
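
The interval tree replaces the old linear wb_list scan: a write may proceed only when no recorded range overlaps it. A userspace sketch of just the overlap semantics, with a plain array standing in for the rbtree and all names illustrative:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, last; };

/* Closed-interval overlap test, as the interval tree query performs it. */
static bool overlaps(const struct range *r, unsigned long lo, unsigned long hi)
{
	return r->start <= hi && lo <= r->last;
}

int main(void)
{
	struct range inflight[] = { { 0, 7 }, { 64, 127 } }; /* in-flight writes */
	unsigned long lo = 100, hi = 119;                    /* new write */
	size_t i;

	for (i = 0; i < sizeof(inflight) / sizeof(inflight[0]); i++)
		if (overlaps(&inflight[i], lo, hi))
			printf("write [%lu, %lu] must wait for [%lu, %lu]\n",
			       lo, hi, inflight[i].start, inflight[i].last);
	return 0;
}
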
@@ -430,6 +447,8 @@ static void raid1_end_write_request(struct bio *bio)
        int mirror = find_bio_disk(r1_bio, bio);
        struct md_rdev *rdev = conf->mirrors[mirror].rdev;
        bool discard_error;
+       sector_t lo = r1_bio->sector;
+       sector_t hi = r1_bio->sector + r1_bio->sectors;
 
        discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
@@ -499,12 +518,8 @@ static void raid1_end_write_request(struct bio *bio)
        }
 
        if (behind) {
-               if (test_bit(WBCollisionCheck, &rdev->flags)) {
-                       sector_t lo = r1_bio->sector;
-                       sector_t hi = r1_bio->sector + r1_bio->sectors;
-
-                       remove_wb(rdev, lo, hi);
-               }
+               if (test_bit(CollisionCheck, &rdev->flags))
+                       remove_serial(rdev, lo, hi);
                if (test_bit(WriteMostly, &rdev->flags))
                        atomic_dec(&r1_bio->behind_remaining);
 
@@ -527,7 +542,8 @@ static void raid1_end_write_request(struct bio *bio)
                                call_bio_endio(r1_bio);
                        }
                }
-       }
+       } else if (rdev->mddev->serialize_policy)
+               remove_serial(rdev, lo, hi);
        if (r1_bio->bios[mirror] == NULL)
                rdev_dec_pending(rdev, conf->mddev);
 
@@ -1479,6 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
        for (i = 0; i < disks; i++) {
                struct bio *mbio = NULL;
+               struct md_rdev *rdev = conf->mirrors[i].rdev;
                if (!r1_bio->bios[i])
                        continue;
 
@@ -1506,18 +1523,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                        mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
 
                if (r1_bio->behind_master_bio) {
-                       struct md_rdev *rdev = conf->mirrors[i].rdev;
-
-                       if (test_bit(WBCollisionCheck, &rdev->flags)) {
-                               sector_t lo = r1_bio->sector;
-                               sector_t hi = r1_bio->sector + r1_bio->sectors;
-
-                               wait_event(rdev->wb_io_wait,
-                                          check_and_add_wb(rdev, lo, hi) == 0);
-                       }
+                       if (test_bit(CollisionCheck, &rdev->flags))
+                               wait_for_serialization(rdev, r1_bio);
                        if (test_bit(WriteMostly, &rdev->flags))
                                atomic_inc(&r1_bio->behind_remaining);
-               }
+               } else if (mddev->serialize_policy)
+                       wait_for_serialization(rdev, r1_bio);
 
                r1_bio->bios[i] = mbio;
 
@@ -2782,7 +2793,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                                write_targets++;
                        }
                }
-               if (bio->bi_end_io) {
+               if (rdev && bio->bi_end_io) {
                        atomic_inc(&rdev->nr_pending);
                        bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
                        bio_set_dev(bio, rdev->bdev);
index cab5b1352892fbd3584aec57e2180c51b01f6a23..d50238d0a85db0bf29f9ef67cd7ef23c826d8724 100644 (file)
@@ -1360,7 +1360,7 @@ int ppl_init_log(struct r5conf *conf)
                return -EINVAL;
        }
 
-       max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
+       max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
                BITS_PER_BYTE;
        if (conf->raid_disks > max_disks) {
                pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
index f0fc538bfe597f7a1179b31eeaa8a8bfa837101a..ba00e9877f025caff86ae8efa93029f9664f2085 100644 (file)
@@ -5726,7 +5726,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
                                do_flush = false;
                        }
 
-                       if (!sh->batch_head)
+                       if (!sh->batch_head || sh == sh->batch_head)
                                set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        if ((!sh->batch_head || sh == sh->batch_head) &&
@@ -6598,7 +6598,6 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
 
 static int alloc_thread_groups(struct r5conf *conf, int cnt,
                               int *group_cnt,
-                              int *worker_cnt_per_group,
                               struct r5worker_group **worker_groups);
 static ssize_t
 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
@@ -6607,7 +6606,7 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
        unsigned int new;
        int err;
        struct r5worker_group *new_groups, *old_groups;
-       int group_cnt, worker_cnt_per_group;
+       int group_cnt;
 
        if (len >= PAGE_SIZE)
                return -EINVAL;
@@ -6630,13 +6629,11 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
                if (old_groups)
                        flush_workqueue(raid5_wq);
 
-               err = alloc_thread_groups(conf, new,
-                                         &group_cnt, &worker_cnt_per_group,
-                                         &new_groups);
+               err = alloc_thread_groups(conf, new, &group_cnt, &new_groups);
                if (!err) {
                        spin_lock_irq(&conf->device_lock);
                        conf->group_cnt = group_cnt;
-                       conf->worker_cnt_per_group = worker_cnt_per_group;
+                       conf->worker_cnt_per_group = new;
                        conf->worker_groups = new_groups;
                        spin_unlock_irq(&conf->device_lock);
 
@@ -6672,16 +6669,13 @@ static struct attribute_group raid5_attrs_group = {
        .attrs = raid5_attrs,
 };
 
-static int alloc_thread_groups(struct r5conf *conf, int cnt,
-                              int *group_cnt,
-                              int *worker_cnt_per_group,
+static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
                               struct r5worker_group **worker_groups)
 {
        int i, j, k;
        ssize_t size;
        struct r5worker *workers;
 
-       *worker_cnt_per_group = cnt;
        if (cnt == 0) {
                *group_cnt = 0;
                *worker_groups = NULL;
@@ -6882,7 +6876,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        struct disk_info *disk;
        char pers_name[6];
        int i;
-       int group_cnt, worker_cnt_per_group;
+       int group_cnt;
        struct r5worker_group *new_group;
        int ret;
 
@@ -6928,10 +6922,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        for (i = 0; i < PENDING_IO_MAX; i++)
                list_add(&conf->pending_data[i].sibling, &conf->free_list);
        /* Don't enable multi-threading by default */
-       if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
-                                &new_group)) {
+       if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) {
                conf->group_cnt = group_cnt;
-               conf->worker_cnt_per_group = worker_cnt_per_group;
+               conf->worker_cnt_per_group = 0;
                conf->worker_groups = new_group;
        } else
                goto abort;
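
The raid5 hunks drop alloc_thread_groups()'s worker_cnt_per_group out-parameter: the function only ever echoed its cnt argument back through it, so the sysfs store now records the value it passed in (new) and setup_conf() records 0. The pattern in miniature, with hypothetical names:

    /* An out-parameter that merely echoes an input can be dropped;
     * the caller already owns that value. Hypothetical signature. */
    static int alloc_groups(int cnt, int *group_cnt)
    {
            /* *per_group = cnt;  <- removed: a pure copy of the input */
            *group_cnt = cnt ? 1 : 0;   /* placeholder grouping policy */
            return 0;
    }
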
index 9340435a94a095d77bddcb12b6331940878d1c8e..6c95dc471d4c6c63d53f5fdab61acd150bf15a40 100644 (file)
@@ -380,7 +380,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status)
        } else {
                list_del_init(&data->list);
                if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
-                       data->adap->transmit_queue_sz--;
+                       if (!WARN_ON(!data->adap->transmit_queue_sz))
+                               data->adap->transmit_queue_sz--;
        }
 
        if (data->msg.tx_status & CEC_TX_STATUS_OK) {
@@ -432,6 +433,14 @@ static void cec_flush(struct cec_adapter *adap)
                 * need to do anything special in that case.
                 */
        }
+       /*
+        * If something went wrong and this counter isn't what it should
+        * be, then this will reset it back to 0. Warn if it is not 0,
+        * since it indicates a bug, either in this framework or in a
+        * CEC driver.
+        */
+       if (WARN_ON(adap->transmit_queue_sz))
+               adap->transmit_queue_sz = 0;
 }
 
 /*
@@ -456,7 +465,7 @@ int cec_thread_func(void *_adap)
                bool timeout = false;
                u8 attempts;
 
-               if (adap->transmitting) {
+               if (adap->transmit_in_progress) {
                        int err;
 
                        /*
@@ -491,7 +500,7 @@ int cec_thread_func(void *_adap)
                        goto unlock;
                }
 
-               if (adap->transmitting && timeout) {
+               if (adap->transmit_in_progress && timeout) {
                        /*
                         * If we timeout, then log that. Normally this does
                         * not happen and it is an indication of a faulty CEC
@@ -500,14 +509,18 @@ int cec_thread_func(void *_adap)
                         * so much traffic on the bus that the adapter was
                         * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
                         */
-                       pr_warn("cec-%s: message %*ph timed out\n", adap->name,
-                               adap->transmitting->msg.len,
-                               adap->transmitting->msg.msg);
+                       if (adap->transmitting) {
+                               pr_warn("cec-%s: message %*ph timed out\n", adap->name,
+                                       adap->transmitting->msg.len,
+                                       adap->transmitting->msg.msg);
+                               /* Just give up on this. */
+                               cec_data_cancel(adap->transmitting,
+                                               CEC_TX_STATUS_TIMEOUT);
+                       } else {
+                               pr_warn("cec-%s: transmit timed out\n", adap->name);
+                       }
                        adap->transmit_in_progress = false;
                        adap->tx_timeouts++;
-                       /* Just give up on this. */
-                       cec_data_cancel(adap->transmitting,
-                                       CEC_TX_STATUS_TIMEOUT);
                        goto unlock;
                }
 
@@ -522,7 +535,8 @@ int cec_thread_func(void *_adap)
                data = list_first_entry(&adap->transmit_queue,
                                        struct cec_data, list);
                list_del_init(&data->list);
-               adap->transmit_queue_sz--;
+               if (!WARN_ON(!data->adap->transmit_queue_sz))
+                       adap->transmit_queue_sz--;
 
                /* Make this the current transmitting message */
                adap->transmitting = data;
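
Every decrement of transmit_queue_sz is now guarded, so an accounting bug warns once instead of wrapping the unsigned counter toward ~4 billion, and cec_flush() re-zeroes the counter as a backstop. The guarded-decrement idiom as a runnable userspace sketch (WARN_ON here is a stand-in for the kernel macro):

    #include <stdio.h>

    static unsigned int queue_sz;

    #define WARN_ON(cond) \
            ((cond) ? (fprintf(stderr, "WARN: %s\n", #cond), 1) : 0)

    static void queue_sz_dec(void)
    {
            if (!WARN_ON(!queue_sz))
                    queue_sz--;   /* decrement only when it cannot wrap */
    }
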
@@ -1085,11 +1099,11 @@ void cec_received_msg_ts(struct cec_adapter *adap,
                        valid_la = false;
                else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
                        valid_la = false;
-               else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+               else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
                        valid_la = false;
                else if (cec_msg_is_broadcast(msg) &&
-                        adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
-                        !(dir_fl & BCAST2_0))
+                        adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
+                        !(dir_fl & BCAST1_4))
                        valid_la = false;
        }
        if (valid_la && min_len) {
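
The broadcast-validity test above was inverted: the old code required the BCAST1_4 bit unconditionally and then additionally demanded BCAST2_0 on CEC 2.0 adapters. The rewrite checks the generic BCAST mask first and only insists on the 1.4 bit for pre-2.0 adapters. The corrected predicate in isolation, assuming BCAST is the union of the 1.4 and 2.0 broadcast flags, as the surrounding code implies:

    static bool bcast_rejected(bool is_bcast, unsigned int dir_fl,
                               unsigned int cec_version)
    {
            if (is_bcast && !(dir_fl & BCAST))
                    return true;   /* no broadcast form defined at all */
            if (is_bcast && cec_version < CEC_OP_CEC_VERSION_2_0 &&
                !(dir_fl & BCAST1_4))
                    return true;   /* pre-2.0 adapter, 1.4 form missing */
            return false;
    }
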
index 04d51ca6322399589fc62345044146882de643dd..4c8c96a35282185c77845bb449279304acb3b907 100644 (file)
@@ -105,7 +105,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
-                       ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
+                       ioremap(__pfn_to_phys(nums[0]), size + offset);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
                                        PAGE_KERNEL);
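
This is the first of many ioremap_nocache() -> ioremap() substitutions in this merge (cx18, ivtv, davinci, pti, rtsx, scif, and the devm_ variant in tegra-cec all follow). ioremap() already returns an uncached mapping for MMIO on every architecture, so the rename is behaviour-preserving and clears the way for removing the alias. The typical probe-time shape, as a hedged kernel-style sketch for a hypothetical driver:

    #include <linux/io.h>
    #include <linux/ioport.h>

    static void __iomem *map_regs(struct resource *res)
    {
            /* was ioremap_nocache(); identical semantics */
            return ioremap(res->start, resource_size(res));
    }
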
index fd47bd07ffd8070e53a48b37fa3e2c30b0c2d350..2f1eeeb6e7c748c035578e095d493e2b805bfa77 100644 (file)
@@ -938,7 +938,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
        /* map io memory */
        CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
                   (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
-       cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
+       cx->enc_mem = ioremap(cx->base_addr + CX18_MEM_OFFSET,
                                       CX18_MEM_SIZE);
        if (!cx->enc_mem) {
                CX18_ERR("ioremap failed. Can't get a window into CX23418 memory and register space\n");
index 3f3f40ea890bbc045376a3d15400d336143fc2ec..1f79700a6307a7870717dafef10f25b575c73421 100644 (file)
@@ -1042,7 +1042,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
        /* map io memory */
        IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
                   (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
-       itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
+       itv->enc_mem = ioremap(itv->base_addr + IVTV_ENCODER_OFFSET,
                                       IVTV_ENCODER_SIZE);
        if (!itv->enc_mem) {
                IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n");
@@ -1056,7 +1056,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
        if (itv->has_cx23415) {
                IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
                                (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
-               itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
+               itv->dec_mem = ioremap(itv->base_addr + IVTV_DECODER_OFFSET,
                                IVTV_DECODER_SIZE);
                if (!itv->dec_mem) {
                        IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n");
@@ -1075,7 +1075,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
        IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
                   (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        itv->reg_mem =
-           ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+           ioremap(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        if (!itv->reg_mem) {
                IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n");
                IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n");
index 95a56cce9b657c32cff59be0950f9c12a4045153..1daf9e07cad7484eaa5e66b2b8a8d6fdcc9ffb37 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/ivtvfb.h>
 
 #ifdef CONFIG_X86_64
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #endif
 
 /* card parameters */
index f299baf7cbe084fa57ac0581d67c9253ec0c1c71..e06d113dfe9668d707e32f360091c563d679170b 100644 (file)
@@ -883,7 +883,7 @@ static int dm355_ccdc_probe(struct platform_device *pdev)
                goto fail_nores;
        }
 
-       ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+       ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
        if (!ccdc_cfg.base_addr) {
                status = -ENOMEM;
                goto fail_nomem;
index 2fc6c9c38f9ccd8aa4ce1fe79b4c4d499dbb72bc..c6378c4e00745db3bbf8a570fc96f242ebbb4317 100644 (file)
@@ -817,7 +817,7 @@ static int dm644x_ccdc_probe(struct platform_device *pdev)
                goto fail_nores;
        }
 
-       ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res));
+       ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
        if (!ccdc_cfg.base_addr) {
                status = -ENOMEM;
                goto fail_nomem;
index e2e7ab7b7f45b8698fbb82ee3983f9d7b234bbda..b49378b18e5d7409ac1fd0de78cc9e7d333fc777 100644 (file)
@@ -1045,7 +1045,7 @@ static int isif_probe(struct platform_device *pdev)
                        status = -EBUSY;
                        goto fail_nobase_res;
                }
-               addr = ioremap_nocache(res->start, resource_size(res));
+               addr = ioremap(res->start, resource_size(res));
                if (!addr) {
                        status = -ENOMEM;
                        goto fail_base_iomap;
index 97d660606d9845981666cdf54f2a88bb5e511bc9..4dbdf3180d1080a334449961a727e88b4aaadc9a 100644 (file)
@@ -753,7 +753,7 @@ static const struct preview_update update_attrs[] = {
                preview_config_luma_enhancement,
                preview_enable_luma_enhancement,
                offsetof(struct prev_params, luma),
-               FIELD_SIZEOF(struct prev_params, luma),
+               sizeof_field(struct prev_params, luma),
                offsetof(struct omap3isp_prev_update_config, luma),
        }, /* OMAP3ISP_PREV_INVALAW */ {
                NULL,
@@ -762,55 +762,55 @@ static const struct preview_update update_attrs[] = {
                preview_config_hmed,
                preview_enable_hmed,
                offsetof(struct prev_params, hmed),
-               FIELD_SIZEOF(struct prev_params, hmed),
+               sizeof_field(struct prev_params, hmed),
                offsetof(struct omap3isp_prev_update_config, hmed),
        }, /* OMAP3ISP_PREV_CFA */ {
                preview_config_cfa,
                NULL,
                offsetof(struct prev_params, cfa),
-               FIELD_SIZEOF(struct prev_params, cfa),
+               sizeof_field(struct prev_params, cfa),
                offsetof(struct omap3isp_prev_update_config, cfa),
        }, /* OMAP3ISP_PREV_CHROMA_SUPP */ {
                preview_config_chroma_suppression,
                preview_enable_chroma_suppression,
                offsetof(struct prev_params, csup),
-               FIELD_SIZEOF(struct prev_params, csup),
+               sizeof_field(struct prev_params, csup),
                offsetof(struct omap3isp_prev_update_config, csup),
        }, /* OMAP3ISP_PREV_WB */ {
                preview_config_whitebalance,
                NULL,
                offsetof(struct prev_params, wbal),
-               FIELD_SIZEOF(struct prev_params, wbal),
+               sizeof_field(struct prev_params, wbal),
                offsetof(struct omap3isp_prev_update_config, wbal),
        }, /* OMAP3ISP_PREV_BLKADJ */ {
                preview_config_blkadj,
                NULL,
                offsetof(struct prev_params, blkadj),
-               FIELD_SIZEOF(struct prev_params, blkadj),
+               sizeof_field(struct prev_params, blkadj),
                offsetof(struct omap3isp_prev_update_config, blkadj),
        }, /* OMAP3ISP_PREV_RGB2RGB */ {
                preview_config_rgb_blending,
                NULL,
                offsetof(struct prev_params, rgb2rgb),
-               FIELD_SIZEOF(struct prev_params, rgb2rgb),
+               sizeof_field(struct prev_params, rgb2rgb),
                offsetof(struct omap3isp_prev_update_config, rgb2rgb),
        }, /* OMAP3ISP_PREV_COLOR_CONV */ {
                preview_config_csc,
                NULL,
                offsetof(struct prev_params, csc),
-               FIELD_SIZEOF(struct prev_params, csc),
+               sizeof_field(struct prev_params, csc),
                offsetof(struct omap3isp_prev_update_config, csc),
        }, /* OMAP3ISP_PREV_YC_LIMIT */ {
                preview_config_yc_range,
                NULL,
                offsetof(struct prev_params, yclimit),
-               FIELD_SIZEOF(struct prev_params, yclimit),
+               sizeof_field(struct prev_params, yclimit),
                offsetof(struct omap3isp_prev_update_config, yclimit),
        }, /* OMAP3ISP_PREV_DEFECT_COR */ {
                preview_config_dcor,
                preview_enable_dcor,
                offsetof(struct prev_params, dcor),
-               FIELD_SIZEOF(struct prev_params, dcor),
+               sizeof_field(struct prev_params, dcor),
                offsetof(struct omap3isp_prev_update_config, dcor),
        }, /* Previously OMAP3ISP_PREV_GAMMABYPASS, not used anymore */ {
                NULL,
@@ -828,13 +828,13 @@ static const struct preview_update update_attrs[] = {
                preview_config_noisefilter,
                preview_enable_noisefilter,
                offsetof(struct prev_params, nf),
-               FIELD_SIZEOF(struct prev_params, nf),
+               sizeof_field(struct prev_params, nf),
                offsetof(struct omap3isp_prev_update_config, nf),
        }, /* OMAP3ISP_PREV_GAMMA */ {
                preview_config_gammacorrn,
                preview_enable_gammacorrn,
                offsetof(struct prev_params, gamma),
-               FIELD_SIZEOF(struct prev_params, gamma),
+               sizeof_field(struct prev_params, gamma),
                offsetof(struct omap3isp_prev_update_config, gamma),
        }, /* OMAP3ISP_PREV_CONTRAST */ {
                preview_config_contrast,
index a99caac59f444081a64bc6e96f2b6e8fa2f7e720..1ac0c70a5981829afe733ccb4a5066ae3e52625c 100644 (file)
@@ -351,7 +351,7 @@ static int tegra_cec_probe(struct platform_device *pdev)
        if (cec->tegra_cec_irq <= 0)
                return -EBUSY;
 
-       cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start,
+       cec->cec_base = devm_ioremap(&pdev->dev, res->start,
                                             resource_size(res));
 
        if (!cec->cec_base) {
index ac88ade94cdab449ebbd257964426da519899a55..59609556d9692b6e1e97746a8a328bdf8fb4fe89 100644 (file)
@@ -116,6 +116,7 @@ struct pulse8 {
        unsigned int vers;
        struct completion cmd_done;
        struct work_struct work;
+       u8 work_result;
        struct delayed_work ping_eeprom_work;
        struct cec_msg rx_msg;
        u8 data[DATA_SIZE];
@@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work)
 {
        struct pulse8 *pulse8 =
                container_of(work, struct pulse8, work);
+       u8 result = pulse8->work_result;
 
-       switch (pulse8->data[0] & 0x3f) {
+       pulse8->work_result = 0;
+       switch (result & 0x3f) {
        case MSGCODE_FRAME_DATA:
                cec_received_msg(pulse8->adap, &pulse8->rx_msg);
                break;
@@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
                pulse8->escape = false;
        } else if (data == MSGEND) {
                struct cec_msg *msg = &pulse8->rx_msg;
+               u8 msgcode = pulse8->buf[0];
 
                if (debug)
                        dev_info(pulse8->dev, "received: %*ph\n",
                                 pulse8->idx, pulse8->buf);
-               pulse8->data[0] = pulse8->buf[0];
-               switch (pulse8->buf[0] & 0x3f) {
+               switch (msgcode & 0x3f) {
                case MSGCODE_FRAME_START:
                        msg->len = 1;
                        msg->msg[0] = pulse8->buf[1];
@@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
                        if (msg->len == CEC_MAX_MSG_SIZE)
                                break;
                        msg->msg[msg->len++] = pulse8->buf[1];
-                       if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
+                       if (msgcode & MSGCODE_FRAME_EOM) {
+                               WARN_ON(pulse8->work_result);
+                               pulse8->work_result = msgcode;
                                schedule_work(&pulse8->work);
+                               break;
+                       }
                        break;
                case MSGCODE_TRANSMIT_SUCCEEDED:
                case MSGCODE_TRANSMIT_FAILED_LINE:
                case MSGCODE_TRANSMIT_FAILED_ACK:
                case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
                case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+                       WARN_ON(pulse8->work_result);
+                       pulse8->work_result = msgcode;
                        schedule_work(&pulse8->work);
                        break;
                case MSGCODE_HIGH_ERROR:
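
The pulse8 interrupt handler previously parked the message code in pulse8->data[0], a buffer it also reuses for command payloads, so a back-to-back interrupt could clobber the code before the work item ran. The fix latches it into a dedicated work_result byte and WARNs if an unconsumed result is still pending. The latch-then-schedule idiom, sketched with illustrative names:

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct dev_ctx {
            u8 work_result;            /* written in IRQ, read in work */
            struct work_struct work;
    };

    static void irq_path(struct dev_ctx *ctx, u8 msgcode)
    {
            WARN_ON(ctx->work_result); /* previous result not consumed */
            ctx->work_result = msgcode;
            schedule_work(&ctx->work);
    }

    static void work_path(struct work_struct *w)
    {
            struct dev_ctx *ctx = container_of(w, struct dev_ctx, work);
            u8 result = ctx->work_result;

            ctx->work_result = 0;      /* re-arm before acting on it */
            /* ... dispatch on result ... */
    }
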
index 4e700583659bac8ce871876166cdf44ebabf4e0e..003b7422aeef612cb6023d8dc45fd440d2b8bf2e 100644 (file)
@@ -2652,7 +2652,7 @@ struct v4l2_ioctl_info {
 /* Zero struct from after the field to the end */
 #define INFO_FL_CLEAR(v4l2_struct, field)                      \
        ((offsetof(struct v4l2_struct, field) +                 \
-         FIELD_SIZEOF(struct v4l2_struct, field)) << 16)
+         sizeof_field(struct v4l2_struct, field)) << 16)
 #define INFO_FL_CLEAR_MASK     (_IOC_SIZEMASK << 16)
 
 #define DEFINE_V4L_STUB_FUNC(_vidioc)                          \
index f9ac224130008026cc317f8d6ea26272dd7bf10c..1074b882c57c87d2da4565fbf73494a8933ebe04 100644 (file)
@@ -100,19 +100,19 @@ struct buflist {
  * Function prototypes. Called from OS entry point mptctl_ioctl.
  * arg contents specific to function.
  */
-static int mptctl_fw_download(unsigned long arg);
-static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_gettargetinfo(unsigned long arg);
-static int mptctl_readtest(unsigned long arg);
-static int mptctl_mpt_command(unsigned long arg);
-static int mptctl_eventquery(unsigned long arg);
-static int mptctl_eventenable(unsigned long arg);
-static int mptctl_eventreport(unsigned long arg);
-static int mptctl_replace_fw(unsigned long arg);
-
-static int mptctl_do_reset(unsigned long arg);
-static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_hp_targetinfo(unsigned long arg);
+static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg);
+
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg);
 
 static int  mptctl_probe(struct pci_dev *, const struct pci_device_id *);
 static void mptctl_remove(struct pci_dev *);
@@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
 /*
  * Private function calls.
  */
-static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr);
-static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen);
 static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
                struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
 static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
@@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
         * by TM and FW reloads.
         */
        if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
-               return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+               return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd));
        } else if (cmd == MPTTARGETINFO) {
-               return mptctl_gettargetinfo(arg);
+               return mptctl_gettargetinfo(iocp, arg);
        } else if (cmd == MPTTEST) {
-               return mptctl_readtest(arg);
+               return mptctl_readtest(iocp, arg);
        } else if (cmd == MPTEVENTQUERY) {
-               return mptctl_eventquery(arg);
+               return mptctl_eventquery(iocp, arg);
        } else if (cmd == MPTEVENTENABLE) {
-               return mptctl_eventenable(arg);
+               return mptctl_eventenable(iocp, arg);
        } else if (cmd == MPTEVENTREPORT) {
-               return mptctl_eventreport(arg);
+               return mptctl_eventreport(iocp, arg);
        } else if (cmd == MPTFWREPLACE) {
-               return mptctl_replace_fw(arg);
+               return mptctl_replace_fw(iocp, arg);
        }
 
        /* All of these commands require an interrupt or
@@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                return ret;
 
        if (cmd == MPTFWDOWNLOAD)
-               ret = mptctl_fw_download(arg);
+               ret = mptctl_fw_download(iocp, arg);
        else if (cmd == MPTCOMMAND)
-               ret = mptctl_mpt_command(arg);
+               ret = mptctl_mpt_command(iocp, arg);
        else if (cmd == MPTHARDRESET)
-               ret = mptctl_do_reset(arg);
+               ret = mptctl_do_reset(iocp, arg);
        else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
-               ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+               ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd));
        else if (cmd == HP_GETTARGETINFO)
-               ret = mptctl_hp_targetinfo(arg);
+               ret = mptctl_hp_targetinfo(iocp, arg);
        else
                ret = -EINVAL;
 
@@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return ret;
 }
 
-static int mptctl_do_reset(unsigned long arg)
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg)
 {
        struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
        struct mpt_ioctl_diag_reset krinfo;
-       MPT_ADAPTER             *iocp;
 
        if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
                printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
@@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg)
                return -EFAULT;
        }
 
-       if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
-               printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
-                               __FILE__, __LINE__, krinfo.hdr.iocnum);
-               return -ENODEV; /* (-6) No such device or address */
-       }
-
        dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
            iocp->name));
 
@@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg)
  *             -ENOMSG if FW upload returned bad status
  */
 static int
-mptctl_fw_download(unsigned long arg)
+mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg)
 {
        struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
        struct mpt_fw_xfer       kfwdl;
@@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg)
                return -EFAULT;
        }
 
-       return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+       return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg)
  *             -ENOMSG if FW upload returned bad status
  */
 static int
-mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
 {
        FWDownload_t            *dlmsg;
        MPT_FRAME_HDR           *mf;
-       MPT_ADAPTER             *iocp;
        FWDownloadTCSGE_t       *ptsge;
        MptSge_t                *sgl, *sgIn;
        char                    *sgOut;
@@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
        pFWDownloadReply_t       ReplyMsg = NULL;
        unsigned long            timeleft;
 
-       if (mpt_verify_adapter(ioc, &iocp) < 0) {
-               printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
-                                ioc);
-               return -ENODEV; /* (-6) No such device or address */
-       } else {
-
-               /*  Valid device. Get a message frame and construct the FW download message.
-               */
-               if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
-                       return -EAGAIN;
-       }
+       /*  Valid device. Get a message frame and construct the FW download message.
+       */
+       if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+               return -EAGAIN;
 
        dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
            "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
@@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
            iocp->name, ufwbuf));
        dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
            iocp->name, (int)fwlen));
-       dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc   = %04xh\n",
-           iocp->name, ioc));
 
        dlmsg = (FWDownload_t*) mf;
        ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
@@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
  *             -ENODEV  if no such device/adapter
  */
 static int
-mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
 {
        struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
        struct mpt_ioctl_iocinfo *karg;
-       MPT_ADAPTER             *ioc;
        struct pci_dev          *pdev;
-       int                     iocnum;
        unsigned int            port;
        int                     cim_rev;
        struct scsi_device      *sdev;
@@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
                return PTR_ERR(karg);
        }
 
-       if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               kfree(karg);
-               return -ENODEV;
-       }
-
        /* Verify the data transfer size is correct. */
        if (karg->hdr.maxDataSize != data_size) {
                printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
@@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
  *             -ENODEV  if no such device/adapter
  */
 static int
-mptctl_gettargetinfo (unsigned long arg)
+mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
        struct mpt_ioctl_targetinfo karg;
-       MPT_ADAPTER             *ioc;
        VirtDevice              *vdevice;
        char                    *pmem;
        int                     *pdata;
-       int                     iocnum;
        int                     numDevices = 0;
        int                     lun;
        int                     maxWordsLeft;
@@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
            ioc->name));
        /* Get the port number and set the maximum number of bytes
@@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg)
  *             -ENODEV  if no such device/adapter
  */
 static int
-mptctl_readtest (unsigned long arg)
+mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_test __user *uarg = (void __user *) arg;
        struct mpt_ioctl_test    karg;
-       MPT_ADAPTER *ioc;
-       int iocnum;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
                printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
@@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
            ioc->name));
        /* Fill in the data and return the structure to the calling
@@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg)
  *             -ENODEV  if no such device/adapter
  */
 static int
-mptctl_eventquery (unsigned long arg)
+mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
        struct mpt_ioctl_eventquery      karg;
-       MPT_ADAPTER *ioc;
-       int iocnum;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
                printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
@@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
            ioc->name));
        karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
@@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static int
-mptctl_eventenable (unsigned long arg)
+mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
        struct mpt_ioctl_eventenable     karg;
-       MPT_ADAPTER *ioc;
-       int iocnum;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
                printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
@@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
            ioc->name));
        if (ioc->events == NULL) {
@@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static int
-mptctl_eventreport (unsigned long arg)
+mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
        struct mpt_ioctl_eventreport     karg;
-       MPT_ADAPTER              *ioc;
-       int                      iocnum;
        int                      numBytes, maxEvents, max;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
@@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
            ioc->name));
 
@@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static int
-mptctl_replace_fw (unsigned long arg)
+mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
        struct mpt_ioctl_replace_fw      karg;
-       MPT_ADAPTER              *ioc;
-       int                      iocnum;
        int                      newFwSize;
 
        if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
@@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
            ioc->name));
        /* If caching FW, Free the old FW image
@@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg)
  *             -ENOMEM if memory allocation error
  */
 static int
-mptctl_mpt_command (unsigned long arg)
+mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg)
 {
        struct mpt_ioctl_command __user *uarg = (void __user *) arg;
        struct mpt_ioctl_command  karg;
-       MPT_ADAPTER     *ioc;
-       int             iocnum;
        int             rc;
 
 
@@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
-       rc = mptctl_do_mpt_command (karg, &uarg->MF);
+       rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF);
 
        return rc;
 }
@@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg)
  *             -EPERM if SCSI I/O and target is untagged
  */
 static int
-mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr)
 {
-       MPT_ADAPTER     *ioc;
        MPT_FRAME_HDR   *mf = NULL;
        MPIHeader_t     *hdr;
        char            *psge;
@@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
        dma_addr_t      dma_addr_in;
        dma_addr_t      dma_addr_out;
        int             sgSize = 0;     /* Num SG elements */
-       int             iocnum, flagsLength;
+       int             flagsLength;
        int             sz, rc = 0;
        int             msgContext;
        u16             req_idx;
@@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
        bufIn.kptr = bufOut.kptr = NULL;
        bufIn.len = bufOut.len = 0;
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
-
        spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
        if (ioc->ioc_reset_in_progress) {
                spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
@@ -2418,17 +2321,15 @@ done_free_mem:
  *             -ENOMEM if memory allocation error
  */
 static int
-mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
 {
        hp_host_info_t  __user *uarg = (void __user *) arg;
-       MPT_ADAPTER             *ioc;
        struct pci_dev          *pdev;
        char                    *pbuf=NULL;
        dma_addr_t              buf_dma;
        hp_host_info_t          karg;
        CONFIGPARMS             cfg;
        ConfigPageHeader_t      hdr;
-       int                     iocnum;
        int                     rc, cim_rev;
        ToolboxIstwiReadWriteRequest_t  *IstwiRWRequest;
        MPT_FRAME_HDR           *mf = NULL;
@@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-           (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
            ioc->name));
 
@@ -2659,15 +2554,13 @@ retry_wait:
  *             -ENOMEM if memory allocation error
  */
 static int
-mptctl_hp_targetinfo(unsigned long arg)
+mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
 {
        hp_target_info_t __user *uarg = (void __user *) arg;
        SCSIDevicePage0_t       *pg0_alloc;
        SCSIDevicePage3_t       *pg3_alloc;
-       MPT_ADAPTER             *ioc;
        MPT_SCSI_HOST           *hd = NULL;
        hp_target_info_t        karg;
-       int                     iocnum;
        int                     data_sz;
        dma_addr_t              page_dma;
        CONFIGPARMS             cfg;
@@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg)
                return -EFAULT;
        }
 
-       if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
-               (ioc == NULL)) {
-               printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
-                               __FILE__, __LINE__, iocnum);
-               return -ENODEV;
-       }
        if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
                return -EINVAL;
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
@@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
        kfw.fwlen = kfw32.fwlen;
        kfw.bufp = compat_ptr(kfw32.bufp);
 
-       ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+       ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen);
 
        mutex_unlock(&iocp->ioctl_cmds.mutex);
 
@@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
 
        /* Pass new structure to do_mpt_command
         */
-       ret = mptctl_do_mpt_command (karg, &uarg->MF);
+       ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF);
 
        mutex_unlock(&iocp->ioctl_cmds.mutex);
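
The mptctl changes above are one refactor applied uniformly: __mptctl_ioctl() resolves the ioc number to a validated MPT_ADAPTER * once, and every handler now takes that pointer instead of re-running mpt_verify_adapter(), each losing its iocnum local and duplicated -ENODEV branch in the process. The hoisting pattern in miniature, with illustrative types and command values:

    struct adapter;

    static struct adapter *verify_adapter(int iocnum); /* may return NULL */
    static int handler_a(struct adapter *ioc, unsigned long arg);
    static int handler_b(struct adapter *ioc, unsigned long arg);

    static long dispatch(int iocnum, unsigned int cmd, unsigned long arg)
    {
            struct adapter *ioc = verify_adapter(iocnum);

            if (!ioc)
                    return -ENODEV;  /* single point of failure handling */
            return (cmd == 0xA) ? handler_a(ioc, arg) : handler_b(ioc, arg);
    }
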
 
index fd7b2167103d5e7ee4dd4361dc8306fff3b371c1..06038b325b023a815c277de89854536e495e3be6 100644 (file)
@@ -1512,7 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
                bar = 1;
        len = pci_resource_len(pcidev, bar);
        base = pci_resource_start(pcidev, bar);
-       pcr->remap_addr = ioremap_nocache(base, len);
+       pcr->remap_addr = ioremap(base, len);
        if (!pcr->remap_addr) {
                ret = -ENOMEM;
                goto free_handle;
index 6d27ccfe068021127efd5452d7ff872859bcbe5f..3c2d405bc79b951f88a618289bfc8d47e16fac52 100644 (file)
@@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
                cdev = &edev->component[i];
                if (cdev->dev == dev) {
                        enclosure_remove_links(cdev);
-                       device_del(&cdev->cdev);
                        put_device(dev);
                        cdev->dev = NULL;
-                       return device_add(&cdev->cdev);
+                       return 0;
                }
        }
        return -ENODEV;
index 8850f475a4136e47d28f94c66b99cab49822505c..0bf08678431b2a707af2fbda9194012646dc460d 100644 (file)
@@ -824,8 +824,9 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
        memset(args, 0, sizeof(*args));
 
        if (rc < 0) {
-               dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n",
-                       rc, seq);
+               dev_err_ratelimited(hdev->dev,
+                               "Error %ld on waiting for CS handle %llu\n",
+                               rc, seq);
                if (rc == -ERESTARTSYS) {
                        args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
                        rc = -EINTR;
index 17db7b3dfb4c2635d9c67a4d3b3d24392fcd96bb..2df6fb87e7ff9bab9ebdd3a8a7f512ca002c59f1 100644 (file)
@@ -176,7 +176,7 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
        spin_lock(&ctx->cs_lock);
 
        if (seq >= ctx->cs_sequence) {
-               dev_notice(hdev->dev,
+               dev_notice_ratelimited(hdev->dev,
                        "Can't wait on seq %llu because current CS is at seq %llu\n",
                        seq, ctx->cs_sequence);
                spin_unlock(&ctx->cs_lock);
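
Both habanalabs hunks convert user-triggerable error paths to the ratelimited device-log helpers, so a tight ioctl retry loop can no longer flood the kernel log; with the default ratelimit state this is roughly 10 messages per 5 seconds. A hypothetical call site:

    #include <linux/device.h>

    static void report_wait_error(struct device *dev, long rc, u64 seq)
    {
            dev_err_ratelimited(dev,
                                "Error %ld on waiting for CS handle %llu\n",
                                rc, seq);
    }
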
index c8d16aa4382c57bd312485e5d1073b3c47ce3947..7344e8a222ae567fd3f525c242ce49a1750fe3ff 100644 (file)
@@ -2192,7 +2192,7 @@ static int goya_push_linux_to_device(struct hl_device *hdev)
 
 static int goya_pldm_init_cpu(struct hl_device *hdev)
 {
-       u32 val, unit_rst_val;
+       u32 unit_rst_val;
        int rc;
 
        /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
@@ -2200,14 +2200,14 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
 
        /* Put ARM cores into reset */
        WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
-       val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
+       RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
 
        /* Reset the CA53 MACRO */
        unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
        WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
-       val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+       RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
        WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
-       val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
+       RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
 
        rc = goya_push_uboot_to_device(hdev);
        if (rc)
@@ -2228,7 +2228,7 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
        /* Release ARM core 0 from reset */
        WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
                                        CPU_RESET_CORE0_DEASSERT);
-       val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
+       RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
 
        return 0;
 }
@@ -2502,13 +2502,12 @@ err:
 static int goya_hw_init(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u32 val;
        int rc;
 
        dev_info(hdev->dev, "Starting initialization of H/W\n");
 
        /* Perform read from the device to make sure device is up */
-       val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+       RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
 
        /*
         * Let's mark in the H/W that we have reached this point. We check
@@ -2560,7 +2559,7 @@ static int goya_hw_init(struct hl_device *hdev)
                goto disable_queues;
 
        /* Perform read from the device to flush all MSI-X configuration */
-       val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+       RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
 
        return 0;
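
The goya reads whose results were assigned to val exist only to post/flush the preceding register writes across PCIe; the value itself is meaningless, so dropping the assignment silences a set-but-unused warning while keeping the read's side effect. The idiom in isolation (illustrative accessor):

    #include <linux/io.h>

    static inline void post_writes(void __iomem *reg)
    {
            (void)readl(reg);  /* forces prior posted writes to complete */
    }
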
 
index a4fdad04809a994bf9afdbbb561eb059b0084fc9..de87693cf557d405ba86e8977c5aeab6ce15f2f6 100644 (file)
@@ -278,7 +278,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
 
 void lkdtm_UNSET_SMEP(void)
 {
-#ifdef CONFIG_X86_64
+#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
 #define MOV_CR4_DEPTH  64
        void (*direct_write_cr4)(unsigned long val);
        unsigned char *insn;
@@ -338,13 +338,13 @@ void lkdtm_UNSET_SMEP(void)
                native_write_cr4(cr4);
        }
 #else
-       pr_err("FAIL: this test is x86_64-only\n");
+       pr_err("XFAIL: this test is x86_64-only\n");
 #endif
 }
 
-#ifdef CONFIG_X86_32
 void lkdtm_DOUBLE_FAULT(void)
 {
+#ifdef CONFIG_X86_32
        /*
         * Trigger #DF by setting the stack limit to zero.  This clobbers
         * a GDT TLS slot, which is okay because the current task will die
@@ -373,6 +373,8 @@ void lkdtm_DOUBLE_FAULT(void)
        asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
                      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
 
-       panic("tried to double fault but didn't die\n");
-}
+       pr_err("FAIL: tried to double fault but didn't die\n");
+#else
+       pr_err("XFAIL: this test is ia32-only\n");
 #endif
+}
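
The lkdtm changes encode a convention: FAIL means the test ran and survived when it should have died, while XFAIL means the test cannot run in this configuration (x86_64-only under UML, or ia32-only elsewhere). Moving the #ifdef inside lkdtm_DOUBLE_FAULT() keeps the symbol defined on every architecture, so non-x86-32 builds still link and report XFAIL at runtime instead of breaking the build.
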
index c25fd40f3bd0fade7b98f505f73f1371ce8b1fd4..fcd999f50d14349ad007daf965183af5ebb98dca 100644 (file)
@@ -788,7 +788,7 @@ scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
                        "failed to setup interrupts for %d\n", msg->src.node);
                goto interrupt_setup_error;
        }
-       newdev->mmio.va = ioremap_nocache(msg->payload[1], sdev->mmio->len);
+       newdev->mmio.va = ioremap(msg->payload[1], sdev->mmio->len);
        if (!newdev->mmio.va) {
                dev_err(&scifdev->sdev->dev,
                        "failed to map mmio for %d\n", msg->src.node);
index 994563a078eb9f249b8576ddb0ab69abaa4de434..de8a66b9d76bbf93d0f1a85b1119c10b2ea2cc48 100644 (file)
@@ -10,18 +10,17 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
        int pasid;
        struct ocxl_context *ctx;
 
-       *context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
-       if (!*context)
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
                return -ENOMEM;
 
-       ctx = *context;
-
        ctx->afu = afu;
        mutex_lock(&afu->contexts_lock);
        pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
                        afu->pasid_base + afu->pasid_max, GFP_KERNEL);
        if (pasid < 0) {
                mutex_unlock(&afu->contexts_lock);
+               kfree(ctx);
                return pasid;
        }
        afu->pasid_count++;
@@ -43,6 +42,7 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
         * duration of the life of the context
         */
        ocxl_afu_get(afu);
+       *context = ctx;
        return 0;
 }
 EXPORT_SYMBOL_GPL(ocxl_context_alloc);
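
ocxl_context_alloc() now assembles the context in a local, frees it when idr_alloc() fails (previously leaked), and writes through *context only after every step has succeeded, so callers can never observe a half-initialised pointer. The publish-on-success shape, sketched with a hypothetical resource type:

    #include <linux/slab.h>

    struct obj { int id; };                    /* hypothetical resource */

    static int acquire_id(struct obj *o);      /* may return -errno */

    static int obj_alloc(struct obj **out)
    {
            struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);
            int id;

            if (!o)
                    return -ENOMEM;
            id = acquire_id(o);
            if (id < 0) {
                    kfree(o);      /* error path owns and frees it */
                    return id;
            }
            o->id = id;
            *out = o;              /* only now does the caller see it */
            return 0;
    }
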
index 2870c25da166f7bc1576464f248940b8396d2699..4d1b44de14921d58e6c5f4c20e9a0dc01c20aec2 100644 (file)
@@ -18,18 +18,15 @@ static struct class *ocxl_class;
 static struct mutex minors_idr_lock;
 static struct idr minors_idr;
 
-static struct ocxl_file_info *find_file_info(dev_t devno)
+static struct ocxl_file_info *find_and_get_file_info(dev_t devno)
 {
        struct ocxl_file_info *info;
 
-       /*
-        * We don't declare an RCU critical section here, as our AFU
-        * is protected by a reference counter on the device. By the time the
-        * info reference is removed from the idr, the ref count of
-        * the device is already at 0, so no user API will access that AFU and
-        * this function can't return it.
-        */
+       mutex_lock(&minors_idr_lock);
        info = idr_find(&minors_idr, MINOR(devno));
+       if (info)
+               get_device(&info->dev);
+       mutex_unlock(&minors_idr_lock);
        return info;
 }
 
@@ -58,14 +55,16 @@ static int afu_open(struct inode *inode, struct file *file)
 
        pr_debug("%s for device %x\n", __func__, inode->i_rdev);
 
-       info = find_file_info(inode->i_rdev);
+       info = find_and_get_file_info(inode->i_rdev);
        if (!info)
                return -ENODEV;
 
        rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping);
-       if (rc)
+       if (rc) {
+               put_device(&info->dev);
                return rc;
-
+       }
+       put_device(&info->dev);
        file->private_data = ctx;
        return 0;
 }
@@ -487,7 +486,6 @@ static void info_release(struct device *dev)
 {
        struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev);
 
-       free_minor(info);
        ocxl_afu_put(info->afu);
        kfree(info);
 }
@@ -577,6 +575,7 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu)
 
        ocxl_file_make_invisible(info);
        ocxl_sysfs_unregister_afu(info);
+       free_minor(info);
        device_unregister(&info->dev);
 }
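
The ocxl file hunks close an open-versus-unregister race: the minor lookup now happens under minors_idr_lock and takes a device reference before returning, afu_open() drops that temporary reference once the context holds its own, and freeing the minor moves from the release callback to ocxl_file_unregister_afu(). The lookup-and-get idiom under the same lock that protects removal, with a hypothetical info type:

    #include <linux/device.h>
    #include <linux/idr.h>
    #include <linux/mutex.h>

    struct file_info {             /* hypothetical stand-in */
            struct device dev;
    };

    static struct file_info *find_and_get(struct idr *idr, struct mutex *lk,
                                          int minor)
    {
            struct file_info *info;

            mutex_lock(lk);
            info = idr_find(idr, minor);
            if (info)
                    get_device(&info->dev); /* pin across unlocked window */
            mutex_unlock(lk);
            return info;                    /* caller must put_device() */
    }
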
 
index 359c5bab45acf4f0f3cd875ea8918548706b36e7..063e4419cd7e13cfb4de3da23fc51c13494a4b66 100644 (file)
@@ -834,7 +834,7 @@ static int pti_pci_probe(struct pci_dev *pdev,
        }
        drv_data->aperture_base = drv_data->pti_addr+APERTURE_14;
        drv_data->pti_ioaddr =
-               ioremap_nocache((u32)drv_data->aperture_base,
+               ioremap((u32)drv_data->aperture_base,
                APERTURE_LEN);
        if (!drv_data->pti_ioaddr) {
                retval = -ENOMEM;
index 5e6be1527571aaf0d7e7e4646768b123f9e85f57..b837e7eba5f7dcec2ae1f15bb2052c18b9a0f306 100644 (file)
@@ -17,6 +17,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/types.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
index 95b41c0891d02c5d58734cb8103e47ddbbf2474b..663d87924e5e8a224b78b49578d968e3b4b5bc15 100644 (file)
@@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
                                         card->erase_arg == MMC_TRIM_ARG ?
                                         INAND_CMD38_ARG_TRIM :
                                         INAND_CMD38_ARG_ERASE,
-                                        0);
+                                        card->ext_csd.generic_cmd6_time);
                }
                if (!err)
                        err = mmc_erase(card, from, nr, card->erase_arg);
@@ -1149,7 +1149,7 @@ retry:
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,
-                                0);
+                                card->ext_csd.generic_cmd6_time);
                if (err)
                        goto out_retry;
        }
@@ -1167,7 +1167,7 @@ retry:
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,
-                                        0);
+                                        card->ext_csd.generic_cmd6_time);
                        if (err)
                                goto out_retry;
                }
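
All three mmc_blk hunks stop passing a 0 timeout to the CMD38 EXT_CSD switch that precedes erase/trim/secure-erase. With the mmc_ops changes further down, a 0 timeout now falls back to generic_cmd6_time with a warning, so these callers simply supply the card-declared generic_cmd6_time up front and avoid the warning entirely.
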
index abf8f5eb0a1c8bebf45dc02a83b7cb9e7795f5df..aa54d359dab74beb7ea27850b5ac144511df3fdc 100644 (file)
@@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work)
        }
 
        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-               if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+               unsigned int freq = freqs[i];
+               if (freq > host->f_max) {
+                       if (i + 1 < ARRAY_SIZE(freqs))
+                               continue;
+                       freq = host->f_max;
+               }
+               if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
                        break;
                if (freqs[i] <= host->f_min)
                        break;
@@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work)
 
 void mmc_start_host(struct mmc_host *host)
 {
-       host->f_init = max(freqs[0], host->f_min);
+       host->f_init = max(min(freqs[0], host->f_max), host->f_min);
        host->rescan_disable = 0;
        host->ios.power_mode = MMC_POWER_UNDEFINED;
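
mmc_rescan() previously tried every entry of the 400/300/200/100 kHz trial table even on hosts whose f_max sits below an entry; now entries above f_max are skipped unless they are the last chance, in which case f_max itself is tried, and f_init is clamped into [f_min, f_max]. A runnable sketch of the clamp-and-skip loop with a hypothetical host window:

    #include <stdio.h>

    int main(void)
    {
            const unsigned int freqs[] = { 400000, 300000, 200000, 100000 };
            const unsigned int f_min = 100000, f_max = 150000;
            const size_t n = sizeof(freqs) / sizeof(freqs[0]);

            for (size_t i = 0; i < n; i++) {
                    unsigned int freq = freqs[i];

                    if (freq > f_max) {
                            if (i + 1 < n)
                                    continue;  /* a lower entry remains */
                            freq = f_max;      /* last try: the ceiling */
                    }
                    if (freq < f_min)
                            freq = f_min;
                    printf("trying %u Hz\n", freq);
            }
            return 0;
    }
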
 
index 105b7a7c025133b8afbf3a3221376caa16015ce8..c8768726d9251c249c49884f36d96b39532582c7 100644 (file)
@@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host)
        struct device *dev = host->parent;
        u32 bus_width, drv_type, cd_debounce_delay_ms;
        int ret;
-       bool cd_cap_invert, cd_gpio_invert = false;
-       bool ro_cap_invert, ro_gpio_invert = false;
 
        if (!dev || !dev_fwnode(dev))
                return 0;
@@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host)
         */
 
        /* Parse Card Detection */
+
        if (device_property_read_bool(dev, "non-removable")) {
                host->caps |= MMC_CAP_NONREMOVABLE;
        } else {
-               cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+               if (device_property_read_bool(dev, "cd-inverted"))
+                       host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
 
                if (device_property_read_u32(dev, "cd-debounce-delay-ms",
                                             &cd_debounce_delay_ms))
@@ -232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host)
                        host->caps |= MMC_CAP_NEEDS_POLL;
 
                ret = mmc_gpiod_request_cd(host, "cd", 0, false,
-                                          cd_debounce_delay_ms * 1000,
-                                          &cd_gpio_invert);
+                                          cd_debounce_delay_ms * 1000);
                if (!ret)
                        dev_info(host->parent, "Got CD GPIO\n");
                else if (ret != -ENOENT && ret != -ENOSYS)
                        return ret;
-
-               /*
-                * There are two ways to flag that the CD line is inverted:
-                * through the cd-inverted flag and by the GPIO line itself
-                * being inverted from the GPIO subsystem. This is a leftover
-                * from the times when the GPIO subsystem did not make it
-                * possible to flag a line as inverted.
-                *
-                * If the capability on the host AND the GPIO line are
-                * both inverted, the end result is that the CD line is
-                * not inverted.
-                */
-               if (cd_cap_invert ^ cd_gpio_invert)
-                       host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
        }
 
        /* Parse Write Protection */
-       ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
 
-       ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
+       if (device_property_read_bool(dev, "wp-inverted"))
+               host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+       ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
        if (!ret)
                dev_info(host->parent, "Got WP GPIO\n");
        else if (ret != -ENOENT && ret != -ENOSYS)
@@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host)
        if (device_property_read_bool(dev, "disable-wp"))
                host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
 
-       /* See the comment on CD inversion above */
-       if (ro_cap_invert ^ ro_gpio_invert)
-               host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
        if (device_property_read_bool(dev, "cap-sd-highspeed"))
                host->caps |= MMC_CAP_SD_HIGHSPEED;
        if (device_property_read_bool(dev, "cap-mmc-highspeed"))
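
After this rewrite, mmc_of_parse() translates the cd-inverted/wp-inverted properties straight into MMC_CAP2_*_ACTIVE_HIGH capabilities and requests the GPIOs without the old invert out-parameters; the descriptor's own polarity is folded in by slot-gpio.c below. A condensed sketch of the new flow, with an illustrative helper name and only the APIs used above:

#include <linux/errno.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/property.h>

static int parse_cd_wp(struct mmc_host *host, struct device *dev)
{
	int ret;

	if (device_property_read_bool(dev, "cd-inverted"))
		host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	if (device_property_read_bool(dev, "wp-inverted"))
		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	/* -ENOENT/-ENOSYS simply mean "no such GPIO described" */
	ret = mmc_gpiod_request_cd(host, "cd", 0, false, 0);
	if (ret && ret != -ENOENT && ret != -ENOSYS)
		return ret;

	ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
	if (ret && ret != -ENOENT && ret != -ENOSYS)
		return ret;

	return 0;
}
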
index 09113b9ad67907822ed7e53bcae652e40ec44f4c..da425ee2d9bf50da3cee4962dcdb5b6fe341ab6e 100644 (file)
@@ -19,7 +19,9 @@
 #include "host.h"
 #include "mmc_ops.h"
 
-#define MMC_OPS_TIMEOUT_MS     (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_OPS_TIMEOUT_MS             (10 * 60 * 1000) /* 10 min */
+#define MMC_BKOPS_TIMEOUT_MS           (120 * 1000) /* 120s */
+#define MMC_CACHE_FLUSH_TIMEOUT_MS     (30 * 1000) /* 30s */
 
 static const u8 tuning_blk_pattern_4bit[] = {
        0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
@@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
        bool expired = false;
        bool busy = false;
 
-       /* We have an unspecified cmd timeout, use the fallback value. */
-       if (!timeout_ms)
-               timeout_ms = MMC_OPS_TIMEOUT_MS;
-
        /*
         * In cases when not allowed to poll by using CMD13 or because we aren't
         * capable of polling by using ->card_busy(), then rely on waiting the
@@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 
        mmc_retune_hold(host);
 
+       if (!timeout_ms) {
+               pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
+                       mmc_hostname(host));
+               timeout_ms = card->ext_csd.generic_cmd6_time;
+       }
+
        /*
-        * If the cmd timeout and the max_busy_timeout of the host are both
-        * specified, let's validate them. A failure means we need to prevent
-        * the host from doing hw busy detection, which is done by converting
-        * to a R1 response instead of a R1B.
+        * If the max_busy_timeout of the host is specified, make sure it's
+        * enough to fit the used timeout_ms. In case it's not, let's instruct
+        * the host to avoid HW busy detection, by converting to a R1 response
+        * instead of a R1B.
         */
-       if (timeout_ms && host->max_busy_timeout &&
-               (timeout_ms > host->max_busy_timeout))
+       if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;
 
        cmd.opcode = MMC_SWITCH;
@@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
        cmd.flags = MMC_CMD_AC;
        if (use_r1b_resp) {
                cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
-               /*
-                * A busy_timeout of zero means the host can decide to use
-                * whatever value it finds suitable.
-                */
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
@@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card)
         * urgent levels by using an asynchronous background task, when idle.
         */
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                       EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
+                        EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
        if (err)
                pr_warn("%s: Error %d starting bkops\n",
                        mmc_hostname(card->host), err);
@@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card)
                        (card->ext_csd.cache_size > 0) &&
                        (card->ext_csd.cache_ctrl & 1)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                               EXT_CSD_FLUSH_CACHE, 1, 0);
+                                EXT_CSD_FLUSH_CACHE, 1,
+                                MMC_CACHE_FLUSH_TIMEOUT_MS);
                if (err)
                        pr_err("%s: cache flush error %d\n",
                                        mmc_hostname(card->host), err);
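
Taken together, the mmc_ops.c hunks make every CMD6 run with a concrete bound: a zero timeout is resolved to generic_cmd6_time up front (with a warning), BKOPS and cache flush get their own 120 s and 30 s caps, and hardware busy detection (R1B) is used only when the host's max_busy_timeout can cover the chosen value. A sketch of that selection logic; the helper name is made up:

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

/* Resolve the CMD6 timeout and decide between R1B and plain R1 */
static bool resolve_cmd6_timeout(struct mmc_card *card,
				 unsigned int *timeout_ms)
{
	struct mmc_host *host = card->host;

	if (!*timeout_ms)
		*timeout_ms = card->ext_csd.generic_cmd6_time;

	/* R1B only if the host can wait that long in hardware */
	return !host->max_busy_timeout ||
	       *timeout_ms <= host->max_busy_timeout;
}
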
index da2596c5fa28dba4fd5d8a92b2f399864d7b4ffa..05e907451df905113da314cb91ef451c26c70ff2 100644 (file)
@@ -19,7 +19,6 @@
 struct mmc_gpio {
        struct gpio_desc *ro_gpio;
        struct gpio_desc *cd_gpio;
-       bool override_cd_active_level;
        irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
        char *ro_label;
        char *cd_label;
@@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host)
                return -ENOSYS;
 
        cansleep = gpiod_cansleep(ctx->cd_gpio);
-       if (ctx->override_cd_active_level) {
-               int value = cansleep ?
-                               gpiod_get_raw_value_cansleep(ctx->cd_gpio) :
-                               gpiod_get_raw_value(ctx->cd_gpio);
-               return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
-       }
-
        return cansleep ?
                gpiod_get_value_cansleep(ctx->cd_gpio) :
                gpiod_get_value(ctx->cd_gpio);
@@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
  * @idx: index of the GPIO to obtain in the consumer
  * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
  * @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not, set
- * to NULL to ignore
  *
  * Note that this must be called prior to mmc_add_host()
  * otherwise the caller must also call mmc_gpiod_request_cd_irq().
@@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
  */
 int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
                         unsigned int idx, bool override_active_level,
-                        unsigned int debounce, bool *gpio_invert)
+                        unsigned int debounce)
 {
        struct mmc_gpio *ctx = host->slot.handler_priv;
        struct gpio_desc *desc;
@@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
                        ctx->cd_debounce_delay_ms = debounce / 1000;
        }
 
-       if (gpio_invert)
-               *gpio_invert = !gpiod_is_active_low(desc);
+       /* override forces default (active-low) polarity ... */
+       if (override_active_level && !gpiod_is_active_low(desc))
+               gpiod_toggle_active_low(desc);
+
+       /* ... or active-high */
+       if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+               gpiod_toggle_active_low(desc);
 
-       ctx->override_cd_active_level = override_active_level;
        ctx->cd_gpio = desc;
 
        return 0;
@@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
  * @con_id: function within the GPIO consumer
  * @idx: index of the GPIO to obtain in the consumer
  * @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not,
- * set to NULL to ignore
  *
  * Returns zero on success, else an error.
  */
 int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
-                        unsigned int idx,
-                        unsigned int debounce, bool *gpio_invert)
+                        unsigned int idx, unsigned int debounce)
 {
        struct mmc_gpio *ctx = host->slot.handler_priv;
        struct gpio_desc *desc;
@@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
                        return ret;
        }
 
-       if (gpio_invert)
-               *gpio_invert = !gpiod_is_active_low(desc);
+       if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
+               gpiod_toggle_active_low(desc);
 
        ctx->ro_gpio = desc;
 
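
Rather than remembering an override flag and reading raw GPIO values at query time, the slot-gpio code now folds all polarity decisions into the descriptor with gpiod_toggle_active_low(), so mmc_gpio_get_cd()/_ro() can use plain gpiod_get_value(). A sketch of the folding step, assuming an already-requested descriptor; the helper name is illustrative:

#include <linux/gpio/consumer.h>
#include <linux/mmc/host.h>

static void fold_cd_polarity(struct mmc_host *host, struct gpio_desc *desc,
			     bool override_active_level)
{
	/* override forces the default (active-low) polarity first... */
	if (override_active_level && !gpiod_is_active_low(desc))
		gpiod_toggle_active_low(desc);

	/* ...then the host capability may flip it to active-high */
	if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
		gpiod_toggle_active_low(desc);
}
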
index d06b2dfe3c9574a51e64b428955fba86d21bd7a0..3a5089f0332c5f06f702a44f1d9b44eeddad1c86 100644 (file)
@@ -501,6 +501,7 @@ config MMC_SDHCI_MSM
        depends on ARCH_QCOM || (ARM && COMPILE_TEST)
        depends on MMC_SDHCI_PLTFM
        select MMC_SDHCI_IO_ACCESSORS
+       select MMC_CQHCI
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in Qualcomm SOCs. The controller supports
@@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB
        tristate "Broadcom SDIO/SD/MMC support"
        depends on ARCH_BRCMSTB || BMIPS_GENERIC
        depends on MMC_SDHCI_PLTFM
+       select MMC_CQHCI
        default y
        help
          This selects support for the SDIO/SD/MMC Host Controller on
@@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP
        depends on MMC_SDHCI_PLTFM && OF
        select THERMAL
        imply TI_SOC_THERMAL
+       select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in TI's DRA7 SOCs. The controller supports
@@ -1040,3 +1043,6 @@ config MMC_OWL
        help
          This selects support for the SD/MMC Host Controller on
          Actions Semi Owl SoCs.
+
+config MMC_SDHCI_EXTERNAL_DMA
+       bool
index 6f065bb5c55a7e0f54ed6bc85f3d6c9570190f81..aeaaa5314924947f8eb714171cd5ff75af5b445d 100644 (file)
@@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev)
 {
        struct atmel_mci *host = dev_get_drvdata(dev);
 
-       pinctrl_pm_select_default_state(dev);
+       pinctrl_select_default_state(dev);
 
        return clk_prepare_enable(host->mck);
 }
index bc8aeb47a7b48f9ba5d0f6c66565b990419d6f39..8823680ca42c97514d1024cd929bc5c4e5dd1606 100644 (file)
@@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev)
                goto out2;
        }
 
-       r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no IRQ defined\n");
+       host->irq = platform_get_irq(pdev, 0);
+       if (host->irq < 0)
                goto out3;
-       }
-       host->irq = r->start;
 
        mmc->ops = &au1xmmc_ops;
 
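
platform_get_irq() performs the lookup and error reporting that the removed IORESOURCE_IRQ code did by hand, and returns a negative errno that can be propagated directly. A generic sketch of the pattern (all names illustrative):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int example_request_irq(struct platform_device *pdev,
			       irq_handler_t handler, void *ctx)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* the core has already logged the failure */

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), ctx);
}
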
index 99f61fd2a658bd26e9efae27e2873788a792c6c8..c3d949847cbd9d6cd1ffbf4674d93532e658233a 100644 (file)
@@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev)
        host->dma_chan = NULL;
        host->dma_desc = NULL;
 
-       host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");
+       host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
+       if (IS_ERR(host->dma_chan_rxtx)) {
+               ret = PTR_ERR(host->dma_chan_rxtx);
+               host->dma_chan_rxtx = NULL;
+
+               if (ret == -EPROBE_DEFER)
+                       goto err;
+
+               /* Ignore errors to fall back to PIO mode */
+       }
 
        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk)) {
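
dma_request_slave_channel() hides every failure behind a NULL return, so callers cannot distinguish "no channel described" from "provider not probed yet". dma_request_chan() returns an ERR_PTR, letting this driver defer probing on -EPROBE_DEFER while still falling back to PIO otherwise; the same conversion recurs in the dw_mmc, mxcmci, mxs, owl and pxamci hunks below. A sketch (helper name illustrative):

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>

static int request_rxtx_chan(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(*chan)) {
		int ret = PTR_ERR(*chan);

		*chan = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;
		/* any other error: stay on PIO */
	}
	return 0;
}
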
index eee08d81b24214c3ea0c5555fa1f1f419249f9d6..76013bbbcff300c0f574ad25879ffa36207e1add 100644 (file)
@@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
                return ret;
 
        host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
-       if (!host->base)
-               return -EINVAL;
+       if (!host->base) {
+               ret = -EINVAL;
+               goto error;
+       }
 
        /* On ThunderX these are identical */
        host->dma_base = host->base;
@@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
        host->reg_off_dma = 0x160;
 
        host->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(host->clk))
-               return PTR_ERR(host->clk);
+       if (IS_ERR(host->clk)) {
+               ret = PTR_ERR(host->clk);
+               goto error;
+       }
 
        ret = clk_prepare_enable(host->clk);
        if (ret)
-               return ret;
+               goto error;
        host->sys_freq = clk_get_rate(host->clk);
 
        spin_lock_init(&host->irq_handler_lock);
@@ -157,6 +161,7 @@ error:
                }
        }
        clk_disable_unprepare(host->clk);
+       pci_release_regions(pdev);
        return ret;
 }
 
@@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
        writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
 
        clk_disable_unprepare(host->clk);
+       pci_release_regions(pdev);
 }
 
 static const struct pci_device_id thunder_mmc_id_table[] = {
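
The thunderx hunks fix a leak: once the BARs are claimed, every failure path (and remove()) must give them back with pci_release_regions(). A minimal sketch of the pairing; the device name string and the stand-in setup step are illustrative:

#include <linux/pci.h>

static int example_setup(struct pci_dev *pdev)
{
	return 0;	/* stand-in for clock/irq/iomap setup */
}

static int example_pci_probe(struct pci_dev *pdev)
{
	int ret;

	ret = pci_request_regions(pdev, "example-mmc");
	if (ret)
		return ret;

	ret = example_setup(pdev);
	if (ret)
		goto error;

	return 0;

error:
	pci_release_regions(pdev);
	return ret;
}
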
index ebfaeb33bc8c063e684ab351edf4e8bca26fcc15..f01fecd75833d8a9f05fee959ce7a39cf290f35e 100644 (file)
@@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
                mmc->caps |= pdata->caps;
 
        /* Register a cd gpio, if there is not one, enable polling */
-       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                return ret;
        else if (ret)
                mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-       ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+       ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
        if (ret == -EPROBE_DEFER)
                return ret;
 
index fc9d4d000f97e4342bd0b19ae8834382c9e11289..bc5278ab5707c7c467337da8f375803a0b952b3b 100644 (file)
@@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host)
        if (!host->dms)
                return -ENOMEM;
 
-       host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
-       if (!host->dms->ch) {
+       host->dms->ch = dma_request_chan(host->dev, "rx-tx");
+       if (IS_ERR(host->dms->ch)) {
+               int ret = PTR_ERR(host->dms->ch);
+
                dev_err(host->dev, "Failed to get external DMA channel.\n");
                kfree(host->dms);
                host->dms = NULL;
-               return -ENXIO;
+               return ret;
        }
 
        return 0;
index 78383f60a3dce60c7ff03faa51f615495557204a..fbae87d1f017e4ef70513514c3d2692626655137 100644 (file)
@@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev)
 
 static int jz4740_mmc_resume(struct device *dev)
 {
-       return pinctrl_pm_select_default_state(dev);
+       return pinctrl_select_default_state(dev);
 }
 
 static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
index e712315c7e8d29c72d020d3f398f9b8b212ccbda..35400cf2a2e4a59733a6fc0a630d0e69748866d9 100644 (file)
@@ -161,7 +161,6 @@ struct meson_host {
        bool dram_access_quirk;
 
        struct pinctrl *pinctrl;
-       struct pinctrl_state *pins_default;
        struct pinctrl_state *pins_clk_gate;
 
        unsigned int bounce_buf_size;
@@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host)
        u32 cfg;
 
        if (host->pins_clk_gate)
-               pinctrl_select_state(host->pinctrl, host->pins_default);
+               pinctrl_select_default_state(host->dev);
 
        /* Make sure the clock is not stopped in the controller */
        cfg = readl(host->regs + SD_EMMC_CFG);
@@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
                goto free_host;
        }
 
-       host->pins_default = pinctrl_lookup_state(host->pinctrl,
-                                                 PINCTRL_STATE_DEFAULT);
-       if (IS_ERR(host->pins_default)) {
-               ret = PTR_ERR(host->pins_default);
-               goto free_host;
-       }
-
        host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
                                                   "clk-gate");
        if (IS_ERR(host->pins_clk_gate)) {
index ba9a63db73da934b94a79d3f882007bca792b39d..8b038e7b2cd312e2c5e3d123b8a148595aadbe1d 100644 (file)
@@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
        struct platform_device *slot_pdev;
        struct mmc_host *mmc;
        struct meson_mx_mmc_host *host;
-       struct resource *res;
        int ret, irq;
        u32 conf;
 
@@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, host);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->base = devm_ioremap_resource(host->controller_dev, res);
+       host->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto error_free_mmc;
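
devm_platform_ioremap_resource() wraps the platform_get_resource() + devm_ioremap_resource() pair used before, as in this hunk and the mtk-sd and mvsdio ones below. A sketch (names illustrative):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_map_regs(struct platform_device *pdev,
			    void __iomem **base)
{
	*base = devm_platform_ioremap_resource(pdev, 0);
	return PTR_ERR_OR_ZERO(*base);
}
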
index 74c6cfbf91720da87a6ef745dc19f32871174f25..951f76dc1ddd9277869394e6802f7c51276526c8 100644 (file)
@@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
         * SPI protocol.  Another is that when chipselect is released while
         * the card returns BUSY status, the clock must issue several cycles
         * with chipselect high before the card will stop driving its output.
+        *
+        * SPI_CS_HIGH means "asserted" here. In some cases like when using
+        * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
+        * inverted by gpiolib, so to make sure the line is actually driven
+        * high we toggle the default with an XOR, as done here.
         */
-       host->spi->mode |= SPI_CS_HIGH;
+       host->spi->mode ^= SPI_CS_HIGH;
        if (spi_setup(host->spi) != 0) {
                /* Just warn; most cards work without it. */
                dev_warn(&host->spi->dev,
                                "can't change chip-select polarity\n");
-               host->spi->mode &= ~SPI_CS_HIGH;
+               host->spi->mode ^= SPI_CS_HIGH;
        } else {
                mmc_spi_readbytes(host, 18);
 
-               host->spi->mode &= ~SPI_CS_HIGH;
+               host->spi->mode ^= SPI_CS_HIGH;
                if (spi_setup(host->spi) != 0) {
                        /* Wot, we can't get the same setup we had before? */
                        dev_err(&host->spi->dev,
@@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi)
         * Index 0 is card detect
         * Old boardfiles were specifying 1 ms as debounce
         */
-       status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
+       status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
        if (status == -EPROBE_DEFER)
                goto fail_add_host;
        if (!status) {
@@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi)
        mmc_detect_change(mmc, 0);
 
        /* Index 1 is write protect/read only */
-       status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
+       status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
        if (status == -EPROBE_DEFER)
                goto fail_add_host;
        if (!status)
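
Since gpiolib may already invert a GPIO chip-select line, unconditionally OR-ing SPI_CS_HIGH in (and clearing it afterwards) could undo that inversion; toggling with XOR changes the polarity relative to whatever the default was and restores it exactly. A sketch of the toggle pair around a critical section (helper name illustrative):

#include <linux/spi/spi.h>

static int with_inverted_cs(struct spi_device *spi)
{
	int ret;

	spi->mode ^= SPI_CS_HIGH;	/* flip relative to the default */
	ret = spi_setup(spi);
	if (ret) {
		spi->mode ^= SPI_CS_HIGH;	/* undo on failure */
		return ret;
	}

	/* ... work with chip select in the non-default state ... */

	spi->mode ^= SPI_CS_HIGH;	/* restore the original polarity */
	return spi_setup(spi);
}
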
index 40e72c30ea84ba9033f5bc3cb172c54f47a43cae..e9ffce8d41ea072bf6a2c792353401fc364fd0e2 100644 (file)
@@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = {
        .cmdreg_srsp            = MCI_CPSM_RESPONSE,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
+       .datactrl_any_blocksz   = true,
+       .dma_power_of_2         = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
@@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = {
        .datactrl_mask_ddrmode  = MCI_DPSM_ST_DDRMODE,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
+       .datactrl_any_blocksz   = true,
+       .dma_power_of_2         = true,
        .datactrl_mask_sdio     = MCI_DPSM_ST_SDIOEN,
        .st_sdio                = true,
        .st_clkdiv              = true,
@@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = {
        .datacnt_useless        = true,
        .datalength_bits        = 25,
        .datactrl_blocksz       = 14,
+       .datactrl_any_blocksz   = true,
        .stm32_idmabsize_mask   = GENMASK(12, 5),
        .busy_timeout           = true,
        .busy_detect            = true,
@@ -284,6 +289,7 @@ static struct variant_data variant_qcom = {
        .data_cmd_enable        = MCI_CPSM_QCOM_DATCMD,
        .datalength_bits        = 24,
        .datactrl_blocksz       = 11,
+       .datactrl_any_blocksz   = true,
        .pwrreg_powerup         = MCI_PWR_UP,
        .f_max                  = 208000000,
        .explicit_mclk_control  = true,
@@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host)
 static int mmci_validate_data(struct mmci_host *host,
                              struct mmc_data *data)
 {
+       struct variant_data *variant = host->variant;
+
        if (!data)
                return 0;
-
-       if (!is_power_of_2(data->blksz)) {
+       if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
                dev_err(mmc_dev(host->mmc),
                        "unsupported block size (%d bytes)\n", data->blksz);
                return -EINVAL;
@@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);
 
-       host->ops->dma_start(host, &datactrl);
+       ret = host->ops->dma_start(host, &datactrl);
+       if (ret)
+               return ret;
 
        /* Trigger the DMA transfer */
        mmci_write_datactrlreg(host, datactrl);
@@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host)
 
        host->dma_priv = dmae;
 
-       dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
-                                                    "rx");
-       dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
-                                                    "tx");
+       dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+       if (IS_ERR(dmae->rx_channel)) {
+               int ret = PTR_ERR(dmae->rx_channel);
+               dmae->rx_channel = NULL;
+               return ret;
+       }
+
+       dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+       if (IS_ERR(dmae->tx_channel)) {
+               if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+                       dev_warn(mmc_dev(host->mmc),
+                                "Deferred probe for TX channel ignored\n");
+               dmae->tx_channel = NULL;
+       }
 
        /*
         * If only an RX channel is specified, the driver will
@@ -888,6 +907,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
        if (data->blksz * data->blocks <= variant->fifosize)
                return -EINVAL;
 
+       /*
+        * This is necessary to get SDIO working on the Ux500. We do not yet
+        * know if this is a bug in:
+        * - The Ux500 DMA controller (DMA40)
+        * - The MMCI DMA interface on the Ux500
+        *
+        * Some power-of-two block sizes (such as 64 bytes) are sent
+        * regularly during SDIO traffic and those work fine, so for these
+        * we enable DMA transfers.
+        */
+       if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+               return -EINVAL;
+
        device = chan->device;
        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
                           mmc_get_dma_dir(data));
@@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host,
 int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
 {
        struct mmci_dmae_priv *dmae = host->dma_priv;
+       int ret;
 
        host->dma_in_progress = true;
-       dmaengine_submit(dmae->desc_current);
+       ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+       if (ret < 0) {
+               host->dma_in_progress = false;
+               return ret;
+       }
        dma_async_issue_pending(dmae->cur);
 
        *datactrl |= MCI_DPSM_DMAENABLE;
@@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
        } else if (host->variant->busy_timeout && busy_resp &&
                   status & MCI_DATATIMEOUT) {
                cmd->error = -ETIMEDOUT;
+               host->irq_action = IRQ_WAKE_THREAD;
        } else {
                cmd->resp[0] = readl(base + MMCIRESPONSE0);
                cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
                                return;
                        }
                }
-               mmci_request_end(host, host->mrq);
+
+               if (host->irq_action != IRQ_WAKE_THREAD)
+                       mmci_request_end(host, host->mrq);
+
        } else if (sbc) {
                mmci_start_command(host, host->mrq->cmd, 0);
        } else if (!host->variant->datactrl_first &&
@@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
 {
        struct mmci_host *host = dev_id;
        u32 status;
-       int ret = 0;
 
        spin_lock(&host->lock);
+       host->irq_action = IRQ_HANDLED;
 
        do {
                status = readl(host->base + MMCISTATUS);
@@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                if (host->variant->busy_detect_flag)
                        status &= ~host->variant->busy_detect_flag;
 
-               ret = 1;
        } while (status);
 
        spin_unlock(&host->lock);
 
-       return IRQ_RETVAL(ret);
+       return host->irq_action;
+}
+
+/*
+ * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
+ *
+ * A reset is needed for some variants, where a data timeout for an R1B request
+ * causes the DPSM to stay busy (non-functional).
+ */
+static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
+{
+       struct mmci_host *host = dev_id;
+       unsigned long flags;
+
+       if (host->rst) {
+               reset_control_assert(host->rst);
+               udelay(2);
+               reset_control_deassert(host->rst);
+       }
+
+       spin_lock_irqsave(&host->lock, flags);
+       writel(host->clk_reg, host->base + MMCICLOCK);
+       writel(host->pwr_reg, host->base + MMCIPOWER);
+       writel(MCI_IRQENABLE | host->variant->start_err,
+              host->base + MMCIMASK0);
+
+       host->irq_action = IRQ_HANDLED;
+       mmci_request_end(host, host->mrq);
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       return host->irq_action;
 }
 
 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
                        pinctrl_select_state(host->pinctrl, host->pins_opendrain);
                else
-                       pinctrl_select_state(host->pinctrl, host->pins_default);
+                       pinctrl_select_default_state(mmc_dev(mmc));
        }
 
        /*
@@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev,
                        goto host_free;
                }
 
-               host->pins_default = pinctrl_lookup_state(host->pinctrl,
-                                                         PINCTRL_STATE_DEFAULT);
-               if (IS_ERR(host->pins_default)) {
-                       dev_err(mmc_dev(mmc), "Can't select default pins\n");
-                       ret = PTR_ERR(host->pins_default);
-                       goto host_free;
-               }
-
                host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
                                                            MMCI_PINCTRL_STATE_OPENDRAIN);
                if (IS_ERR(host->pins_opendrain)) {
@@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev,
         * silently if these do not exist
         */
        if (!np) {
-               ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+               ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
                if (ret == -EPROBE_DEFER)
                        goto clk_disable;
 
-               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
                if (ret == -EPROBE_DEFER)
                        goto clk_disable;
        }
 
-       ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
-                       DRIVER_NAME " (cmd)", host);
+       ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+                                       mmci_irq_thread, IRQF_SHARED,
+                                       DRIVER_NAME " (cmd)", host);
        if (ret)
                goto clk_disable;
 
@@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev)
                struct mmci_host *host = mmc_priv(mmc);
                clk_prepare_enable(host->clk);
                mmci_restore(host);
-               pinctrl_pm_select_default_state(dev);
+               pinctrl_select_default_state(dev);
        }
 
        return 0;
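
The mmci busy-timeout recovery relies on the split semantics of threaded interrupts: the hard handler stores IRQ_WAKE_THREAD in host->irq_action when the DPSM is stuck, and the thread performs the reset_control cycle and completes the request in a context where the longer recovery work is safe. A stripped-down sketch of the registration and hand-off (all names illustrative):

#include <linux/interrupt.h>

static irqreturn_t example_hard_irq(int irq, void *dev_id)
{
	bool needs_recovery = false;	/* stand-in for the DPSM-stuck check */

	return needs_recovery ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* reset_control_assert()/_deassert() and re-init would go here */
	return IRQ_HANDLED;
}

static int example_register(struct device *dev, unsigned int irq, void *ctx)
{
	return devm_request_threaded_irq(dev, irq, example_hard_irq,
					 example_thread_fn, IRQF_SHARED,
					 "example (cmd)", ctx);
}
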
index 158e1231aa23b4ffc9caf347c1f90274c983e4fe..ea6a0b5779d4522f10fb998316e98838f86c11a8 100644 (file)
@@ -279,7 +279,11 @@ struct mmci_host;
  * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
  * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
  * @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @datactrl_blksz: block size in power of two
+ * @datactrl_blocksz: block size in power of two
+ * @datactrl_any_blocksz: true if any block sizes are accepted by the
+ *               hardware, such as with some SDIO traffic that sends
+ *               odd-sized packets.
+ * @dma_power_of_2: DMA only works with blocks that are a power of 2.
  * @datactrl_first: true if data must be setup before send command
  * @datacnt_useless: true if you could not use datacnt register to read
  *                  remaining data
@@ -326,6 +330,8 @@ struct variant_data {
        unsigned int            datactrl_mask_ddrmode;
        unsigned int            datactrl_mask_sdio;
        unsigned int            datactrl_blocksz;
+       u8                      datactrl_any_blocksz:1;
+       u8                      dma_power_of_2:1;
        u8                      datactrl_first:1;
        u8                      datacnt_useless:1;
        u8                      st_sdio:1;
@@ -404,7 +410,6 @@ struct mmci_host {
        struct mmci_host_ops    *ops;
        struct variant_data     *variant;
        struct pinctrl          *pinctrl;
-       struct pinctrl_state    *pins_default;
        struct pinctrl_state    *pins_opendrain;
 
        u8                      hw_designer;
@@ -412,6 +417,7 @@ struct mmci_host {
 
        struct timer_list       timer;
        unsigned int            oldstat;
+       u32                     irq_action;
 
        /* pio stuff */
        struct sg_mapping_iter  sg_miter;
index 189e42674d8599103ea9a91a5cfaf075d705813b..7726dcf48f2ce14ad9241b3eaca1bcf772dfd239 100644 (file)
 #define MSDC_PATCH_BIT_SPCPUSH    (0x1 << 29)  /* RW */
 #define MSDC_PATCH_BIT_DECRCTMO   (0x1 << 30)  /* RW */
 
+#define MSDC_PATCH_BIT1_CMDTA     (0x7 << 3)    /* RW */
 #define MSDC_PATCH_BIT1_STOP_DLY  (0xf << 8)    /* RW */
 
 #define MSDC_PATCH_BIT2_CFGRESP   (0x1 << 15)   /* RW */
@@ -1881,6 +1882,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
 
        /* select EMMC50 PAD CMD tune */
        sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
+       sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);
 
        if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
            mmc->ios.timing == MMC_TIMING_UHS_SDR104)
@@ -2192,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
        if (ret)
                goto host_free;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->base = devm_ioremap_resource(&pdev->dev, res);
+       host->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto host_free;
index 74a0a7fbbf7fd69b22e13e044b4a2af70b1dae5d..203b617126014aaa5062e69636045c4b37fc6a36 100644 (file)
@@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev)
        struct mmc_host *mmc = NULL;
        struct mvsd_host *host = NULL;
        const struct mbus_dram_target_info *dram;
-       struct resource *r;
        int ret, irq;
 
        if (!np) {
                dev_err(&pdev->dev, "no DT node\n");
                return -ENODEV;
        }
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if (!r || irq < 0)
+       if (irq < 0)
                return -ENXIO;
 
        mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
@@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev)
 
        spin_lock_init(&host->lock);
 
-       host->base = devm_ioremap_resource(&pdev->dev, r);
+       host->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->base)) {
                ret = PTR_ERR(host->base);
                goto out;
index 011b59a3602eb8abc0ed2aa86dc9ab55161eec5a..b3d654c688e52769b50745eb6f7db574535f86dc 100644 (file)
@@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev)
        mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
 
        if (!host->pdata) {
-               host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
+               host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+               if (IS_ERR(host->dma)) {
+                       if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
+                               ret = -EPROBE_DEFER;
+                               goto out_clk_put;
+                       }
+
+                       /* Ignore errors to fall back to PIO mode */
+                       host->dma = NULL;
+               }
        } else {
                res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
                if (res) {
index 4031217d21c37be1a847ace9d8369c041ab95561..d82674aed4474bc6990d27db4b0d8d9b5236beeb 100644 (file)
@@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
                goto out_clk_disable;
        }
 
-       ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
-       if (!ssp->dmach) {
+       ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+       if (IS_ERR(ssp->dmach)) {
                dev_err(mmc_dev(host->mmc),
                        "%s: failed to request dma\n", __func__);
-               ret = -ENODEV;
+               ret = PTR_ERR(ssp->dmach);
                goto out_clk_disable;
        }
 
index 767e964ca5a2a879b23df9bd1e1694bcfebf2c4b..a379c45b985cebea031d2c33fb72210741813ecc 100644 (file)
@@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
                        ret = PTR_ERR(p);
                        goto err_free_irq;
                }
-               if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
-                       dev_info(host->dev, "missing default pinctrl state\n");
-                       devm_pinctrl_put(p);
-                       ret = -EINVAL;
-                       goto err_free_irq;
-               }
 
                if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
                        dev_info(host->dev, "missing idle pinctrl state\n");
@@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
        if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
            (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
 
-               pinctrl_pm_select_default_state(host->dev);
+               pinctrl_select_default_state(host->dev);
 
                /* irq lost, if pinmux incorrect */
                OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
                OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
                OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
        } else {
-               pinctrl_pm_select_default_state(host->dev);
+               pinctrl_select_default_state(host->dev);
        }
        spin_unlock_irqrestore(&host->irq_lock, flags);
        return 0;
index 771e3d00f1bb3fc9fa675d8bd6b997f9432ddaae..01ffe51f413df8556a1731ab02ec3a9ce1721007 100644 (file)
@@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev)
 
        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
-       if (!owl_host->dma) {
+       owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
+       if (IS_ERR(owl_host->dma)) {
                dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
-               ret = -ENXIO;
+               ret = PTR_ERR(owl_host->dma);
                goto err_free_host;
        }
 
index 024acc1b0a2eab59f51246123c7bd6eb6019ad97..3a9333475a2b3c3eb0d06806ed01e740a8fa87d9 100644 (file)
@@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, mmc);
 
-       host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
-       if (host->dma_chan_rx == NULL) {
+       host->dma_chan_rx = dma_request_chan(dev, "rx");
+       if (IS_ERR(host->dma_chan_rx)) {
                dev_err(dev, "unable to request rx dma channel\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(host->dma_chan_rx);
+               host->dma_chan_rx = NULL;
                goto out;
        }
 
-       host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
-       if (host->dma_chan_tx == NULL) {
+       host->dma_chan_tx = dma_request_chan(dev, "tx");
+       if (IS_ERR(host->dma_chan_tx)) {
                dev_err(dev, "unable to request tx dma channel\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(host->dma_chan_tx);
+               host->dma_chan_tx = NULL;
                goto out;
        }
 
@@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev)
                }
 
                /* FIXME: should we pass detection delay to debounce? */
-               ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+               ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
                if (ret && ret != -ENOENT) {
                        dev_err(dev, "Failed requesting gpio_cd\n");
                        goto out;
                }
 
-               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+               if (!host->pdata->gpio_card_ro_invert)
+                       mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+               ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
                if (ret && ret != -ENOENT) {
                        dev_err(dev, "Failed requesting gpio_ro\n");
                        goto out;
                }
-               if (!ret) {
+               if (!ret)
                        host->use_ro_gpio = true;
-                       mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
-                               0 : MMC_CAP2_RO_ACTIVE_HIGH;
-               }
 
                if (host->pdata->init)
                        host->pdata->init(dev, pxamci_detect_irq, mmc);
index c0504aa908575f427d59e48fb0d816b36cafe550..f524251d5113aea15de2b5443ce656ca04ead393 100644 (file)
@@ -14,8 +14,8 @@
 
 struct renesas_sdhi_scc {
        unsigned long clk_rate; /* clock rate for SDR104 */
-       u32 tap;                /* sampling clock position for SDR104 */
-       u32 tap_hs400;          /* sampling clock position for HS400 */
+       u32 tap;                /* sampling clock position for SDR104/HS400 (8 TAP) */
+       u32 tap_hs400_4tap;     /* sampling clock position for HS400 (4 TAP) */
 };
 
 struct renesas_sdhi_of_data {
@@ -33,6 +33,11 @@ struct renesas_sdhi_of_data {
        unsigned short max_segs;
 };
 
+struct renesas_sdhi_quirks {
+       bool hs400_disabled;
+       bool hs400_4taps;
+};
+
 struct tmio_mmc_dma {
        enum dma_slave_buswidth dma_buswidth;
        bool (*filter)(struct dma_chan *chan, void *arg);
@@ -46,6 +51,7 @@ struct renesas_sdhi {
        struct clk *clk_cd;
        struct tmio_mmc_data mmc_data;
        struct tmio_mmc_dma dma_priv;
+       const struct renesas_sdhi_quirks *quirks;
        struct pinctrl *pinctrl;
        struct pinctrl_state *pins_default, *pins_uhs;
        void __iomem *scc_ctl;
index 234551a68739b65b0ccc3de9b0967ccaff12a3b9..35cb24cd45b40b9b91b67403697eb181f3c59ae3 100644 (file)
 #define SDHI_VER_GEN3_SD       0xcc10
 #define SDHI_VER_GEN3_SDMMC    0xcd10
 
-struct renesas_sdhi_quirks {
-       bool hs400_disabled;
-       bool hs400_4taps;
-};
-
 static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
 {
        u32 val;
@@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
                       0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
 
 
-       if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400)
+       if (priv->quirks && priv->quirks->hs400_4taps)
                sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
                               host->tap_set / 2);
 
@@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
 static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
 {
        struct renesas_sdhi *priv = host_to_priv(host);
-       bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400;
+       bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
 
        /*
         * Skip checking SCC errors when running on 4 taps in HS400 mode as
@@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
 };
 
 static const struct soc_device_attribute sdhi_quirks_match[]  = {
+       { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
        { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
-       { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
        { /* Sentinel. */ },
 };
@@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        if (!priv)
                return -ENOMEM;
 
+       priv->quirks = quirks;
        mmc_data = &priv->mmc_data;
        dma_priv = &priv->dma_priv;
 
@@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        if (quirks && quirks->hs400_disabled)
                host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
 
-       if (quirks && quirks->hs400_4taps)
-               mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400;
-
        /* For some SoC, we disable internal WP. GPIO may override this */
        if (mmc_can_gpio_ro(host->mmc))
                mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
@@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev,
             host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
                                 MMC_CAP2_HS400_1_8V))) {
                const struct renesas_sdhi_scc *taps = of_data->taps;
+               bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
                bool hit = false;
 
                for (i = 0; i < of_data->taps_num; i++) {
                        if (taps[i].clk_rate == 0 ||
                            taps[i].clk_rate == host->mmc->f_max) {
                                priv->scc_tappos = taps->tap;
-                               priv->scc_tappos_hs400 = taps->tap_hs400;
+                               priv->scc_tappos_hs400 = use_4tap ?
+                                                        taps->tap_hs400_4tap :
+                                                        taps->tap;
                                hit = true;
                                break;
                        }
                }
 
                if (!hit)
-                       dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+                       dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
 
                host->init_tuning = renesas_sdhi_init_tuning;
                host->prepare_tuning = renesas_sdhi_prepare_tuning;
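
The TMIO_MMC_HAVE_4TAP_HS400 flag is replaced by a quirks pointer resolved once from the SoC/ES-revision table, so every 4-tap decision reads priv->quirks. A sketch of the lookup, reusing the struct and table shape from the hunks above; the table here holds a single illustrative entry:

#include <linux/sys_soc.h>

struct renesas_sdhi_quirks {
	bool hs400_disabled;
	bool hs400_4taps;
};

static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
	.hs400_disabled = true,
	.hs400_4taps = true,
};

static const struct soc_device_attribute sdhi_quirks_match[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.*",
	  .data = &sdhi_quirks_4tap_nohs400 },
	{ /* sentinel */ },
};

/* Resolve the quirks for the running SoC once, at probe time */
static const struct renesas_sdhi_quirks *sdhi_lookup_quirks(void)
{
	const struct soc_device_attribute *attr;

	attr = soc_device_match(sdhi_quirks_match);
	return attr ? attr->data : NULL;
}
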
index 18839a10594c6e23d46502449433a023455974ac..47ac53e912411ee08fe1e56bdde58138c177c3b0 100644 (file)
@@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
        {
                .clk_rate = 0,
                .tap = 0x00000300,
-               .tap_hs400 = 0x00000704,
+               .tap_hs400_4tap = 0x00000100,
        },
 };
 
@@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
  * DMA quirks for specific R-Car Gen3 SoC ES versions; SoCs without an
  * entry here use this DMAC implementation without quirks.
  */
-static const struct soc_device_attribute soc_whitelist[] = {
-       /* specific ones */
+static const struct soc_device_attribute soc_dma_quirks[] = {
        { .soc_id = "r7s9210",
          .data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) },
        { .soc_id = "r8a7795", .revision = "ES1.*",
          .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
        { .soc_id = "r8a7796", .revision = "ES1.0",
          .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
-       /* generic ones */
-       { .soc_id = "r8a774a1" },
-       { .soc_id = "r8a774b1" },
-       { .soc_id = "r8a774c0" },
-       { .soc_id = "r8a77470" },
-       { .soc_id = "r8a7795" },
-       { .soc_id = "r8a7796" },
-       { .soc_id = "r8a77965" },
-       { .soc_id = "r8a77970" },
-       { .soc_id = "r8a77980" },
-       { .soc_id = "r8a77990" },
-       { .soc_id = "r8a77995" },
        { /* sentinel */ }
 };
 
 static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
 {
-       const struct soc_device_attribute *soc = soc_device_match(soc_whitelist);
+       const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
        struct device *dev = &pdev->dev;
 
-       if (!soc)
-               return -ENODEV;
-
-       global_flags |= (unsigned long)soc->data;
+       if (soc)
+               global_flags |= (unsigned long)soc->data;
 
        dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
        if (!dev->dma_parms)
index bce9c33bc4b55caeff41bbe3aceb42587508ea3b..1e616ae56b132a74d50b3003c9bbab7a5ba6b600 100644 (file)
@@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host)
                mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
        /* If we get -ENOENT we have no card detect GPIO line */
-       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
        if (ret != -ENOENT) {
                dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
                        ret);
                return ret;
        }
 
-       ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+       ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
        if (ret != -ENOENT) {
                dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
                        ret);
index 105e73d4a3b9c2160497c63588284600c0b89663..9651dca6863ec299df9062879015a83aadc25a78 100644 (file)
@@ -719,7 +719,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
                goto err_free;
        }
 
-       host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
+       host->ioaddr = devm_ioremap(dev, iomem->start,
                                            resource_size(iomem));
        if (host->ioaddr == NULL) {
                err = -ENOMEM;
@@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
        if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
                bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
 
-               err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
+               err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
                if (err) {
                        if (err == -EPROBE_DEFER)
                                goto err_free;
index 73bb440aaf93f8fc0352afd68082830022e3f717..ad01f6451a953d61f4594ceef6b69b32389943f0 100644 (file)
 #include <linux/mmc/host.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include "sdhci-pltfm.h"
+#include "cqhci.h"
 
-static const struct sdhci_ops sdhci_brcmstb_ops = {
+#define SDHCI_VENDOR 0x78
+#define  SDHCI_VENDOR_ENHANCED_STRB 0x1
+
+#define BRCMSTB_PRIV_FLAGS_NO_64BIT            BIT(0)
+#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT      BIT(1)
+
+#define SDHCI_ARASAN_CQE_BASE_ADDR             0x200
+
+struct sdhci_brcmstb_priv {
+       void __iomem *cfg_regs;
+       bool has_cqe;
+};
+
+struct brcmstb_match_priv {
+       void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
+       struct sdhci_ops *ops;
+       unsigned int flags;
+};
+
+static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       u32 reg;
+
+       dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n",
+               __func__);
+       reg = readl(host->ioaddr + SDHCI_VENDOR);
+       if (ios->enhanced_strobe)
+               reg |= SDHCI_VENDOR_ENHANCED_STRB;
+       else
+               reg &= ~SDHCI_VENDOR_ENHANCED_STRB;
+       writel(reg, host->ioaddr + SDHCI_VENDOR);
+}
+
+static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+       u16 clk;
+
+       host->mmc->actual_clock = 0;
+
+       clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+       sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+       if (clock == 0)
+               return;
+
+       sdhci_enable_clk(host, clk);
+}
+
+static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host,
+                                           unsigned int timing)
+{
+       u16 ctrl_2;
+
+       dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n",
+               __func__, timing);
+       ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+       /* Select Bus Speed Mode for host */
+       ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+       if ((timing == MMC_TIMING_MMC_HS200) ||
+           (timing == MMC_TIMING_UHS_SDR104))
+               ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+       else if (timing == MMC_TIMING_UHS_SDR12)
+               ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+       else if (timing == MMC_TIMING_SD_HS ||
+                timing == MMC_TIMING_MMC_HS ||
+                timing == MMC_TIMING_UHS_SDR25)
+               ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+       else if (timing == MMC_TIMING_UHS_SDR50)
+               ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+       else if ((timing == MMC_TIMING_UHS_DDR50) ||
+                (timing == MMC_TIMING_MMC_DDR52))
+               ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+       else if (timing == MMC_TIMING_MMC_HS400)
+               ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
+       sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc)
+{
+       sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       u32 reg;
+
+       reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+       while (reg & SDHCI_DATA_AVAILABLE) {
+               sdhci_readl(host, SDHCI_BUFFER);
+               reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+       }
+
+       sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = {
+       .enable         = sdhci_brcmstb_cqe_enable,
+       .disable        = sdhci_cqe_disable,
+       .dumpregs       = sdhci_brcmstb_dumpregs,
+};
+
+static struct sdhci_ops sdhci_brcmstb_ops = {
        .set_clock = sdhci_set_clock,
        .set_bus_width = sdhci_set_bus_width,
        .reset = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
 };
 
-static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = {
+static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
+       .set_clock = sdhci_brcmstb_set_clock,
+       .set_bus_width = sdhci_set_bus_width,
+       .reset = sdhci_reset,
+       .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+};
+
+static struct brcmstb_match_priv match_priv_7425 = {
+       .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
+       BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
        .ops = &sdhci_brcmstb_ops,
 };
 
+static struct brcmstb_match_priv match_priv_7445 = {
+       .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+       .ops = &sdhci_brcmstb_ops,
+};
+
+static const struct brcmstb_match_priv match_priv_7216 = {
+       .hs400es = sdhci_brcmstb_hs400es,
+       .ops = &sdhci_brcmstb_ops_7216,
+};
+
+static const struct of_device_id sdhci_brcm_of_match[] = {
+       { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
+       { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
+       { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
+       {},
+};
+
+static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+       int cmd_error = 0;
+       int data_error = 0;
+
+       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+               return intmask;
+
+       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+       return 0;
+}
+
+static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+                                 struct sdhci_brcmstb_priv *priv)
+{
+       struct cqhci_host *cq_host;
+       bool dma64;
+       int ret;
+
+       if (!priv->has_cqe)
+               return sdhci_add_host(host);
+
+       dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
+       host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+       ret = sdhci_setup_host(host);
+       if (ret)
+               return ret;
+
+       cq_host = devm_kzalloc(mmc_dev(host->mmc),
+                              sizeof(*cq_host), GFP_KERNEL);
+       if (!cq_host) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+       cq_host->ops = &sdhci_brcmstb_cqhci_ops;
+
+       dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+       if (dma64) {
+               dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
+               cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+               cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+       }
+
+       ret = cqhci_init(cq_host, host->mmc, dma64);
+       if (ret)
+               goto cleanup;
+
+       ret = __sdhci_add_host(host);
+       if (ret)
+               goto cleanup;
+
+       return 0;
+
+cleanup:
+       sdhci_cleanup_host(host);
+       return ret;
+}
+
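
sdhci_brcmstb_add_host() follows the usual CQHCI bring-up order: sdhci_setup_host(), allocate a cqhci_host pointed at the vendor CQE register window, cqhci_init() with the negotiated DMA width, then __sdhci_add_host(), unwinding through sdhci_cleanup_host() on any failure. A stripped-down sketch of that sequence; the 0x200 offset matches SDHCI_ARASAN_CQE_BASE_ADDR above and the helper name is illustrative:

#include <linux/device.h>
#include "sdhci.h"
#include "cqhci.h"

static int example_add_cqe_host(struct sdhci_host *host,
				const struct cqhci_host_ops *ops)
{
	struct cqhci_host *cq_host;
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = devm_kzalloc(mmc_dev(host->mmc), sizeof(*cq_host),
			       GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + 0x200;	/* vendor CQE window */
	cq_host->ops = ops;

	ret = cqhci_init(cq_host, host->mmc,
			 host->flags & SDHCI_USE_64_BIT_DMA);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
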
 static int sdhci_brcmstb_probe(struct platform_device *pdev)
 {
-       struct sdhci_host *host;
+       const struct brcmstb_match_priv *match_priv;
+       struct sdhci_pltfm_data brcmstb_pdata;
        struct sdhci_pltfm_host *pltfm_host;
+       const struct of_device_id *match;
+       struct sdhci_brcmstb_priv *priv;
+       struct sdhci_host *host;
+       struct resource *iomem;
+       bool has_cqe = false;
        struct clk *clk;
        int res;
 
+       match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
+       match_priv = match->data;
+
+       dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
+
        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
+               if (PTR_ERR(clk) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
                dev_err(&pdev->dev, "Clock not found in Device Tree\n");
                clk = NULL;
        }
@@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
        if (res)
                return res;
 
-       host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0);
+       memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
+       if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+               has_cqe = true;
+               match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+       }
+       brcmstb_pdata.ops = match_priv->ops;
+       host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
+                               sizeof(struct sdhci_brcmstb_priv));
        if (IS_ERR(host)) {
                res = PTR_ERR(host);
                goto err_clk;
        }
 
+       pltfm_host = sdhci_priv(host);
+       priv = sdhci_pltfm_priv(pltfm_host);
+       priv->has_cqe = has_cqe;
+
+       /* Map in the non-standard CFG registers */
+       iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem);
+       if (IS_ERR(priv->cfg_regs)) {
+               res = PTR_ERR(priv->cfg_regs);
+               goto err;
+       }
+
        sdhci_get_of_property(pdev);
        res = mmc_of_parse(host->mmc);
        if (res)
                goto err;
 
+       /*
+        * If the chip has enhanced strobe and it is enabled, add the
+        * HS400ES callback.
+        */
+       if (match_priv->hs400es &&
+           (host->mmc->caps2 & MMC_CAP2_HS400_ES))
+               host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es;
+
        /*
         * Supply the existing CAPS, but clear the UHS modes. This
         * will allow these modes to be specified by device tree
         * properties through mmc_of_parse().
         */
        host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
-       if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci"))
+       if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
                host->caps &= ~SDHCI_CAN_64BIT;
        host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
        host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
-                       SDHCI_SUPPORT_DDR50);
-       host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
-               SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+                        SDHCI_SUPPORT_DDR50);
+       host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+       if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
+               host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
 
-       res = sdhci_add_host(host);
+       res = sdhci_brcmstb_add_host(host, priv);
        if (res)
                goto err;
 
-       pltfm_host = sdhci_priv(host);
        pltfm_host->clk = clk;
        return res;
 
@@ -79,11 +314,15 @@ err_clk:
        return res;
 }
 
-static const struct of_device_id sdhci_brcm_of_match[] = {
-       { .compatible = "brcm,bcm7425-sdhci" },
-       { .compatible = "brcm,bcm7445-sdhci" },
-       {},
-};
+static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
+{
+       int ret;
+
+       ret = sdhci_pltfm_unregister(pdev);
+       if (ret)
+               dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
 MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
 
 static struct platform_driver sdhci_brcmstb_driver = {
@@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = {
        },
        .probe          = sdhci_brcmstb_probe,
        .remove         = sdhci_pltfm_unregister,
+       .shutdown       = sdhci_brcmstb_shutdown,
 };
 
 module_platform_driver(sdhci_brcmstb_driver);
index ae0ec27dd7cce046151e550ef445f78bf77e1242..5827d3751b813176a6446463678a7448c1b389ab 100644 (file)
@@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
        return 0;
 }
 
-static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+static void *sdhci_cdns_priv(struct sdhci_host *host)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 
index 1c988d6a24330abd484f2c73bd249d15b5e1fcd8..382f25b2fa458da7667dc158226059344c9828c3 100644 (file)
@@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
 struct pltfm_imx_data {
        u32 scratchpad;
        struct pinctrl *pinctrl;
-       struct pinctrl_state *pins_default;
        struct pinctrl_state *pins_100mhz;
        struct pinctrl_state *pins_200mhz;
        const struct esdhc_soc_data *socdata;
@@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
        dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
 
        if (IS_ERR(imx_data->pinctrl) ||
-               IS_ERR(imx_data->pins_default) ||
                IS_ERR(imx_data->pins_100mhz) ||
                IS_ERR(imx_data->pins_200mhz))
                return -EINVAL;
@@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
                break;
        default:
                /* back to default state for other legacy timing */
-               pinctrl = imx_data->pins_default;
+               return pinctrl_select_default_state(mmc_dev(host->mmc));
        }
 
        return pinctrl_select_state(imx_data->pinctrl, pinctrl);
@@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
 
        mmc_of_parse_voltage(np, &host->ocr_mask);
 
-       if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) {
+       if (esdhc_is_usdhc(imx_data)) {
                imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
                                                ESDHC_PINCTRL_STATE_100MHZ);
                imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
@@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
                                host->mmc->parent->platform_data);
        /* write_protect */
        if (boarddata->wp_type == ESDHC_WP_GPIO) {
-               err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+               host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+               err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
                if (err) {
                        dev_err(mmc_dev(host->mmc),
                                "failed to request write-protect gpio!\n");
                        return err;
                }
-               host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
        }
 
        /* card_detect */
        switch (boarddata->cd_type) {
        case ESDHC_CD_GPIO:
-               err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+               err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
                if (err) {
                        dev_err(mmc_dev(host->mmc),
                                "failed to request card-detect gpio!\n");
@@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                goto disable_ahb_clk;
        }
 
-       imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
-                                               PINCTRL_STATE_DEFAULT);
-       if (IS_ERR(imx_data->pins_default))
-               dev_warn(mmc_dev(host->mmc), "could not get default state\n");
-
        if (esdhc_is_usdhc(imx_data)) {
                host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
                host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
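
The pins_default removals above lean on the pinctrl core, which selects a device's default state automatically at probe time and exposes pinctrl_select_default_state() for drivers that need to reapply it later, as esdhc_change_pinstate() now does. A minimal sketch of the replacement pattern (the helper name is hypothetical):

#include <linux/pinctrl/consumer.h>

/* Reapply PINCTRL_STATE_DEFAULT without caching the state handle in
 * driver-private data, as the removed code used to. */
static int example_restore_default_pins(struct device *dev)
{
	return pinctrl_select_default_state(dev);
}
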
index a1aa21b9ae1c6152035969848e046cc928afbd88..92f30a1db4352c9be4fefb17e31c59afa91d42a7 100644 (file)
@@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
        struct device *dev = &pdev->dev;
-       struct resource *res;
        int irq, ret = 0;
        struct f_sdhost_priv *priv;
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "%s: no irq specified\n", __func__);
+       if (irq < 0)
                return irq;
-       }
 
        host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
        if (IS_ERR(host))
@@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev)
        host->ops = &sdhci_milbeaut_ops;
        host->irq = irq;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->ioaddr)) {
                ret = PTR_ERR(host->ioaddr);
                goto err;
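
devm_platform_ioremap_resource() used above is a convenience wrapper; for reference, a sketch of the two-step lookup it folds together (the helper name is hypothetical):

#include <linux/platform_device.h>

static void __iomem *example_map_reg_bank(struct platform_device *pdev,
					  unsigned int index)
{
	/* equivalent of devm_platform_ioremap_resource(pdev, index) */
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
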
index b75c82d8d6c17064951f625ab5d8ae4309f15f72..c3a160c1804772963840deab9b593caa4c1640e0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/regulator/consumer.h>
 
 #include "sdhci-pltfm.h"
+#include "cqhci.h"
 
 #define CORE_MCI_VERSION               0x50
 #define CORE_VERSION_MAJOR_SHIFT       28
 
 #define CORE_PWRSAVE_DLL       BIT(3)
 
-#define DDR_CONFIG_POR_VAL     0x80040853
+#define DDR_CONFIG_POR_VAL     0x80040873
 
 
 #define INVALID_TUNING_PHASE   -1
 #define msm_host_writel(msm_host, val, host, offset) \
        msm_host->var_ops->msm_writel_relaxed(val, host, offset)
 
+/* CQHCI vendor specific registers */
+#define CQHCI_VENDOR_CFG1      0xA00
+#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN  (0x3 << 13)
+
 struct sdhci_msm_offset {
        u32 core_hc_mode;
        u32 core_mci_data_cnt;
@@ -148,8 +153,9 @@ struct sdhci_msm_offset {
        u32 core_ddr_200_cfg;
        u32 core_vendor_spec3;
        u32 core_dll_config_2;
+       u32 core_dll_config_3;
+       u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
        u32 core_ddr_config;
-       u32 core_ddr_config_2;
 };
 
 static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
@@ -177,8 +183,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
        .core_ddr_200_cfg = 0x224,
        .core_vendor_spec3 = 0x250,
        .core_dll_config_2 = 0x254,
-       .core_ddr_config = 0x258,
-       .core_ddr_config_2 = 0x25c,
+       .core_dll_config_3 = 0x258,
+       .core_ddr_config = 0x25c,
 };
 
 static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
@@ -207,8 +213,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
        .core_ddr_200_cfg = 0x184,
        .core_vendor_spec3 = 0x1b0,
        .core_dll_config_2 = 0x1b4,
-       .core_ddr_config = 0x1b8,
-       .core_ddr_config_2 = 0x1bc,
+       .core_ddr_config_old = 0x1b8,
+       .core_ddr_config = 0x1bc,
 };
 
 struct sdhci_msm_variant_ops {
@@ -253,6 +259,7 @@ struct sdhci_msm_host {
        const struct sdhci_msm_offset *offset;
        bool use_cdr;
        u32 transfer_mode;
+       bool updated_ddr_cfg;
 };
 
 static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
@@ -924,8 +931,10 @@ out:
 static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
 {
        struct mmc_host *mmc = host->mmc;
-       u32 dll_status, config;
+       u32 dll_status, config, ddr_cfg_offset;
        int ret;
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);
 
@@ -938,8 +947,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
         * bootloaders. In the future, if this changes, then the desired
         * values will need to be programmed appropriately.
         */
-       writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
-                       msm_offset->core_ddr_config);
+       if (msm_host->updated_ddr_cfg)
+               ddr_cfg_offset = msm_offset->core_ddr_config;
+       else
+               ddr_cfg_offset = msm_offset->core_ddr_config_old;
+       writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);
 
        if (mmc->ios.enhanced_strobe) {
                config = readl_relaxed(host->ioaddr +
@@ -1560,6 +1572,127 @@ out:
        __sdhci_msm_set_clock(host, clock);
 }
 
+/*****************************************************************************\
+ *                                                                           *
+ * MSM Command Queue Engine (CQE)                                            *
+ *                                                                           *
+\*****************************************************************************/
+
+static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
+{
+       int cmd_error = 0;
+       int data_error = 0;
+
+       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+               return intmask;
+
+       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+       return 0;
+}
+
+void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+       u32 ctrl;
+
+       /*
+        * When CQE is halted, the legacy SDHCI path operates only
+        * on 16-byte descriptors in 64-bit mode.
+        */
+       if (host->flags & SDHCI_USE_64_BIT_DMA)
+               host->desc_sz = 16;
+
+       spin_lock_irqsave(&host->lock, flags);
+
+       /*
+        * During CQE command transfers, the command-complete bit gets
+        * latched. Software should therefore clear the command-complete
+        * interrupt status when CQE is halted or disabled; otherwise an
+        * unexpected SDHCI legacy interrupt gets triggered.
+        */
+       ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+       ctrl |= SDHCI_INT_RESPONSE;
+       sdhci_writel(host,  ctrl, SDHCI_INT_ENABLE);
+       sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       sdhci_cqe_disable(mmc, recovery);
+}
+
+static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
+       .enable         = sdhci_cqe_enable,
+       .disable        = sdhci_msm_cqe_disable,
+};
+
+static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
+                               struct platform_device *pdev)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+       struct cqhci_host *cq_host;
+       bool dma64;
+       u32 cqcfg;
+       int ret;
+
+       /*
+        * When CQE is halted, the SDHC operates only on 16-byte ADMA
+        * descriptors, so ensure the ADMA table is allocated for 16-byte
+        * descriptors.
+        */
+       if (host->caps & SDHCI_CAN_64BIT)
+               host->alloc_desc_sz = 16;
+
+       ret = sdhci_setup_host(host);
+       if (ret)
+               return ret;
+
+       cq_host = cqhci_pltfm_init(pdev);
+       if (IS_ERR(cq_host)) {
+               ret = PTR_ERR(cq_host);
+               dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
+               goto cleanup;
+       }
+
+       msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+       cq_host->ops = &sdhci_msm_cqhci_ops;
+
+       dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+
+       ret = cqhci_init(cq_host, host->mmc, dma64);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
+                               mmc_hostname(host->mmc), ret);
+               goto cleanup;
+       }
+
+       /* Disable cqe reset due to cqe enable signal */
+       cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
+       cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
+       cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
+
+       /*
+        * The SDHC expects 12-byte ADMA descriptors until CQE is enabled.
+        * Limit desc_sz to 12 so that data commands sent during card
+        * initialization (before CQE gets enabled) execute without issues.
+        */
+       if (host->flags & SDHCI_USE_64_BIT_DMA)
+               host->desc_sz = 12;
+
+       ret = __sdhci_add_host(host);
+       if (ret)
+               goto cleanup;
+
+       dev_info(&pdev->dev, "%s: CQE init: success\n",
+                       mmc_hostname(host->mmc));
+       return ret;
+
+cleanup:
+       sdhci_cleanup_host(host);
+       return ret;
+}
+
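
Both CQE bring-up paths in this series (brcmstb earlier, msm here) use the same split: sdhci_add_host() is effectively sdhci_setup_host() followed by __sdhci_add_host(), and calling the two halves directly lets a driver slot CQHCI initialization in between, after the host's DMA capabilities are known but before the IRQ handler and mmc host are registered. A condensed sketch of the shared shape, with the vendor-specific tweaks elided (the function name is hypothetical):

static int example_cqe_bringup(struct sdhci_host *host,
			       struct cqhci_host *cq_host)
{
	bool dma64;
	int ret;

	ret = sdhci_setup_host(host);	/* probe caps, set up DMA */
	if (ret)
		return ret;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);	/* request IRQ, register mmc host */
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
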
 /*
  * Platform specific register write functions. This is so that, if any
  * register write needs to be followed up by platform specific actions,
@@ -1724,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
        .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
        .write_w = sdhci_msm_writew,
        .write_b = sdhci_msm_writeb,
+       .irq    = sdhci_msm_cqe_irq,
 };
 
 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1739,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        struct sdhci_host *host;
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_msm_host *msm_host;
-       struct resource *core_memres;
        struct clk *clk;
        int ret;
        u16 host_version, core_minor;
@@ -1747,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        u8 core_major;
        const struct sdhci_msm_offset *msm_offset;
        const struct sdhci_msm_variant_info *var_info;
+       struct device_node *node = pdev->dev.of_node;
 
        host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
        if (IS_ERR(host))
@@ -1840,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        }
 
        if (!msm_host->mci_removed) {
-               core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
-                               core_memres);
-
+               msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
                if (IS_ERR(msm_host->core_mem)) {
                        ret = PTR_ERR(msm_host->core_mem);
                        goto clk_disable;
@@ -1899,6 +2030,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
                                msm_offset->core_vendor_spec_capabilities0);
        }
 
+       if (core_major == 1 && core_minor >= 0x49)
+               msm_host->updated_ddr_cfg = true;
+
        /*
         * Power on reset state may trigger power irq if previous status of
         * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
@@ -1942,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
        pm_runtime_use_autosuspend(&pdev->dev);
 
        host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
-       ret = sdhci_add_host(host);
+       if (of_property_read_bool(node, "supports-cqe"))
+               ret = sdhci_msm_cqe_add_host(host, pdev);
+       else
+               ret = sdhci_add_host(host);
        if (ret)
                goto pm_runtime_disable;
        sdhci_msm_set_regulator_caps(msm_host);
index 5959e394b416f07163314bd98d8fd69d0634588c..ab2bd314a390799b3f8e1f071b0f6c2c164ec80c 100644 (file)
 
 #define SDHCI_AT91_PRESET_COMMON_CONF  0x400 /* drv type B, programmable clock mode */
 
+struct sdhci_at91_soc_data {
+       const struct sdhci_pltfm_data *pdata;
+       bool baseclk_is_generated_internally;
+       unsigned int divider_for_baseclk;
+};
+
 struct sdhci_at91_priv {
+       const struct sdhci_at91_soc_data *soc_data;
        struct clk *hclock;
        struct clk *gck;
        struct clk *mainck;
@@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
        .set_power              = sdhci_at91_set_power,
 };
 
-static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
        .ops = &sdhci_at91_sama5d2_ops,
 };
 
+static const struct sdhci_at91_soc_data soc_data_sama5d2 = {
+       .pdata = &sdhci_sama5d2_pdata,
+       .baseclk_is_generated_internally = false,
+};
+
+static const struct sdhci_at91_soc_data soc_data_sam9x60 = {
+       .pdata = &sdhci_sama5d2_pdata,
+       .baseclk_is_generated_internally = true,
+       .divider_for_baseclk = 2,
+};
+
 static const struct of_device_id sdhci_at91_dt_match[] = {
        { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+       { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 },
        {}
 };
 MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
@@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
        struct sdhci_host *host = dev_get_drvdata(dev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
-       int ret;
        unsigned int                    caps0, caps1;
        unsigned int                    clk_base, clk_mul;
-       unsigned int                    gck_rate, real_gck_rate;
+       unsigned int                    gck_rate, clk_base_rate;
        unsigned int                    preset_div;
 
-       /*
-        * The mult clock is provided by as a generated clock by the PMC
-        * controller. In order to set the rate of gck, we have to get the
-        * base clock rate and the clock mult from capabilities.
-        */
        clk_prepare_enable(priv->hclock);
        caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
        caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
-       clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
-       clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
-       gck_rate = clk_base * 1000000 * (clk_mul + 1);
-       ret = clk_set_rate(priv->gck, gck_rate);
-       if (ret < 0) {
-               dev_err(dev, "failed to set gck");
-               clk_disable_unprepare(priv->hclock);
-               return ret;
-       }
-       /*
-        * We need to check if we have the requested rate for gck because in
-        * some cases this rate could be not supported. If it happens, the rate
-        * is the closest one gck can provide. We have to update the value
-        * of clk mul.
-        */
-       real_gck_rate = clk_get_rate(priv->gck);
-       if (real_gck_rate != gck_rate) {
-               clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
-               caps1 &= (~SDHCI_CLOCK_MUL_MASK);
-               caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) &
-                         SDHCI_CLOCK_MUL_MASK);
-               /* Set capabilities in r/w mode. */
-               writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN,
-                      host->ioaddr + SDMMC_CACR);
-               writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
-               /* Set capabilities in ro mode. */
-               writel(0, host->ioaddr + SDMMC_CACR);
-               dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n",
-                        clk_mul, real_gck_rate);
-       }
+
+       gck_rate = clk_get_rate(priv->gck);
+       if (priv->soc_data->baseclk_is_generated_internally)
+               clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk;
+       else
+               clk_base_rate = clk_get_rate(priv->mainck);
+
+       clk_base = clk_base_rate / 1000000;
+       clk_mul = gck_rate / clk_base_rate - 1;
+
+       caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK;
+       caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK;
+       caps1 &= ~SDHCI_CLOCK_MUL_MASK;
+       caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK;
+       /* Set capabilities in r/w mode. */
+       writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+       writel(caps0, host->ioaddr + SDHCI_CAPABILITIES);
+       writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+       /* Set capabilities in ro mode. */
+       writel(0, host->ioaddr + SDMMC_CACR);
+
+       dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
+                clk_mul, gck_rate, clk_base_rate);
 
        /*
         * We have to set preset values because it depends on the clk_mul
@@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
         * maximum sd clock value is 120 MHz instead of 208 MHz. For that
         * reason, we need to use presets to support SDR104.
         */
-       preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_SDR12);
-       preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_SDR25);
-       preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_SDR50);
-       preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_SDR104);
-       preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+       preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
        writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
               host->ioaddr + SDHCI_PRESET_FOR_DDR50);
 
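Each preset register encodes SDCLK = gck_rate / (preset_div + 1), so DIV_ROUND_UP(gck_rate, target) - 1 picks the smallest divider that keeps the resulting clock at or below the target. A worked sketch with a hypothetical 240 MHz gck:

#include <linux/kernel.h>

static unsigned int example_preset_div(unsigned int gck_rate,
				       unsigned int target_hz)
{
	/* smallest divider with gck_rate / (div + 1) <= target_hz */
	return DIV_ROUND_UP(gck_rate, target_hz) - 1;
}

/*
 * With gck_rate = 240000000 (hypothetical):
 *   SDR12  (24 MHz target)  -> preset_div 9, SDCLK 24 MHz
 *   SDR50  (100 MHz target) -> preset_div 2, SDCLK 80 MHz
 *   SDR104 (120 MHz target) -> preset_div 1, SDCLK 120 MHz
 */
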
@@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
 static int sdhci_at91_probe(struct platform_device *pdev)
 {
        const struct of_device_id       *match;
-       const struct sdhci_pltfm_data   *soc_data;
+       const struct sdhci_at91_soc_data        *soc_data;
        struct sdhci_host               *host;
        struct sdhci_pltfm_host         *pltfm_host;
        struct sdhci_at91_priv          *priv;
@@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev)
                return -EINVAL;
        soc_data = match->data;
 
-       host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv));
+       host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
        if (IS_ERR(host))
                return PTR_ERR(host);
 
        pltfm_host = sdhci_priv(host);
        priv = sdhci_pltfm_priv(pltfm_host);
+       priv->soc_data = soc_data;
 
        priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
        if (IS_ERR(priv->mainck)) {
-               dev_err(&pdev->dev, "failed to get baseclk\n");
-               return PTR_ERR(priv->mainck);
+               if (soc_data->baseclk_is_generated_internally) {
+                       priv->mainck = NULL;
+               } else {
+                       dev_err(&pdev->dev, "failed to get baseclk\n");
+                       ret = PTR_ERR(priv->mainck);
+                       goto sdhci_pltfm_free;
+               }
        }
 
        priv->hclock = devm_clk_get(&pdev->dev, "hclock");
        if (IS_ERR(priv->hclock)) {
                dev_err(&pdev->dev, "failed to get hclock\n");
-               return PTR_ERR(priv->hclock);
+               ret = PTR_ERR(priv->hclock);
+               goto sdhci_pltfm_free;
        }
 
        priv->gck = devm_clk_get(&pdev->dev, "multclk");
        if (IS_ERR(priv->gck)) {
                dev_err(&pdev->dev, "failed to get multclk\n");
-               return PTR_ERR(priv->gck);
+               ret = PTR_ERR(priv->gck);
+               goto sdhci_pltfm_free;
        }
 
        ret = sdhci_at91_set_clks_presets(&pdev->dev);
index 5cca3fa4610bce0cffff13ff18160e3f05b73604..5d8dd870bd4429cd40cb17d56614eceba972c8d8 100644 (file)
@@ -80,6 +80,7 @@ struct sdhci_esdhc {
        bool quirk_tuning_erratum_type1;
        bool quirk_tuning_erratum_type2;
        bool quirk_ignore_data_inhibit;
+       bool quirk_delay_before_data_reset;
        bool in_sw_tuning;
        unsigned int peripheral_clock;
        const struct esdhc_clk_fixup *clk_fixup;
@@ -172,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host,
        u16 ret;
        int shift = (spec_reg & 0x2) * 8;
 
+       if (spec_reg == SDHCI_TRANSFER_MODE)
+               return pltfm_host->xfer_mode_shadow;
+
        if (spec_reg == SDHCI_HOST_VERSION)
                ret = value & 0xffff;
        else
@@ -561,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
 
 static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
 {
-       u32 val;
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        ktime_t timeout;
+       u32 val, clk_en;
+
+       clk_en = ESDHC_CLOCK_SDCLKEN;
+
+       /*
+        * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
+        * is 2.2 or lower.
+        */
+       if (esdhc->vendor_ver <= VENDOR_V_22)
+               clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
+                          ESDHC_CLOCK_PEREN);
 
        val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
 
        if (enable)
-               val |= ESDHC_CLOCK_SDCLKEN;
+               val |= clk_en;
        else
-               val &= ~ESDHC_CLOCK_SDCLKEN;
+               val &= ~clk_en;
 
        sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
 
-       /* Wait max 20 ms */
+       /*
+        * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
+        * wait for the clock-stable bit, which does not exist there.
+        */
        timeout = ktime_add_ms(ktime_get(), 20);
-       val = ESDHC_CLOCK_STABLE;
-       while  (1) {
+       while (esdhc->vendor_ver > VENDOR_V_22) {
                bool timedout = ktime_after(ktime_get(), timeout);
 
-               if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
+               if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
                        break;
                if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                                mmc_hostname(host->mmc));
                        break;
                }
-               udelay(10);
+               usleep_range(10, 20);
        }
 }
 
@@ -620,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
-       int pre_div = 1;
-       int div = 1;
-       int division;
+       unsigned int pre_div = 1, div = 1;
+       unsigned int clock_fixup = 0;
        ktime_t timeout;
-       long fixup = 0;
        u32 temp;
 
-       host->mmc->actual_clock = 0;
-
        if (clock == 0) {
+               host->mmc->actual_clock = 0;
                esdhc_clock_enable(host, false);
                return;
        }
 
-       /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+       /* Start pre_div at 2 for vendor version < 2.3. */
        if (esdhc->vendor_ver < VENDOR_V_23)
                pre_div = 2;
 
+       /* Clamp the clock to the platform fixup limit, if any. */
        if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
-               esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
-               fixup = esdhc->clk_fixup->sd_dflt_max_clk;
+           esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
+               clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
        else if (esdhc->clk_fixup)
-               fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
+               clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
 
-       if (fixup && clock > fixup)
-               clock = fixup;
+       if (clock_fixup == 0 || clock < clock_fixup)
+               clock_fixup = clock;
 
-       temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-       temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
-                 ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
-       sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
-
-       while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+       /* Calculate pre_div and div. */
+       while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
                pre_div *= 2;
 
-       while (host->max_clk / pre_div / div > clock && div < 16)
+       while (host->max_clk / pre_div / div > clock_fixup && div < 16)
                div++;
 
+       esdhc->div_ratio = pre_div * div;
+
+       /* Limit clock division for the HS400 200 MHz clock when the quirk applies. */
        if (esdhc->quirk_limited_clk_division &&
            clock == MMC_HS200_MAX_DTR &&
            (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
             host->flags & SDHCI_HS400_TUNING)) {
-               division = pre_div * div;
-               if (division <= 4) {
+               if (esdhc->div_ratio <= 4) {
                        pre_div = 4;
                        div = 1;
-               } else if (division <= 8) {
+               } else if (esdhc->div_ratio <= 8) {
                        pre_div = 4;
                        div = 2;
-               } else if (division <= 12) {
+               } else if (esdhc->div_ratio <= 12) {
                        pre_div = 4;
                        div = 3;
                } else {
                        pr_warn("%s: using unsupported clock division.\n",
                                mmc_hostname(host->mmc));
                }
+               esdhc->div_ratio = pre_div * div;
        }
 
+       host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
+
        dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
-               clock, host->max_clk / pre_div / div);
-       host->mmc->actual_clock = host->max_clk / pre_div / div;
-       esdhc->div_ratio = pre_div * div;
+               clock, host->mmc->actual_clock);
+
+       /* Program the clock dividers into the system control register. */
        pre_div >>= 1;
        div--;
 
+       esdhc_clock_enable(host, false);
+
        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-       temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
-               | (div << ESDHC_DIVIDER_SHIFT)
-               | (pre_div << ESDHC_PREDIV_SHIFT));
+       temp &= ~ESDHC_CLOCK_MASK;
+       temp |= ((div << ESDHC_DIVIDER_SHIFT) |
+               (pre_div << ESDHC_PREDIV_SHIFT));
        sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
 
+       /*
+        * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
+        * wait for the clock-stable bit, which does not exist there.
+        */
+       timeout = ktime_add_ms(ktime_get(), 20);
+       while (esdhc->vendor_ver > VENDOR_V_22) {
+               bool timedout = ktime_after(ktime_get(), timeout);
+
+               if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
+                       break;
+               if (timedout) {
+                       pr_err("%s: Internal clock never stabilised.\n",
+                               mmc_hostname(host->mmc));
+                       break;
+               }
+               usleep_range(10, 20);
+       }
+
+       /* Additional setting for HS400. */
        if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
            clock == MMC_HS200_MAX_DTR) {
                temp = sdhci_readl(host, ESDHC_TBCTL);
@@ -710,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
                esdhc_clock_enable(host, false);
                esdhc_flush_async_fifo(host);
        }
-
-       /* Wait max 20 ms */
-       timeout = ktime_add_ms(ktime_get(), 20);
-       while (1) {
-               bool timedout = ktime_after(ktime_get(), timeout);
-
-               if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
-                       break;
-               if (timedout) {
-                       pr_err("%s: Internal clock never stabilised.\n",
-                               mmc_hostname(host->mmc));
-                       return;
-               }
-               udelay(10);
-       }
-
-       temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-       temp |= ESDHC_CLOCK_SDCLKEN;
-       sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+       esdhc_clock_enable(host, true);
 }
 
 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
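
The divider search in esdhc_of_set_clock() above factors the target ratio into a power-of-two prescaler (capped at 256) and a linear divider (capped at 16), always rounding the clock downwards. The same search as a standalone sketch (pre_div would start at 2 on vendor versions below 2.3, which is elided here; the function name is hypothetical):

static void example_esdhc_dividers(unsigned int max_clk, unsigned int target,
				   unsigned int *pre_div, unsigned int *div)
{
	*pre_div = 1;
	*div = 1;

	/* grow the power-of-two prescaler until div <= 16 can finish the job */
	while (max_clk / *pre_div / 16 > target && *pre_div < 256)
		*pre_div *= 2;

	/* then grow the linear divider until the target is not exceeded */
	while (max_clk / *pre_div / *div > target && *div < 16)
		(*div)++;
}
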
@@ -757,21 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
-       u32 val;
+       u32 val, bus_width = 0;
+
+       /*
+        * Add a delay to make sure all DMA transfers are finished
+        * when the quirk applies.
+        */
+       if (esdhc->quirk_delay_before_data_reset &&
+           (mask & SDHCI_RESET_DATA) &&
+           (host->flags & SDHCI_REQ_USE_DMA))
+               mdelay(5);
+
+       /*
+        * Save the bus-width setting on eSDHC with vendor version 2.2
+        * or lower before a data reset.
+        */
+       if ((mask & SDHCI_RESET_DATA) &&
+           (esdhc->vendor_ver <= VENDOR_V_22)) {
+               val = sdhci_readl(host, ESDHC_PROCTL);
+               bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
+       }
 
        sdhci_reset(host, mask);
 
-       sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
-       sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+       /*
+        * Restore the bus-width setting and interrupt registers on eSDHC
+        * with vendor version 2.2 or lower after a data reset.
+        */
+       if ((mask & SDHCI_RESET_DATA) &&
+           (esdhc->vendor_ver <= VENDOR_V_22)) {
+               val = sdhci_readl(host, ESDHC_PROCTL);
+               val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
+               val |= bus_width;
+               sdhci_writel(host, val, ESDHC_PROCTL);
 
-       if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
-               mdelay(5);
+               sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+               sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+       }
 
-       if (mask & SDHCI_RESET_ALL) {
+       /*
+        * Some bits have to be cleared manually on eSDHC whose spec
+        * version is higher than 3.0 for a full reset.
+        */
+       if ((mask & SDHCI_RESET_ALL) &&
+           (esdhc->spec_ver >= SDHCI_SPEC_300)) {
                val = sdhci_readl(host, ESDHC_TBCTL);
                val &= ~ESDHC_TB_EN;
                sdhci_writel(host, val, ESDHC_TBCTL);
 
+               /*
+                * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
+                * 0 when the quirk applies.
+                */
                if (esdhc->quirk_unreliable_pulse_detection) {
                        val = sdhci_readl(host, ESDHC_DLLCFG1);
                        val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
@@ -851,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
 }
 
 static struct soc_device_attribute soc_tuning_erratum_type1[] = {
-       { .family = "QorIQ T1023", .revision = "1.0", },
-       { .family = "QorIQ T1040", .revision = "1.0", },
-       { .family = "QorIQ T2080", .revision = "1.0", },
-       { .family = "QorIQ LS1021A", .revision = "1.0", },
+       { .family = "QorIQ T1023", },
+       { .family = "QorIQ T1040", },
+       { .family = "QorIQ T2080", },
+       { .family = "QorIQ LS1021A", },
        { },
 };
 
 static struct soc_device_attribute soc_tuning_erratum_type2[] = {
-       { .family = "QorIQ LS1012A", .revision = "1.0", },
-       { .family = "QorIQ LS1043A", .revision = "1.*", },
-       { .family = "QorIQ LS1046A", .revision = "1.0", },
-       { .family = "QorIQ LS1080A", .revision = "1.0", },
-       { .family = "QorIQ LS2080A", .revision = "1.0", },
-       { .family = "QorIQ LA1575A", .revision = "1.0", },
+       { .family = "QorIQ LS1012A", },
+       { .family = "QorIQ LS1043A", },
+       { .family = "QorIQ LS1046A", },
+       { .family = "QorIQ LS1080A", },
+       { .family = "QorIQ LS2080A", },
+       { .family = "QorIQ LA1575A", },
        { },
 };
 
@@ -885,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
        esdhc_clock_enable(host, true);
 }
 
-static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
                                    u8 *window_end)
 {
-       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-       struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
-       u8 tbstat_15_8, tbstat_7_0;
        u32 val;
 
-       if (esdhc->quirk_tuning_erratum_type1) {
-               *window_start = 5 * esdhc->div_ratio;
-               *window_end = 3 * esdhc->div_ratio;
-               return;
-       }
-
        /* Write TBCTL[11:8]=4'h8 */
        val = sdhci_readl(host, ESDHC_TBCTL);
        val &= ~(0xf << 8);
@@ -917,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
        val = sdhci_readl(host, ESDHC_TBSTAT);
        val = sdhci_readl(host, ESDHC_TBSTAT);
 
+       *window_end = val & 0xff;
+       *window_start = (val >> 8) & 0xff;
+}
+
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+                                   u8 *window_end)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+       u8 start_ptr, end_ptr;
+
+       if (esdhc->quirk_tuning_erratum_type1) {
+               *window_start = 5 * esdhc->div_ratio;
+               *window_end = 3 * esdhc->div_ratio;
+               return;
+       }
+
+       esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
+
        /* Reset data lines by setting ESDHCCTL[RSTD] */
        sdhci_reset(host, SDHCI_RESET_DATA);
        /* Write 32'hFFFF_FFFF to IRQSTAT register */
        sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
 
-       /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
-        * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+       /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
+        * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
         * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
         * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
         */
-       tbstat_7_0 = val & 0xff;
-       tbstat_15_8 = (val >> 8) & 0xff;
 
-       if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+       if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
                *window_start = 8 * esdhc->div_ratio;
                *window_end = 4 * esdhc->div_ratio;
        } else {
@@ -1003,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
                if (ret)
                        break;
 
+               /* On platforms affected by the type2 tuning erratum,
+                * tuning may report success even though the eSDHC has
+                * not tuned properly; check the tuning window to confirm.
+                */
+               if (esdhc->quirk_tuning_erratum_type2 &&
+                   !host->tuning_err) {
+                       esdhc_tuning_window_ptr(host, &window_start,
+                                               &window_end);
+                       if (abs(window_start - window_end) >
+                           (4 * esdhc->div_ratio + 2))
+                               host->tuning_err = -EAGAIN;
+               }
+
                /* If HW tuning fails and triggers erratum,
                 * try workaround.
                 */
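
The re-check above applies the same width bound that esdhc_prepare_sw_tuning() uses: a tuning window wider than 4 * div_ratio + 2 taps is treated as unreliable. As a standalone predicate (hypothetical helper):

#include <linux/kernel.h>

static bool example_tuning_window_ok(u8 window_start, u8 window_end,
				     unsigned int div_ratio)
{
	/* trust the result only if the window stays within the bound */
	return abs(window_start - window_end) <= 4 * div_ratio + 2;
}
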
@@ -1221,6 +1299,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
        if (match)
                esdhc->clk_fixup = match->data;
        np = pdev->dev.of_node;
+
+       if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
+               esdhc->quirk_delay_before_data_reset = true;
+
        clk = of_clk_get(np, 0);
        if (!IS_ERR(clk)) {
                /*
@@ -1231,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
                 * 1/2 peripheral clock.
                 */
                if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
-                   of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
+                   of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
+                   of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
                        esdhc->peripheral_clock = clk_get_rate(clk) / 2;
                else
                        esdhc->peripheral_clock = clk_get_rate(clk);
@@ -1303,8 +1386,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
                host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
 
        if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
-               host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
-               host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+               host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+               host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
        }
 
        if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
index 083e7e053c95401bcc6e87c6e8d5e3a52605e474..882053151a4741098684898e70bf5f1e03ea45ad 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/mmc/mmc.h>
 #include <linux/mmc/slot-gpio.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -85,6 +86,7 @@
 
 /* sdhci-omap controller flags */
 #define SDHCI_OMAP_REQUIRE_IODELAY     BIT(0)
+#define SDHCI_OMAP_SPECIAL_RESET       BIT(1)
 
 struct sdhci_omap_data {
        u32 offset;
@@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host)
        struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
 
        reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
-       reg |= CON_DMA_MASTER;
+       reg &= ~CON_DMA_MASTER;
+       /* Switch to DMA slave mode when using external DMA */
+       if (!host->use_external_dma)
+               reg |= CON_DMA_MASTER;
+
        sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
 
        return 0;
@@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
        sdhci_omap_start_clock(omap_host);
 }
 
+#define MMC_TIMEOUT_US         20000           /* 20000 microseconds */
 static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+       unsigned long limit = MMC_TIMEOUT_US;
+       unsigned long i = 0;
 
        /* Don't reset data lines during tuning operation */
        if (omap_host->is_tuning)
                mask &= ~SDHCI_RESET_DATA;
 
+       if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) {
+               sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+               while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) &&
+                      (i++ < limit))
+                       udelay(1);
+               i = 0;
+               while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) &&
+                      (i++ < limit))
+                       udelay(1);
+
+               if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
+                       dev_err(mmc_dev(host->mmc),
+                               "Timeout waiting on controller reset in %s\n",
+                               __func__);
+               return;
+       }
+
        sdhci_reset(host, mask);
 }
 
@@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
        return intmask;
 }
 
+static void sdhci_omap_set_timeout(struct sdhci_host *host,
+                                  struct mmc_command *cmd)
+{
+       if (cmd->opcode == MMC_ERASE)
+               sdhci_set_data_timeout_irq(host, false);
+
+       __sdhci_set_timeout(host, cmd);
+}
+
 static struct sdhci_ops sdhci_omap_ops = {
        .set_clock = sdhci_omap_set_clock,
        .set_power = sdhci_omap_set_power,
@@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = {
        .reset = sdhci_omap_reset,
        .set_uhs_signaling = sdhci_omap_set_uhs_signaling,
        .irq = sdhci_omap_irq,
+       .set_timeout = sdhci_omap_set_timeout,
 };
 
 static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
@@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = {
        .offset = 0x200,
 };
 
+static const struct sdhci_omap_data am335_data = {
+       .offset = 0x200,
+       .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data am437_data = {
+       .offset = 0x200,
+       .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
 static const struct sdhci_omap_data dra7_data = {
        .offset = 0x200,
        .flags  = SDHCI_OMAP_REQUIRE_IODELAY,
@@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = {
 static const struct of_device_id omap_sdhci_match[] = {
        { .compatible = "ti,dra7-sdhci", .data = &dra7_data },
        { .compatible = "ti,k2g-sdhci", .data = &k2g_data },
+       { .compatible = "ti,am335-sdhci", .data = &am335_data },
+       { .compatible = "ti,am437-sdhci", .data = &am437_data },
        {},
 };
 MODULE_DEVICE_TABLE(of, omap_sdhci_match);
@@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        struct sdhci_omap_data *data;
        const struct soc_device_attribute *soc;
+       struct resource *regs;
 
        match = of_match_device(omap_sdhci_match, dev);
        if (!match)
@@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        }
        offset = data->offset;
 
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs)
+               return -ENXIO;
+
        host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
                                sizeof(*omap_host));
        if (IS_ERR(host)) {
@@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        omap_host->timing = MMC_TIMING_LEGACY;
        omap_host->flags = data->flags;
        host->ioaddr += offset;
+       host->mapbase = regs->start + offset;
 
        mmc = host->mmc;
        sdhci_get_of_property(pdev);
@@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
        host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
 
+       /* Switch to external DMA only if there is the "dmas" property */
+       if (of_find_property(dev->of_node, "dmas", NULL))
+               sdhci_switch_external_dma(host, true);
+
        ret = sdhci_setup_host(host);
        if (ret)
                goto err_put_sync;
index acefb76b4e153211c7e8dfe823ae2359590bef88..525de2454a4de1d9d17b628d6d27a2898eaea36c 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/mmc/slot-gpio.h>
 #include <linux/mmc/sdhci-pci-data.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 
 #ifdef CONFIG_X86
 #include <asm/iosf_mbi.h>
@@ -783,11 +784,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
        return 0;
 }
 
+static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
+{
+       return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+              dmi_match(DMI_BIOS_VENDOR, "LENOVO");
+}
+
 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
 {
        int ret = byt_emmc_probe_slot(slot);
 
-       slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+       if (!glk_broken_cqhci(slot))
+               slot->host->mmc->caps2 |= MMC_CAP2_CQE;
 
        if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
                slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
@@ -1983,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
 
        if (slot->cd_idx >= 0) {
                ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
-                                          slot->cd_override_level, 0, NULL);
+                                          slot->cd_override_level, 0);
                if (ret && ret != -EPROBE_DEFER)
                        ret = mmc_gpiod_request_cd(host->mmc, NULL,
                                                   slot->cd_idx,
                                                   slot->cd_override_level,
-                                                  0, NULL);
+                                                  0);
                if (ret == -EPROBE_DEFER)
                        goto remove;
 
index 51e096f27388edd909e62e0091a5a36058f93fc7..64200c78e90dc7fce4112abc0336c0c79b757bce 100644 (file)
@@ -117,7 +117,6 @@ struct sdhci_s3c {
        struct s3c_sdhci_platdata *pdata;
        int                     cur_clk;
        int                     ext_cd_irq;
-       int                     ext_cd_gpio;
 
        struct clk              *clk_io;
        struct clk              *clk_bus[MAX_BUS_CLK];
@@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct sdhci_host *host;
        struct sdhci_s3c *sc;
-       struct resource *res;
        int ret, irq, ptr, clks;
 
        if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
                        goto err_pdata_io_clk;
        } else {
                memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
-               sc->ext_cd_gpio = -1; /* invalid gpio number */
        }
 
        drv_data = sdhci_s3c_get_driver_data(pdev);
@@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
                goto err_no_busclks;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->ioaddr)) {
                ret = PTR_ERR(host->ioaddr);
                goto err_req_regs;
index e431432233204c9ca0d4674773753e01b678de88..f4b05dd6c20a164b026f74c5d399982430cd31e7 100644 (file)
@@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
         * We must request the IRQ after sdhci_add_host(), as the tasklet only
         * gets set up in sdhci_add_host(), and we would oops otherwise.
         */
-       ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                goto err_request_cd;
        if (!ret)
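
The mmc_gpiod_request_cd()/mmc_gpiod_request_ro() call sites touched across this series (imx, pci, sirf, spear) all drop the same trailing argument: the slot-gpio helpers lose their unused gpio_invert out-parameter. As implied by the updated call sites, the prototypes in linux/mmc/slot-gpio.h now read:

int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
			 unsigned int idx, bool override_active_level,
			 unsigned int debounce);
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
			 unsigned int idx, unsigned int debounce);
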
index 916b5b09c3d138034f3a096a8fe1575c0be18fcc..b4b63089a4e250eefb61937a18dfa073e5e44ec0 100644 (file)
@@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = {
 static int sdhci_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
-       struct resource *iomem;
        struct spear_sdhci *sdhci;
        struct device *dev;
        int ret;
@@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev)
                goto err;
        }
 
-       iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+       host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->ioaddr)) {
                ret = PTR_ERR(host->ioaddr);
                dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
@@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev)
         * It is optional to use GPIOs for sdhci card detection. If we
         * find a descriptor using slot GPIO, we use it.
         */
-       ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                goto disable_clk;
 
index 7bc950520fd9525752f3801067d11fc7b77e9b31..403ac44a737822cbd754021ad644164ea6e0dc79 100644 (file)
@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
                        misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
                if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
                        misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
-               if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+               if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
                        clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
        }
 
index 3140fe2e5dba861c5f281762cbd96ef46e18d03a..63db84481dff2fba90fe021c311334ca994a9d81 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/ktime.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
@@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 }
 
-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
 {
        if (enable)
                host->ier |= SDHCI_INT_DATA_TIMEOUT;
@@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 }
+EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
 
-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 {
-       u8 count;
-
-       if (host->ops->set_timeout) {
-               host->ops->set_timeout(host, cmd);
-       } else {
-               bool too_big = false;
-
-               count = sdhci_calc_timeout(host, cmd, &too_big);
-
-               if (too_big &&
-                   host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
-                       sdhci_calc_sw_timeout(host, cmd);
-                       sdhci_set_data_timeout_irq(host, false);
-               } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
-                       sdhci_set_data_timeout_irq(host, true);
-               }
-
-               sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+       bool too_big = false;
+       u8 count = sdhci_calc_timeout(host, cmd, &too_big);
+
+       if (too_big &&
+           host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+               sdhci_calc_sw_timeout(host, cmd);
+               sdhci_set_data_timeout_irq(host, false);
+       } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+               sdhci_set_data_timeout_irq(host, true);
        }
+
+       sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
 }
+EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
 
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 {
-       struct mmc_data *data = cmd->data;
-
-       host->data_timeout = 0;
-
-       if (sdhci_data_line_cmd(cmd))
-               sdhci_set_timeout(host, cmd);
-
-       if (!data)
-               return;
+       if (host->ops->set_timeout)
+               host->ops->set_timeout(host, cmd);
+       else
+               __sdhci_set_timeout(host, cmd);
+}
 
+static void sdhci_initialize_data(struct sdhci_host *host,
+                                 struct mmc_data *data)
+{
        WARN_ON(host->data);
 
        /* Sanity checks */
@@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
        host->data = data;
        host->data_early = 0;
        host->data->bytes_xfered = 0;
+}
+
+static inline void sdhci_set_block_info(struct sdhci_host *host,
+                                       struct mmc_data *data)
+{
+       /* Set the DMA boundary value and block size */
+       sdhci_writew(host,
+                    SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
+                    SDHCI_BLOCK_SIZE);
+       /*
+        * For Version 4.10 onwards, if v4 mode is enabled, a 32-bit Block Count
+        * can be used; in that case the 16-bit block count register must be 0.
+        */
+       if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+           (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
+               if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
+                       sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
+               sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+       } else {
+               sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+       }
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+       struct mmc_data *data = cmd->data;
+
+       sdhci_initialize_data(host, data);
 
        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                struct scatterlist *sg;
@@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 
        sdhci_set_transfer_irqs(host);
 
-       /* Set the DMA boundary value and block size */
-       sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
-                    SDHCI_BLOCK_SIZE);
+       sdhci_set_block_info(host, data);
+}
 
-       /*
-        * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
-        * can be supported, in that case 16-bit block count register must be 0.
-        */
-       if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
-           (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
-               if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
-                       sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
-               sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+
+static int sdhci_external_dma_init(struct sdhci_host *host)
+{
+       int ret = 0;
+       struct mmc_host *mmc = host->mmc;
+
+       host->tx_chan = dma_request_chan(mmc->parent, "tx");
+       if (IS_ERR(host->tx_chan)) {
+               ret = PTR_ERR(host->tx_chan);
+               if (ret != -EPROBE_DEFER)
+                       pr_warn("Failed to request TX DMA channel.\n");
+               host->tx_chan = NULL;
+               return ret;
+       }
+
+       host->rx_chan = dma_request_chan(mmc->parent, "rx");
+       if (IS_ERR(host->rx_chan)) {
+               if (host->tx_chan) {
+                       dma_release_channel(host->tx_chan);
+                       host->tx_chan = NULL;
+               }
+
+               ret = PTR_ERR(host->rx_chan);
+               if (ret != -EPROBE_DEFER)
+                       pr_warn("Failed to request RX DMA channel.\n");
+               host->rx_chan = NULL;
+       }
+
+       return ret;
+}
+
+static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+                                                  struct mmc_data *data)
+{
+       return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
+static int sdhci_external_dma_setup(struct sdhci_host *host,
+                                   struct mmc_command *cmd)
+{
+       int ret, i;
+       enum dma_transfer_direction dir;
+       struct dma_async_tx_descriptor *desc;
+       struct mmc_data *data = cmd->data;
+       struct dma_chan *chan;
+       struct dma_slave_config cfg;
+       dma_cookie_t cookie;
+       int sg_cnt;
+
+       if (!host->mapbase)
+               return -EINVAL;
+
+       cfg.src_addr = host->mapbase + SDHCI_BUFFER;
+       cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
+       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       cfg.src_maxburst = data->blksz / 4;
+       cfg.dst_maxburst = data->blksz / 4;
+
+       /* Sanity check: all the SG entries must be aligned to the block size. */
+       for (i = 0; i < data->sg_len; i++) {
+               if ((data->sg + i)->length % data->blksz)
+                       return -EINVAL;
+       }
+
+       chan = sdhci_external_dma_channel(host, data);
+
+       ret = dmaengine_slave_config(chan, &cfg);
+       if (ret)
+               return ret;
+
+       sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
+       if (sg_cnt <= 0)
+               return -EINVAL;
+
+       dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+       desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
+                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc)
+               return -EINVAL;
+
+       desc->callback = NULL;
+       desc->callback_param = NULL;
+
+       cookie = dmaengine_submit(desc);
+       if (dma_submit_error(cookie))
+               ret = cookie;
+
+       return ret;
+}
+
+static void sdhci_external_dma_release(struct sdhci_host *host)
+{
+       if (host->tx_chan) {
+               dma_release_channel(host->tx_chan);
+               host->tx_chan = NULL;
+       }
+
+       if (host->rx_chan) {
+               dma_release_channel(host->rx_chan);
+               host->rx_chan = NULL;
+       }
+
+       sdhci_switch_external_dma(host, false);
+}
+
+static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
+                                             struct mmc_command *cmd)
+{
+       struct mmc_data *data = cmd->data;
+
+       sdhci_initialize_data(host, data);
+
+       host->flags |= SDHCI_REQ_USE_DMA;
+       sdhci_set_transfer_irqs(host);
+
+       sdhci_set_block_info(host, data);
+}
+
+static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+                                           struct mmc_command *cmd)
+{
+       if (!sdhci_external_dma_setup(host, cmd)) {
+               __sdhci_external_dma_prepare_data(host, cmd);
        } else {
-               sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+               sdhci_external_dma_release(host);
+               pr_err("%s: Cannot use external DMA, falling back to the DMA/PIO integrated in standard SDHCI.\n",
+                      mmc_hostname(host->mmc));
+               sdhci_prepare_data(host, cmd);
        }
 }
 
+static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+                                           struct mmc_command *cmd)
+{
+       struct dma_chan *chan;
+
+       if (!cmd->data)
+               return;
+
+       chan = sdhci_external_dma_channel(host, cmd->data);
+       if (chan)
+               dma_async_issue_pending(chan);
+}
+
+#else
+
+static inline int sdhci_external_dma_init(struct sdhci_host *host)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void sdhci_external_dma_release(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+                                                  struct mmc_command *cmd)
+{
+       /* This should never happen */
+       WARN_ON_ONCE(1);
+}
+
+static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+                                                  struct mmc_command *cmd)
+{
+}
+
+static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+                                                         struct mmc_data *data)
+{
+       return NULL;
+}
+
+#endif
+
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
+{
+       host->use_external_dma = en;
+}
+EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
+
 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
                                    struct mmc_request *mrq)
 {
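
Taken together, the CONFIG_MMC_SDHCI_EXTERNAL_DMA block above gives platform glue a single opt-in knob: set host->mapbase so the slave addresses can point at SDHCI_BUFFER, flip the flag, and let the core request the "tx"/"rx" channels during setup. A sketch of that opt-in, with res standing in for the controller's MEM resource:

    /* Sketch: opting in to external DMA from a platform driver. The
     * core requests the channels in sdhci_setup_host() and quietly
     * falls back to SDMA/ADMA/PIO unless the failure is -EPROBE_DEFER.
     */
    host->mapbase = res->start;            /* physical base for SDHCI_BUFFER */
    sdhci_switch_external_dma(host, true);
    ret = sdhci_add_host(host);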
@@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
                 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
 }
 
-static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
 {
        int i;
 
-       if (host->cmd && host->cmd->mrq == mrq)
-               host->cmd = NULL;
-
-       if (host->data_cmd && host->data_cmd->mrq == mrq)
-               host->data_cmd = NULL;
-
-       if (host->data && host->data->mrq == mrq)
-               host->data = NULL;
-
-       if (sdhci_needs_reset(host, mrq))
-               host->pending_reset = true;
-
        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (host->mrqs_done[i] == mrq) {
                        WARN_ON(1);
@@ -1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
        }
 
        WARN_ON(i >= SDHCI_MAX_MRQS);
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+       if (host->cmd && host->cmd->mrq == mrq)
+               host->cmd = NULL;
+
+       if (host->data_cmd && host->data_cmd->mrq == mrq)
+               host->data_cmd = NULL;
+
+       if (host->data && host->data->mrq == mrq)
+               host->data = NULL;
+
+       if (sdhci_needs_reset(host, mrq))
+               host->pending_reset = true;
+
+       sdhci_set_mrq_done(host, mrq);
 
        sdhci_del_timer(host, mrq);
 
@@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host)
 
        /*
         * Need to send CMD12 if -
-        * a) open-ended multiblock transfer (no CMD23)
+        * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
         * b) error in multiblock transfer
         */
        if (data->stop &&
-           (data->error ||
-            !data->mrq->sbc)) {
+           ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
+            data->error)) {
                /*
                 * 'cap_cmd_during_tfr' request must not use the command line
                 * after mmc_command_done() has been called. It is upper layer's
@@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
        }
 
        host->cmd = cmd;
+       host->data_timeout = 0;
        if (sdhci_data_line_cmd(cmd)) {
                WARN_ON(host->data_cmd);
                host->data_cmd = cmd;
+               sdhci_set_timeout(host, cmd);
        }
 
-       sdhci_prepare_data(host, cmd);
+       if (cmd->data) {
+               if (host->use_external_dma)
+                       sdhci_external_dma_prepare_data(host, cmd);
+               else
+                       sdhci_prepare_data(host, cmd);
+       }
 
        sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
 
@@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
                timeout += 10 * HZ;
        sdhci_mod_timer(host, cmd->mrq, timeout);
 
+       if (host->use_external_dma)
+               sdhci_external_dma_pre_transfer(host, cmd);
+
        sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
 }
 EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        sdhci_led_activate(host);
 
-       /*
-        * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
-        * requests if Auto-CMD12 is enabled.
-        */
-       if (sdhci_auto_cmd12(host, mrq)) {
-               if (mrq->stop) {
-                       mrq->data->stop = NULL;
-                       mrq->stop = NULL;
-               }
-       }
-
        if (!present || host->flags & SDHCI_DEVICE_DEAD) {
                mrq->cmd->error = -ENOMEDIUM;
                sdhci_finish_mrq(host, mrq);
@@ -1882,9 +2077,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
        else if (timing == MMC_TIMING_UHS_SDR12)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
-       else if (timing == MMC_TIMING_SD_HS ||
-                timing == MMC_TIMING_MMC_HS ||
-                timing == MMC_TIMING_UHS_SDR25)
+       else if (timing == MMC_TIMING_UHS_SDR25)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
        else if (timing == MMC_TIMING_UHS_SDR50)
                ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
@@ -2419,8 +2612,8 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
                sdhci_send_tuning(host, opcode);
 
                if (!host->tuning_done) {
-                       pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
-                               mmc_hostname(host->mmc));
+                       pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
+                                mmc_hostname(host->mmc));
                        sdhci_abort_tuning(host, opcode);
                        return -ETIMEDOUT;
                }
@@ -2663,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
        if (host->flags & SDHCI_REQ_USE_DMA) {
                struct mmc_data *data = mrq->data;
 
+               if (host->use_external_dma && data &&
+                   (mrq->cmd->error || data->error)) {
+                       struct dma_chan *chan = sdhci_external_dma_channel(host, data);
+
+                       host->mrqs_done[i] = NULL;
+                       spin_unlock_irqrestore(&host->lock, flags);
+                       dmaengine_terminate_sync(chan);
+                       spin_lock_irqsave(&host->lock, flags);
+                       sdhci_set_mrq_done(host, mrq);
+               }
+
                if (data && data->host_cookie == COOKIE_MAPPED) {
                        if (host->bounce_buffer) {
                                /*
@@ -3769,6 +3973,9 @@ int sdhci_setup_host(struct sdhci_host *host)
                       mmc_hostname(mmc), host->version);
        }
 
+       if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
+               mmc->caps2 &= ~MMC_CAP2_CQE;
+
        if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
                host->flags |= SDHCI_USE_SDMA;
        else if (!(host->caps & SDHCI_CAN_DO_SDMA))
@@ -3795,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host)
        if (sdhci_can_64bit_dma(host))
                host->flags |= SDHCI_USE_64_BIT_DMA;
 
+       if (host->use_external_dma) {
+               ret = sdhci_external_dma_init(host);
+               if (ret == -EPROBE_DEFER)
+                       goto unreg;
+               /*
+                * Fall back to the DMA/PIO integrated in standard SDHCI
+                * instead of external DMA devices.
+                */
+               else if (ret)
+                       sdhci_switch_external_dma(host, false);
+               /* Disable internal DMA sources */
+               else
+                       host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+       }
+
        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->set_dma_mask)
                        ret = host->ops->set_dma_mask(host);
@@ -3821,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host)
                dma_addr_t dma;
                void *buf;
 
-               if (host->flags & SDHCI_USE_64_BIT_DMA) {
-                       host->adma_table_sz = host->adma_table_cnt *
-                                             SDHCI_ADMA2_64_DESC_SZ(host);
-                       host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
-               } else {
-                       host->adma_table_sz = host->adma_table_cnt *
-                                             SDHCI_ADMA2_32_DESC_SZ;
-                       host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
-               }
+               if (!(host->flags & SDHCI_USE_64_BIT_DMA))
+                       host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+               else if (!host->alloc_desc_sz)
+                       host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
+
+               host->desc_sz = host->alloc_desc_sz;
+               host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
 
                host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
                /*
@@ -3912,11 +4132,13 @@ int sdhci_setup_host(struct sdhci_host *host)
        if (host->ops->get_min_clock)
                mmc->f_min = host->ops->get_min_clock(host);
        else if (host->version >= SDHCI_SPEC_300) {
-               if (host->clk_mul) {
-                       mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+               if (host->clk_mul)
                        max_clk = host->max_clk * host->clk_mul;
-               } else
-                       mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+               /*
+                * Divided Clock Mode minimum clock rate is always less than
+                * Programmable Clock Mode minimum clock rate.
+                */
+               mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
        } else
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
 
@@ -4275,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host)
                dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);
+
+       if (host->use_external_dma)
+               sdhci_external_dma_release(host);
+
        host->adma_table = NULL;
        host->align_buffer = NULL;
 }
@@ -4320,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host)
 
        pr_info("%s: SDHCI controller on %s [%s] using %s\n",
                mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+               host->use_external_dma ? "External DMA" :
                (host->flags & SDHCI_USE_ADMA) ?
                (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
                (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -4408,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
                                  host->adma_table_sz, host->align_buffer,
                                  host->align_addr);
 
+       if (host->use_external_dma)
+               sdhci_external_dma_release(host);
+
        host->adma_table = NULL;
        host->align_buffer = NULL;
 }
index 0ed3e0eaef5f99e09ac1e299264983d9925a38dc..a6a3ddcf97e7122d9100b486da242b796ac7b6f0 100644 (file)
@@ -409,6 +409,8 @@ struct sdhci_host {
 #define SDHCI_QUIRK_BROKEN_CARD_DETECTION              (1<<15)
 /* Controller reports inverted write-protect state */
 #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT             (1<<16)
+/* Controller has unusable command queue engine */
+#define SDHCI_QUIRK_BROKEN_CQE                         (1<<17)
 /* Controller does not like fast PIO transfers */
 #define SDHCI_QUIRK_PIO_NEEDS_DELAY                    (1<<18)
 /* Controller does not have a LED */
@@ -485,6 +487,7 @@ struct sdhci_host {
 
        int irq;                /* Device IRQ */
        void __iomem *ioaddr;   /* Mapped address */
+       phys_addr_t mapbase;    /* physical address base */
        char *bounce_buffer;    /* For packing SDMA reads/writes */
        dma_addr_t bounce_addr;
        unsigned int bounce_buffer_size;
@@ -533,6 +536,7 @@ struct sdhci_host {
        bool pending_reset;     /* Cmd/data reset is pending */
        bool irq_wake_enabled;  /* IRQ wakeup is enabled */
        bool v4_mode;           /* Host Version 4 Enable */
+       bool use_external_dma;  /* Host opted in to external DMA */
 
        struct mmc_request *mrqs_done[SDHCI_MAX_MRQS];  /* Requests done */
        struct mmc_command *cmd;        /* Current command */
@@ -554,7 +558,8 @@ struct sdhci_host {
        dma_addr_t adma_addr;   /* Mapped ADMA descr. table */
        dma_addr_t align_addr;  /* Mapped bounce buffer */
 
-       unsigned int desc_sz;   /* ADMA descriptor size */
+       unsigned int desc_sz;   /* ADMA current descriptor size */
+       unsigned int alloc_desc_sz;     /* ADMA descr. max size host supports */
 
        struct workqueue_struct *complete_wq;   /* Request completion wq */
        struct work_struct      complete_work;  /* Request completion work */
@@ -562,6 +567,11 @@ struct sdhci_host {
        struct timer_list timer;        /* Timer for timeouts */
        struct timer_list data_timer;   /* Timer for data timeouts */
 
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+       struct dma_chan *rx_chan;
+       struct dma_chan *tx_chan;
+#endif
+
        u32 caps;               /* CAPABILITY_0 */
        u32 caps1;              /* CAPABILITY_1 */
        bool read_caps;         /* Capability flags have been read */
@@ -793,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host);
 void sdhci_reset_tuning(struct sdhci_host *host);
 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en);
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
 
 #endif /* __SDHCI_HW_H */
index b8e897e31e2e298c956da29ee63481dd46df5489..3afea580fbea3fb6c7fe8298dc3cb7bed76457bb 100644 (file)
@@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
        writeb(val, host->ioaddr + reg);
 }
 
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       int err = sdhci_execute_tuning(mmc, opcode);
+
+       if (err)
+               return err;
+       /*
+        * Tuning data remains in the buffer after tuning.
+        * Do a command and data reset to get rid of it.
+        */
+       sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+       return 0;
+}
+
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+       int cmd_error = 0;
+       int data_error = 0;
+
+       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+               return intmask;
+
+       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+       return 0;
+}
+
 static struct sdhci_ops sdhci_am654_ops = {
        .get_max_clock = sdhci_pltfm_clk_get_max_clock,
        .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = {
        .set_power = sdhci_am654_set_power,
        .set_clock = sdhci_am654_set_clock,
        .write_b = sdhci_am654_write_b,
+       .irq = sdhci_am654_cqhci_irq,
        .reset = sdhci_reset,
 };
 
 static const struct sdhci_pltfm_data sdhci_am654_pdata = {
        .ops = &sdhci_am654_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
        .flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
 };
 
-static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
-{
-       int cmd_error = 0;
-       int data_error = 0;
-
-       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
-               return intmask;
-
-       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
-
-       return 0;
-}
-
 static struct sdhci_ops sdhci_j721e_8bit_ops = {
        .get_max_clock = sdhci_pltfm_clk_get_max_clock,
        .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
 
 static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
        .ops = &sdhci_j721e_8bit_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
 
 static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
        .ops = &sdhci_j721e_4bit_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -491,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
        struct sdhci_am654_data *sdhci_am654;
        const struct of_device_id *match;
        struct sdhci_host *host;
-       struct resource *res;
        struct clk *clk_xin;
        struct device *dev = &pdev->dev;
        void __iomem *base;
@@ -524,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
                goto pm_runtime_disable;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       base = devm_ioremap_resource(dev, res);
+       base = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(base)) {
                ret = PTR_ERR(base);
                goto pm_runtime_put;
@@ -549,6 +561,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
                goto pm_runtime_put;
        }
 
+       host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
        ret = sdhci_am654_init(host);
        if (ret)
                goto pm_runtime_put;
index fa0dfc657c229a37aad269ab319c07a9af22e6f2..4625cc071b61af737b577a19404c0da202cf1890 100644 (file)
@@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
        struct device *dev = &pdev->dev;
-       struct resource *res;
        int irq, ctrl = 0, ret = 0;
        struct f_sdhost_priv *priv;
        u32 reg = 0;
@@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
        host->ops = &sdhci_f_sdh30_ops;
        host->irq = irq;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->ioaddr)) {
                ret = PTR_ERR(host->ioaddr);
                goto err;
index 98c575de43c755ed5772150d1bdbb6538a143332..7e1fd557109c04761c7b54bb4d87a3ad36215b61 100644 (file)
@@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
                host->chan_rx = sh_mmcif_request_dma_pdata(host,
                                                        pdata->slave_id_rx);
        } else {
-               host->chan_tx = dma_request_slave_channel(dev, "tx");
-               host->chan_rx = dma_request_slave_channel(dev, "rx");
+               host->chan_tx = dma_request_chan(dev, "tx");
+               if (IS_ERR(host->chan_tx))
+                       host->chan_tx = NULL;
+               host->chan_rx = dma_request_chan(dev, "rx");
+               if (IS_ERR(host->chan_rx))
+                       host->chan_rx = NULL;
        }
        dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
                host->chan_rx);
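
This hunk and the usdhi6 one further down share a conversion pattern: dma_request_slave_channel() returned NULL on failure, while dma_request_chan() returns an ERR_PTR(), so callers that treat the channel as optional map the error back to NULL. The minimal shape, with "tx" as an example channel name:

    /* Optional DMA channel after the dma_request_chan() conversion:
     * turn the ERR_PTR() back into NULL and fall back to PIO.
     */
    chan = dma_request_chan(dev, "tx");
    if (IS_ERR(chan))
            chan = NULL;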
@@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev)
        struct sh_mmcif_host *host;
        struct device *dev = &pdev->dev;
        struct sh_mmcif_plat_data *pd = dev->platform_data;
-       struct resource *res;
        void __iomem *reg;
        const char *name;
 
@@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
        if (irq[0] < 0)
                return -ENXIO;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       reg = devm_ioremap_resource(dev, res);
+       reg = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(reg))
                return PTR_ERR(reg);
 
index d577a6b0ceae2a078d4932764f82095f4f8d5892..f87d7967457f6dbb736f5f797aec563a43222c70 100644 (file)
@@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
        if (ret)
                return ret;
 
-       host->reg_base = devm_ioremap_resource(&pdev->dev,
-                             platform_get_resource(pdev, IORESOURCE_MEM, 0));
+       host->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(host->reg_base))
                return PTR_ERR(host->reg_base);
 
index c4a1d49fbea471747b57510f4bfee0d3e90af0cd..1e424bcdbd5f52798def6f972b207ef76ec7d16b 100644 (file)
@@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
 {
        struct tmio_mmc_host *host;
        struct mmc_host *mmc;
-       struct resource *res;
        void __iomem *ctl;
        int ret;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ctl = devm_ioremap_resource(&pdev->dev, res);
+       ctl = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctl))
                return ERR_CAST(ctl);
 
@@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
         * Look for a card detect GPIO; if it fails with anything other
         * than a probe deferral, just live without it.
         */
-       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+       ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
        if (ret == -EPROBE_DEFER)
                return ret;
 
index 0c72ec5546c36d20c9fdbfae8db76f4848134309..a1683c49cb903b8ef5806d2d85cea45452054b5e 100644 (file)
@@ -59,7 +59,6 @@
 struct uniphier_sd_priv {
        struct tmio_mmc_data tmio_data;
        struct pinctrl *pinctrl;
-       struct pinctrl_state *pinstate_default;
        struct pinctrl_state *pinstate_uhs;
        struct clk *clk;
        struct reset_control *rst;
@@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
 {
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
-       struct pinctrl_state *pinstate;
+       struct pinctrl_state *pinstate = NULL;
        u32 val, tmp;
 
        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                val = UNIPHIER_SD_VOLT_330;
-               pinstate = priv->pinstate_default;
                break;
        case MMC_SIGNAL_VOLTAGE_180:
                val = UNIPHIER_SD_VOLT_180;
@@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
        tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
        writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
 
-       pinctrl_select_state(priv->pinctrl, pinstate);
+       if (pinstate)
+               pinctrl_select_state(priv->pinctrl, pinstate);
+       else
+               pinctrl_select_default_state(mmc_dev(mmc));
 
        return 0;
 }
@@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
        if (IS_ERR(priv->pinctrl))
                return PTR_ERR(priv->pinctrl);
 
-       priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl,
-                                                     PINCTRL_STATE_DEFAULT);
-       if (IS_ERR(priv->pinstate_default))
-               return PTR_ERR(priv->pinstate_default);
-
        priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
        if (IS_ERR(priv->pinstate_uhs))
                return PTR_ERR(priv->pinstate_uhs);
index b11ac2314328d9b997b785b8aa59a2dc6cd8ee8a..9a0b1e4e405dc3bb788d603423524123b3e30e0d 100644 (file)
@@ -199,7 +199,6 @@ struct usdhi6_host {
 
        /* Pin control */
        struct pinctrl *pinctrl;
-       struct pinctrl_state *pins_default;
        struct pinctrl_state *pins_uhs;
 };
 
@@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
        };
        int ret;
 
-       host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+       host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
        dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
                host->chan_tx);
 
-       if (!host->chan_tx)
+       if (IS_ERR(host->chan_tx)) {
+               host->chan_tx = NULL;
                return;
+       }
 
        cfg.direction = DMA_MEM_TO_DEV;
        cfg.dst_addr = start + USDHI6_SD_BUF0;
@@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
        if (ret < 0)
                goto e_release_tx;
 
-       host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+       host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
        dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
                host->chan_rx);
 
-       if (!host->chan_rx)
+       if (IS_ERR(host->chan_rx)) {
+               host->chan_rx = NULL;
                goto e_release_tx;
+       }
 
        cfg.direction = DMA_DEV_TO_MEM;
        cfg.src_addr = cfg.dst_addr;
@@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
                                            host->pins_uhs);
 
        default:
-               return pinctrl_select_state(host->pinctrl,
-                                           host->pins_default);
+               return pinctrl_select_default_state(mmc_dev(host->mmc));
        }
 }
 
@@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev)
        }
 
        host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
-       if (!IS_ERR(host->pins_uhs)) {
-               host->pins_default = pinctrl_lookup_state(host->pinctrl,
-                                                         PINCTRL_STATE_DEFAULT);
-
-               if (IS_ERR(host->pins_default)) {
-                       dev_err(dev,
-                               "UHS pinctrl requires a default pin state.\n");
-                       ret = PTR_ERR(host->pins_default);
-                       goto e_free_mmc;
-               }
-       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->base = devm_ioremap_resource(dev, res);
index f4ac064ff471ddd0aa652f0721951346930e872c..e48bddd95ce6bc6e5775c0f7c34319747b72684d 100644 (file)
@@ -1106,7 +1106,7 @@ static int via_sd_probe(struct pci_dev *pcidev,
 
        len = pci_resource_len(pcidev, 0);
        base = pci_resource_start(pcidev, 0);
-       sdhost->mmiobase = ioremap_nocache(base, len);
+       sdhost->mmiobase = ioremap(base, len);
        if (!sdhost->mmiobase) {
                ret = -ENOMEM;
                goto free_mmc_host;
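
From here through the MTD map drivers the ioremap_nocache() hunks are one mechanical, tree-wide cleanup: ioremap() already returns an uncached mapping on every architecture, so the _nocache alias adds nothing, and both the calls and the log strings naming it are switched to plain ioremap().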
index eccf2e5d905e89309dbfcb3221855f4a100451da..3af50db8b21b4f274e65a7cffa0acc6441fc7ade 100644 (file)
@@ -320,7 +320,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
         * ChipCommon revision.
         */
        if (b47s->bcma_cc->core->id.rev == 54)
-               b47s->window = ioremap_nocache(res->start, resource_size(res));
+               b47s->window = ioremap(res->start, resource_size(res));
        else
                b47s->window = ioremap_cache(res->start, resource_size(res));
        if (!b47s->window) {
index 462fadb56bdb190d2f91e36ae16ca4ea53a0bbcf..42a95ba40f2cb74406da23c406f19cca9de34024 100644 (file)
@@ -163,7 +163,7 @@ static int amd76xrom_init_one(struct pci_dev *pdev,
        /* FIXME handle registers 0x80 - 0x8C the bios region locks */
 
        /* For write accesses caches are useless */
-       window->virt = ioremap_nocache(window->phys, window->size);
+       window->virt = ioremap(window->phys, window->size);
        if (!window->virt) {
                printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
                        window->phys, window->size);
index c9b7b4d5a9230634cc3dc8628782d828e10108b8..460494212f6a7bc5529b36a357d28bc68a4eb358 100644 (file)
@@ -191,7 +191,7 @@ static int __init ck804xrom_init_one(struct pci_dev *pdev,
        /* FIXME handle registers 0x80 - 0x8C the bios region locks */
 
        /* For write accesses caches are useless */
-       window->virt = ioremap_nocache(window->phys, window->size);
+       window->virt = ioremap(window->phys, window->size);
        if (!window->virt) {
                printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
                        window->phys, window->size);
index 5c27c6994896011ec1bd28ef0827c0d64e02016c..85e14150a07306809c753ae81b6f560f4dd3c64f 100644 (file)
@@ -249,7 +249,7 @@ static int __init esb2rom_init_one(struct pci_dev *pdev,
        }
 
        /* Map the firmware hub into my address space. */
-       window->virt = ioremap_nocache(window->phys, window->size);
+       window->virt = ioremap(window->phys, window->size);
        if (!window->virt) {
                printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
                        window->phys, window->size);
index 6b989f391baac0dafff16ffe178803d2ecdf7f9c..fda72c5fd8f93adab3903ad7da0f48d664f3ef27 100644 (file)
@@ -184,7 +184,7 @@ static int __init ichxrom_init_one(struct pci_dev *pdev,
        }
 
        /* Map the firmware hub into my address space. */
-       window->virt = ioremap_nocache(window->phys, window->size);
+       window->virt = ioremap(window->phys, window->size);
        if (!window->virt) {
                printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
                        window->phys, window->size);
index 69503aef981e6f9e059463d25cb958b6f35fd2e8..d67b845b0e896617262343ec90888a1452726f24 100644 (file)
@@ -133,7 +133,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p)
        if (win_len < (CS0_START + CS0_SIZE))
                return -ENXIO;
 
-       p->csr_base = ioremap_nocache(csr_phys, csr_len);
+       p->csr_base = ioremap(csr_phys, csr_len);
        if (!p->csr_base)
                return -ENOMEM;
 
@@ -152,7 +152,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p)
        p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
        p->map.phys = win_phys + CS0_START;
        p->map.size = CS0_SIZE;
-       p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
+       p->map.virt = ioremap(p->map.phys, p->map.size);
        if (!p->map.virt) {
                err = -ENOMEM;
                goto release;
index 0eeadfeb620da2c3df99d3c24c69088e5eea83b0..832b880d1aaf82930de45ee706a949b72df59a1c 100644 (file)
@@ -78,7 +78,7 @@ static int __init init_l440gx(void)
                return -ENODEV;
        }
 
-       l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
+       l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
 
        if (!l440gx_map.virt) {
                printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
index abc52b70bb004ed9c46757fc7262913c259745b9..0bb651624f0578c1ae53b8ffa43af7e8acd0dba3 100644 (file)
@@ -82,10 +82,10 @@ static int __init init_netsc520(void)
        printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n",
                        (unsigned long long)netsc520_map.size,
                        (unsigned long long)netsc520_map.phys);
-       netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size);
+       netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size);
 
        if (!netsc520_map.virt) {
-               printk("Failed to ioremap_nocache\n");
+               printk("Failed to ioremap\n");
                return -EIO;
        }
 
index 50046d497398e331d68e5b7c62c216d7e37df70c..7d349874ffeb6a5546d66761cd86fe0d2cf0e536 100644 (file)
@@ -176,7 +176,7 @@ static int __init nettel_init(void)
 #endif
        int rc = 0;
 
-       nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
+       nettel_mmcrp = (void *) ioremap(0xfffef000, 4096);
        if (nettel_mmcrp == NULL) {
                printk("SNAPGEAR: failed to disable MMCR cache??\n");
                return(-EIO);
@@ -217,7 +217,7 @@ static int __init nettel_init(void)
        __asm__ ("wbinvd");
 
        nettel_amd_map.phys = amdaddr;
-       nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
+       nettel_amd_map.virt = ioremap(amdaddr, maxsize);
        if (!nettel_amd_map.virt) {
                printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
                iounmap(nettel_mmcrp);
@@ -303,7 +303,7 @@ static int __init nettel_init(void)
        /* Probe for the size of the first Intel flash */
        nettel_intel_map.size = maxsize;
        nettel_intel_map.phys = intel0addr;
-       nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+       nettel_intel_map.virt = ioremap(intel0addr, maxsize);
        if (!nettel_intel_map.virt) {
                printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
                rc = -EIO;
@@ -337,7 +337,7 @@ static int __init nettel_init(void)
        iounmap(nettel_intel_map.virt);
 
        nettel_intel_map.size = maxsize;
-       nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+       nettel_intel_map.virt = ioremap(intel0addr, maxsize);
        if (!nettel_intel_map.virt) {
                printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
                rc = -EIO;
index 9a49f8a06fb886c4cbf0850f304e51336a8ec543..377ef0fc4e3efd9e5a45f4e5ee4a7d1c2f42fc93 100644 (file)
@@ -94,7 +94,7 @@ intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
        map->map.write = mtd_pci_write8,
 
        map->map.size     = 0x00800000;
-       map->base         = ioremap_nocache(pci_resource_start(dev, 0),
+       map->base         = ioremap(pci_resource_start(dev, 0),
                                            pci_resource_len(dev, 0));
 
        if (!map->base)
@@ -188,7 +188,7 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
        map->map.read = mtd_pci_read32,
        map->map.write = mtd_pci_write32,
        map->map.size     = len;
-       map->base         = ioremap_nocache(base, len);
+       map->base         = ioremap(base, len);
 
        if (!map->base)
                return -ENOMEM;
index 03af2df90d47567b8dd9436aba5a4ebc3f85977c..9902b37e18b4ec6e42686523d0dd16a10ba7fb5b 100644 (file)
@@ -174,8 +174,8 @@ static void sc520cdp_setup_par(void)
        int i, j;
 
        /* map in SC520's MMCR area */
-       mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
-       if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */
+       mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
+       if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */
                /* force physical address fields to BIOS defaults: */
                for(i = 0; i < NUM_FLASH_BANKS; i++)
                        sc520cdp_map[i].phys = par_table[i].default_address;
@@ -225,10 +225,10 @@ static int __init init_sc520cdp(void)
                        (unsigned long long)sc520cdp_map[i].size,
                        (unsigned long long)sc520cdp_map[i].phys);
 
-               sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size);
+               sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size);
 
                if (!sc520cdp_map[i].virt) {
-                       printk("Failed to ioremap_nocache\n");
+                       printk("Failed to ioremap\n");
                        for (j = 0; j < i; j++) {
                                if (mymtd[j]) {
                                        map_destroy(mymtd[j]);
index 2afb253bf456838de1a1bbb56b6c8ef84db5b3a1..57303f904bc1200dce8b523d9ecd65162e523828 100644 (file)
@@ -152,7 +152,7 @@ static int scb2_flash_probe(struct pci_dev *dev,
        }
 
        /* remap the IO window (w/o caching) */
-       scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
+       scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW);
        if (!scb2_ioaddr) {
                printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
                if (!region_fail)
index 6cfc8783c0e5e9a8fb03065186a8742ad865eff4..70d6e865f55512087916f850d116284049905e99 100644 (file)
@@ -56,10 +56,10 @@ static int __init init_ts5500_map(void)
 {
        int rc = 0;
 
-       ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
+       ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size);
 
        if (!ts5500_map.virt) {
-               printk(KERN_ERR "Failed to ioremap_nocache\n");
+               printk(KERN_ERR "Failed to ioremap\n");
                rc = -EIO;
                goto err2;
        }
index edf94ee54ec7fb4b7bdae3ec8ff730284d248bef..aa9368bf7a0c8e48530da90df282fe0a1d8a1ed7 100644 (file)
@@ -148,13 +148,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
        unsigned long timeout;
        u32 syscfg;
 
-       if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+       if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
            state == FL_VERIFYING_ERASE) {
                int i = 21;
                unsigned int intr_flags = ONENAND_INT_MASTER;
 
                switch (state) {
-               case FL_RESETING:
+               case FL_RESETTING:
                        intr_flags |= ONENAND_INT_RESET;
                        break;
                case FL_PREPARING_ERASE:
@@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
 
-       tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+       tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
+                                      DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
                return -EIO;
@@ -375,7 +376,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
         * context fallback to PIO mode.
         */
        if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
-           count < 384 || in_interrupt() || oops_in_progress )
+           count < 384 || in_interrupt() || oops_in_progress)
                goto out_copy;
 
        xtra = count & 3;
@@ -422,7 +423,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
         * context fallback to PIO mode.
         */
        if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
-           count < 384 || in_interrupt() || oops_in_progress )
+           count < 384 || in_interrupt() || oops_in_progress)
                goto out_copy;
 
        dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
@@ -528,7 +529,8 @@ static int omap2_onenand_probe(struct platform_device *pdev)
                 c->gpmc_cs, c->phys_base, c->onenand.base,
                 c->dma_chan ? "DMA" : "PIO");
 
-       if ((r = onenand_scan(&c->mtd, 1)) < 0)
+       r = onenand_scan(&c->mtd, 1);
+       if (r < 0)
                goto err_release_dma;
 
        freq = omap2_onenand_get_freq(c->onenand.version_id);
index 77bd32a683e180f4f438d56e382cecb1ac10d967..85640ee11c8688b55d16e40ef8802fead6e7eeaf 100644 (file)
@@ -2853,7 +2853,7 @@ static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 
                /* Exit OTP access mode */
                this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-               this->wait(mtd, FL_RESETING);
+               this->wait(mtd, FL_RESETTING);
 
                status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
                status &= 0x60;
@@ -2924,7 +2924,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
 
        /* Exit OTP access mode */
        this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-       this->wait(mtd, FL_RESETING);
+       this->wait(mtd, FL_RESETTING);
 
        return ret;
 }
@@ -2968,7 +2968,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
 
        /* Exit OTP access mode */
        this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-       this->wait(mtd, FL_RESETING);
+       this->wait(mtd, FL_RESETTING);
 
        return ret;
 }
@@ -3008,7 +3008,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
 
                /* Exit OTP access mode */
                this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-               this->wait(mtd, FL_RESETING);
+               this->wait(mtd, FL_RESETTING);
        } else {
                ops.mode = MTD_OPS_PLACE_OOB;
                ops.ooblen = len;
@@ -3413,7 +3413,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
                this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
 
                this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-               this->wait(mtd, FL_RESETING);
+               this->wait(mtd, FL_RESETTING);
 
                printk(KERN_INFO "Die %d boundary: %d%s\n", die,
                       this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3635,7 +3635,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
        ret = this->wait(mtd, FL_WRITING);
 out:
        this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
-       this->wait(mtd, FL_RESETING);
+       this->wait(mtd, FL_RESETTING);
        if (!ret)
                /* Recalculate device size on boundary change*/
                flexonenand_get_size(mtd);
@@ -3671,7 +3671,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
        /* Reset OneNAND to read default register values */
        this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
        /* Wait reset */
-       this->wait(mtd, FL_RESETING);
+       this->wait(mtd, FL_RESETTING);
 
        /* Restore system configuration 1 */
        this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
index 55e5536a5850d17c9c9c5565f5ebed0a2d62b6ba..beb7987e4c2b7ba50c7beae4b95a4b99d48fe410 100644 (file)
@@ -675,12 +675,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
 normal:
        if (count != mtd->writesize) {
                /* Copy the bufferram to memory to prevent unaligned access */
-               memcpy(this->page_buf, p, mtd->writesize);
-               p = this->page_buf + offset;
+               memcpy_fromio(this->page_buf, p, mtd->writesize);
+               memcpy(buffer, this->page_buf + offset, count);
+       } else {
+               memcpy_fromio(buffer, p, count);
        }
 
-       memcpy(buffer, p, count);
-
        return 0;
 }
 
index e10b76089048e06dbfd83a41f05ac14fca01ceb7..75eb3e97fae3457333891b43fc893045c2a2440a 100644 (file)
@@ -404,7 +404,7 @@ static int au1550nd_probe(struct platform_device *pdev)
                goto out1;
        }
 
-       ctx->base = ioremap_nocache(r->start, 0x1000);
+       ctx->base = ioremap(r->start, 0x1000);
        if (!ctx->base) {
                dev_err(&pdev->dev, "cannot remap NAND memory area\n");
                ret = -ENODEV;
index 3a36285a8d8a191948c603957ee7629c1cf7fc48..f6c7102a1e3253f79cbc1f6c0bf54858661c7564 100644 (file)
@@ -914,8 +914,8 @@ static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
 /* Prepare CDMA descriptor. */
 static void
 cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
-                              char nf_mem, u32 flash_ptr, char *mem_ptr,
-                              char *ctrl_data_ptr, u16 ctype)
+                              char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
+                              dma_addr_t ctrl_data_ptr, u16 ctype)
 {
        struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
 
@@ -931,13 +931,13 @@ cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
        cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
        cdma_desc->command_flags  |= CDMA_CF_INT;
 
-       cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
+       cdma_desc->memory_pointer = mem_ptr;
        cdma_desc->status = 0;
        cdma_desc->sync_flag_pointer = 0;
        cdma_desc->sync_arguments = 0;
 
        cdma_desc->command_type = ctype;
-       cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
+       cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
 }
 
 static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
@@ -1280,8 +1280,7 @@ cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
        }
 
        cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
-                                      (void *)dma_buf, (void *)dma_ctrl_dat,
-                                      ctype);
+                                      dma_buf, dma_ctrl_dat, ctype);
 
        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
 
@@ -1360,7 +1359,7 @@ static int cadence_nand_erase(struct nand_chip *chip, u32 page)
 
        cadence_nand_cdma_desc_prepare(cdns_ctrl,
                                       cdns_chip->cs[chip->cur_cs],
-                                      page, NULL, NULL,
+                                      page, 0, 0,
                                       CDMA_CT_ERASE);
        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
        if (status) {
index d62aa5271753e802f286aa431c61a36e2f6997aa..2f77ee55e1bfcb2f8cf4589b3b2635d001fc66f6 100644 (file)
@@ -74,15 +74,15 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                return ret;
        }
 
-       denali->reg = ioremap_nocache(csr_base, csr_len);
+       denali->reg = ioremap(csr_base, csr_len);
        if (!denali->reg) {
                dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
                return -ENOMEM;
        }
 
-       denali->host = ioremap_nocache(mem_base, mem_len);
+       denali->host = ioremap(mem_base, mem_len);
        if (!denali->host) {
-               dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
+               dev_err(&dev->dev, "Spectra: ioremap failed!");
                ret = -ENOMEM;
                goto out_unmap_reg;
        }
index 1054cc070747e2b31e28bd6a82b3bc0aea89f708..f31fae3a4c6894a5c72498d6b672548c0bff06d2 100644 (file)
@@ -285,7 +285,7 @@ static int fun_probe(struct platform_device *ofdev)
                fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
                                  FSL_UPM_WAIT_WRITE_BYTE;
 
-       fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
+       fun->io_base = devm_ioremap(&ofdev->dev, io_res.start,
                                            resource_size(&io_res));
        if (!fun->io_base) {
                ret = -ENOMEM;
index 334fe3130285a0dc2556b36acfdac767eb137d36..b9d5d55a5edb9a6e5384a87a1470f168225db58a 100644 (file)
@@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this)
        struct resources *r = &this->resources;
        int ret;
 
+       ret = pm_runtime_get_sync(this->dev);
+       if (ret < 0)
+               return ret;
+
        ret = gpmi_reset_block(r->gpmi_regs, false);
        if (ret)
                goto err_out;
@@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this)
         */
        writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 
-       return 0;
 err_out:
+       pm_runtime_mark_last_busy(this->dev);
+       pm_runtime_put_autosuspend(this->dev);
        return ret;
 }
 
@@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev)
                return ret;
        }
 
+       /* Set flag to get timing setup restored for next exec_op */
+       if (this->hw.clk_rate)
+               this->hw.must_apply_timings = true;
+
        /* re-init the BCH registers */
        ret = bch_set_geometry(this);
        if (ret) {
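
The gpmi hunks above bracket gpmi_init()'s register accesses with a runtime-PM reference so the block is guaranteed to be powered, dropping the reference through autosuspend on every exit path, and flag that timings must be reapplied after resume. A minimal sketch of that bracket pattern, with an illustrative function name (the put_noidle() on the error path is standard practice, not part of the hunk above):

    /* Sketch: hold a runtime-PM reference around hardware access. */
    static int example_touch_hw(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev); /* resume the block if needed */
            if (ret < 0) {
                    /* get_sync bumps the usage count even on failure */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            /* ... register accesses go here ... */

            pm_runtime_mark_last_busy(dev);  /* re-arm the autosuspend timer */
            pm_runtime_put_autosuspend(dev); /* drop the ref; suspend later */
            return 0;
    }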
index 9e63800f768a8f68613efcbdba70ee58bfcff920..3ba73f18841f9b68da3e83a007530937cea63e98 100644 (file)
@@ -37,6 +37,7 @@
 /* Max ECC buffer length */
 #define FMC2_MAX_ECC_BUF_LEN           (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
 
+#define FMC2_TIMEOUT_US                        1000
 #define FMC2_TIMEOUT_MS                        1000
 
 /* Timings */
@@ -53,6 +54,8 @@
 #define FMC2_PMEM                      0x88
 #define FMC2_PATT                      0x8c
 #define FMC2_HECCR                     0x94
+#define FMC2_ISR                       0x184
+#define FMC2_ICR                       0x188
 #define FMC2_CSQCR                     0x200
 #define FMC2_CSQCFGR1                  0x204
 #define FMC2_CSQCFGR2                  0x208
 #define FMC2_PATT_ATTHIZ(x)            (((x) & 0xff) << 24)
 #define FMC2_PATT_DEFAULT              0x0a0a0a0a
 
+/* Register: FMC2_ISR */
+#define FMC2_ISR_IHLF                  BIT(1)
+
+/* Register: FMC2_ICR */
+#define FMC2_ICR_CIHLF                 BIT(1)
+
 /* Register: FMC2_CSQCR */
 #define FMC2_CSQCR_CSQSTART            BIT(0)
 
@@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
                stm32_fmc2_set_buswidth_16(fmc2, true);
 }
 
+static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+       struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+       const struct nand_sdr_timings *timings;
+       u32 isr, sr;
+
+       /* Check that there are no pending requests to the NAND flash */
+       if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
+                                             sr & FMC2_SR_NWRF, 1,
+                                             FMC2_TIMEOUT_US))
+               dev_warn(fmc2->dev, "Waitrdy timeout\n");
+
+       /* Wait tWB before R/B# signal is low */
+       timings = nand_get_sdr_timings(&chip->data_interface);
+       ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
+       /* R/B# signal is low, clear high level flag */
+       writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
+
+       /* Wait until R/B# signal is high */
+       return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
+                                                isr, isr & FMC2_ISR_IHLF,
+                                                5, 1000 * timeout_ms);
+}
+
 static int stm32_fmc2_exec_op(struct nand_chip *chip,
                              const struct nand_operation *op,
                              bool check_only)
@@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
                        break;
 
                case NAND_OP_WAITRDY_INSTR:
-                       ret = nand_soft_waitrdy(chip,
-                                               instr->ctx.waitrdy.timeout_ms);
+                       ret = stm32_fmc2_waitrdy(chip,
+                                                instr->ctx.waitrdy.timeout_ms);
                        break;
                }
        }
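
The new stm32_fmc2_waitrdy() above is built on readl_relaxed_poll_timeout_atomic(): drain pending writes, wait tWB, clear the latched high-level flag, then poll ISR until R/B# goes high again. Roughly, that helper expands to a delay-and-re-read loop like this sketch (simplified and illustrative, not the literal macro; the real readx_poll_timeout_atomic() family re-reads once after the deadline so a late success is not reported as a timeout):

    /* Illustrative expansion of readl_relaxed_poll_timeout_atomic(addr, val,
     * cond, delay_us, timeout_us).
     */
    ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

    for (;;) {
            val = readl_relaxed(addr);
            if (cond)
                    break;                          /* success: evaluates to 0 */
            if (ktime_compare(ktime_get(), deadline) > 0) {
                    val = readl_relaxed(addr);      /* one last look */
                    break;                          /* cond ? 0 : -ETIMEDOUT */
            }
            udelay(delay_us);
    }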
index 4744bf94ad9a912f5dd7a0cdd3478c4c27fac853..b9f272408c4d5f57b61dcc781e83c70fc76337e1 100644 (file)
@@ -247,7 +247,8 @@ static int sm_read_sector(struct sm_ftl *ftl,
 
        /* FTL can contain -1 entries that are by default filled with bits */
        if (block == -1) {
-               memset(buffer, 0xFF, SM_SECTOR_SIZE);
+               if (buffer)
+                       memset(buffer, 0xFF, SM_SECTOR_SIZE);
                return 0;
        }
 
index f4afe123e9dcd38be987dd79be7f20309a24e620..b0cd443dd758d1a098b8885ef034c71e41b4dacd 100644 (file)
@@ -2124,6 +2124,8 @@ static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
        if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
                return 0;
 
+       nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
        return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
 }
 
@@ -4596,6 +4598,7 @@ static void sst_set_default_init(struct spi_nor *nor)
 static void st_micron_set_default_init(struct spi_nor *nor)
 {
        nor->flags |= SNOR_F_HAS_LOCK;
+       nor->flags &= ~SNOR_F_HAS_16BIT_SR;
        nor->params.quad_enable = NULL;
        nor->params.set_4byte = st_micron_set_4byte;
 }
@@ -4768,9 +4771,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
 
 static void spansion_post_sfdp_fixups(struct spi_nor *nor)
 {
-       struct mtd_info *mtd = &nor->mtd;
-
-       if (mtd->size <= SZ_16M)
+       if (nor->params.size <= SZ_16M)
                return;
 
        nor->flags |= SNOR_F_4B_OPCODES;
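
The one-line spi_nor_sr2_bit1_quad_enable() fix above restores a missing read-modify-write step: the old code read SR2, returned early if Quad Enable was already set, and otherwise wrote the unmodified value back, so the bit was never actually set. The intended shape, with hypothetical accessor names:

    u8 cr = read_cr(nor);                   /* hypothetical: read current value */

    if (cr & SR2_QUAD_EN_BIT1)
            return 0;                       /* already enabled, nothing to write */

    cr |= SR2_QUAD_EN_BIT1;                 /* the step the old code skipped */
    return write_cr_and_check(nor, cr);     /* hypothetical: write and verify */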
index fcb7c2f7f001b310075a8774ae67d8ea9e879575..48d5ec770b94242fddb71a02effab6fc1ee988e8 100644 (file)
@@ -2272,9 +2272,6 @@ static void bond_miimon_commit(struct bonding *bond)
                        } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                                /* make it immediately active */
                                bond_set_active_slave(slave);
-                       } else if (slave != primary) {
-                               /* prevent it from being the active one */
-                               bond_set_backup_slave(slave);
                        }
 
                        slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
@@ -3702,32 +3699,35 @@ static int bond_neigh_init(struct neighbour *n)
        const struct net_device_ops *slave_ops;
        struct neigh_parms parms;
        struct slave *slave;
-       int ret;
+       int ret = 0;
 
-       slave = bond_first_slave(bond);
+       rcu_read_lock();
+       slave = bond_first_slave_rcu(bond);
        if (!slave)
-               return 0;
+               goto out;
        slave_ops = slave->dev->netdev_ops;
        if (!slave_ops->ndo_neigh_setup)
-               return 0;
-
-       parms.neigh_setup = NULL;
-       parms.neigh_cleanup = NULL;
-       ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
-       if (ret)
-               return ret;
+               goto out;
 
-       /* Assign slave's neigh_cleanup to neighbour in case cleanup is called
-        * after the last slave has been detached.  Assumes that all slaves
-        * utilize the same neigh_cleanup (true at this writing as only user
-        * is ipoib).
+       /* TODO: find another way [1] to implement this.
+        * Passing a zeroed structure is fragile,
+        * but at least we do not pass garbage.
+        *
+        * [1] One way would be that ndo_neigh_setup() never touch
+        *     struct neigh_parms, but propagate the new neigh_setup()
+        *     back to ___neigh_create() / neigh_parms_alloc()
         */
-       n->parms->neigh_cleanup = parms.neigh_cleanup;
+       memset(&parms, 0, sizeof(parms));
+       ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
 
-       if (!parms.neigh_setup)
-               return 0;
+       if (ret)
+               goto out;
 
-       return parms.neigh_setup(n);
+       if (parms.neigh_setup)
+               ret = parms.neigh_setup(n);
+out:
+       rcu_read_unlock();
+       return ret;
 }
 
 /* The bonding ndo_neigh_setup is called at init time before any
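
The bond_neigh_init() rework above switches to bond_first_slave_rcu() under rcu_read_lock() so the lookup is safe without RTNL, and passes a zeroed neigh_parms instead of uninitialized stack contents. The read-side pattern, with an illustrative callee:

    rcu_read_lock();
    slave = bond_first_slave_rcu(bond);     /* may be NULL */
    if (slave)
            ret = use_slave(slave->dev);    /* illustrative; the pointer is only
                                             * guaranteed valid inside this
                                             * read-side critical section */
    rcu_read_unlock();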
index c8e1a04ba384dfbea3c8e1d191cc1c07cbf455e8..9df2007b5e56b50e7a5f19368533239c80c78253 100644 (file)
@@ -1302,7 +1302,7 @@ static int at91_can_probe(struct platform_device *pdev)
                goto exit_put;
        }
 
-       addr = ioremap_nocache(res->start, resource_size(res));
+       addr = ioremap(res->start, resource_size(res));
        if (!addr) {
                err = -ENOMEM;
                goto exit_release;
index b9047d8110d5a827e4f2629cfb3ac3a7b8fc1794..194c86e0f340fb873965ce05f543f09b4c314482 100644 (file)
@@ -175,7 +175,7 @@ static int cc770_isa_probe(struct platform_device *pdev)
                        err = -EBUSY;
                        goto exit;
                }
-               base = ioremap_nocache(mem[idx], iosize);
+               base = ioremap(mem[idx], iosize);
                if (!base) {
                        err = -ENOMEM;
                        goto exit_release;
index a929cdda9ab23af08838c08e27247cb408aa6102..94d10ec954a05982c0934e27a2743579d174e5fc 100644 (file)
@@ -389,6 +389,34 @@ static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv
                (&priv->regs->mb[bank][priv->mb_size * mb_index]);
 }
 
+static int flexcan_low_power_enter_ack(struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->regs;
+       unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+       while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+               udelay(10);
+
+       if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int flexcan_low_power_exit_ack(struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->regs;
+       unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+       while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+               udelay(10);
+
+       if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
 static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
 {
        struct flexcan_regs __iomem *regs = priv->regs;
@@ -407,7 +435,6 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
 static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->regs;
-       unsigned int ackval;
        u32 reg_mcr;
 
        reg_mcr = priv->read(&regs->mcr);
@@ -418,36 +445,24 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
        regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
                           1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
 
-       /* get stop acknowledgment */
-       if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
-                                    ackval, ackval & (1 << priv->stm.ack_bit),
-                                    0, FLEXCAN_TIMEOUT_US))
-               return -ETIMEDOUT;
-
-       return 0;
+       return flexcan_low_power_enter_ack(priv);
 }
 
 static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->regs;
-       unsigned int ackval;
        u32 reg_mcr;
 
        /* remove stop request */
        regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
                           1 << priv->stm.req_bit, 0);
 
-       /* get stop acknowledgment */
-       if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
-                                    ackval, !(ackval & (1 << priv->stm.ack_bit)),
-                                    0, FLEXCAN_TIMEOUT_US))
-               return -ETIMEDOUT;
 
        reg_mcr = priv->read(&regs->mcr);
        reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
        priv->write(reg_mcr, &regs->mcr);
 
-       return 0;
+       return flexcan_low_power_exit_ack(priv);
 }
 
 static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
@@ -506,39 +521,25 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
 static int flexcan_chip_enable(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->regs;
-       unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
        u32 reg;
 
        reg = priv->read(&regs->mcr);
        reg &= ~FLEXCAN_MCR_MDIS;
        priv->write(reg, &regs->mcr);
 
-       while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               udelay(10);
-
-       if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
-               return -ETIMEDOUT;
-
-       return 0;
+       return flexcan_low_power_exit_ack(priv);
 }
 
 static int flexcan_chip_disable(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->regs;
-       unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
        u32 reg;
 
        reg = priv->read(&regs->mcr);
        reg |= FLEXCAN_MCR_MDIS;
        priv->write(reg, &regs->mcr);
 
-       while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               udelay(10);
-
-       if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-               return -ETIMEDOUT;
-
-       return 0;
+       return flexcan_low_power_enter_ack(priv);
 }
 
 static int flexcan_chip_freeze(struct flexcan_priv *priv)
@@ -1722,6 +1723,9 @@ static int __maybe_unused flexcan_resume(struct device *device)
                netif_start_queue(dev);
                if (device_may_wakeup(device)) {
                        disable_irq_wake(dev->irq);
+                       err = flexcan_exit_stop_mode(priv);
+                       if (err)
+                               return err;
                } else {
                        err = pm_runtime_force_resume(device);
                        if (err)
@@ -1767,14 +1771,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
 {
        struct net_device *dev = dev_get_drvdata(device);
        struct flexcan_priv *priv = netdev_priv(dev);
-       int err;
 
-       if (netif_running(dev) && device_may_wakeup(device)) {
+       if (netif_running(dev) && device_may_wakeup(device))
                flexcan_enable_wakeup_irq(priv, false);
-               err = flexcan_exit_stop_mode(priv);
-               if (err)
-                       return err;
-       }
 
        return 0;
 }
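
The flexcan refactor above folds four copies of the same MCR LPM_ACK busy-wait into flexcan_low_power_enter_ack()/flexcan_low_power_exit_ack(), which differ only in the bit polarity they wait for. One possible further consolidation (a sketch, not what the patch does) is a single body taking the desired state:

    /* Hypothetical single-body variant of the two ack helpers above. */
    static int flexcan_low_power_ack(struct flexcan_priv *priv, bool want_set)
    {
            struct flexcan_regs __iomem *regs = priv->regs;
            unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;

            while (timeout--) {
                    bool set = priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK;

                    if (set == want_set)
                            return 0;
                    udelay(10);
            }

            return -ETIMEDOUT;
    }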
index 3db619209fe199ee8f39cb59987e439c0c50f99c..eacd428e07e9f32c9c014b877d0d0105d8b4ffc9 100644 (file)
 #define TCAN4X5X_MODE_STANDBY BIT(6)
 #define TCAN4X5X_MODE_NORMAL BIT(7)
 
+#define TCAN4X5X_DISABLE_WAKE_MSK      (BIT(31) | BIT(30))
+#define TCAN4X5X_DISABLE_INH_MSK       BIT(9)
+
 #define TCAN4X5X_SW_RESET BIT(2)
 
 #define TCAN4X5X_MCAN_CONFIGURED BIT(5)
@@ -164,6 +167,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
        }
 }
 
+static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
+{
+       int ret = 0;
+
+       if (priv->reset_gpio) {
+               gpiod_set_value(priv->reset_gpio, 1);
+
+               /* tpulse_width minimum 30us */
+               usleep_range(30, 100);
+               gpiod_set_value(priv->reset_gpio, 0);
+       } else {
+               ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG,
+                                  TCAN4X5X_SW_RESET);
+               if (ret)
+                       return ret;
+       }
+
+       usleep_range(700, 1000);
+
+       return ret;
+}
+
 static int regmap_spi_gather_write(void *context, const void *reg,
                                   size_t reg_len, const void *val,
                                   size_t val_len)
@@ -338,15 +363,34 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
        return ret;
 }
 
+static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
+{
+       struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+       return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+                                 TCAN4X5X_DISABLE_WAKE_MSK, 0x00);
+}
+
+static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
+{
+       struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+       return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+                                 TCAN4X5X_DISABLE_INH_MSK, 0x01);
+}
+
 static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
 {
        struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+       int ret;
 
        tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
                                                    GPIOD_OUT_HIGH);
        if (IS_ERR(tcan4x5x->device_wake_gpio)) {
-               dev_err(cdev->dev, "device-wake gpio not defined\n");
-               return -EINVAL;
+               if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+
+               tcan4x5x_disable_wake(cdev);
        }
 
        tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
@@ -354,16 +398,17 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
        if (IS_ERR(tcan4x5x->reset_gpio))
                tcan4x5x->reset_gpio = NULL;
 
+       ret = tcan4x5x_reset(tcan4x5x);
+       if (ret)
+               return ret;
+
        tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
                                                              "device-state",
                                                              GPIOD_IN);
-       if (IS_ERR(tcan4x5x->device_state_gpio))
+       if (IS_ERR(tcan4x5x->device_state_gpio)) {
                tcan4x5x->device_state_gpio = NULL;
-
-       tcan4x5x->power = devm_regulator_get_optional(cdev->dev,
-                                                     "vsup");
-       if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+               tcan4x5x_disable_state(cdev);
+       }
 
        return 0;
 }
@@ -398,6 +443,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
        if (!priv)
                return -ENOMEM;
 
+       priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
+       if (PTR_ERR(priv->power) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       else
+               priv->power = NULL;
+
        mcan_class->device_data = priv;
 
        m_can_class_get_clocks(mcan_class);
@@ -428,10 +479,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 
        spi_set_drvdata(spi, priv);
 
-       ret = tcan4x5x_parse_config(mcan_class);
-       if (ret)
-               goto out_clk;
-
        /* Configure the SPI bus */
        spi->bits_per_word = 32;
        ret = spi_setup(spi);
@@ -441,7 +488,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
        priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
                                        &spi->dev, &tcan4x5x_regmap);
 
-       tcan4x5x_power_enable(priv->power, 1);
+       ret = tcan4x5x_power_enable(priv->power, 1);
+       if (ret)
+               goto out_clk;
+
+       ret = tcan4x5x_parse_config(mcan_class);
+       if (ret)
+               goto out_power;
+
+       ret = tcan4x5x_init(mcan_class);
+       if (ret)
+               goto out_power;
 
        ret = m_can_class_register(mcan_class);
        if (ret)
index 8caf7af0dee2040b9daa615447eae6502459a3ec..99101d7027a8fcc63336ab9ca74087d00353d9c8 100644 (file)
@@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
        struct net_device *dev = napi->dev;
        struct mscan_regs __iomem *regs = priv->reg_base;
        struct net_device_stats *stats = &dev->stats;
-       int npackets = 0;
-       int ret = 1;
+       int work_done = 0;
        struct sk_buff *skb;
        struct can_frame *frame;
        u8 canrflg;
 
-       while (npackets < quota) {
+       while (work_done < quota) {
                canrflg = in_8(&regs->canrflg);
                if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
                        break;
@@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
 
                stats->rx_packets++;
                stats->rx_bytes += frame->can_dlc;
-               npackets++;
+               work_done++;
                netif_receive_skb(skb);
        }
 
-       if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
-               napi_complete(&priv->napi);
-               clear_bit(F_RX_PROGRESS, &priv->flags);
-               if (priv->can.state < CAN_STATE_BUS_OFF)
-                       out_8(&regs->canrier, priv->shadow_canrier);
-               ret = 0;
+       if (work_done < quota) {
+               if (likely(napi_complete_done(&priv->napi, work_done))) {
+                       clear_bit(F_RX_PROGRESS, &priv->flags);
+                       if (priv->can.state < CAN_STATE_BUS_OFF)
+                               out_8(&regs->canrier, priv->shadow_canrier);
+               }
        }
-       return ret;
+       return work_done;
 }
 
 static irqreturn_t mscan_isr(int irq, void *dev_id)
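
The mscan conversion above aligns the poll handler with the NAPI contract: return the number of packets processed, call napi_complete_done() only when less than the full budget was consumed, and re-enable interrupts only if it returns true (it can return false under busy polling). The canonical shape, with illustrative helpers:

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            while (work_done < budget && rx_pending())      /* illustrative */
                    work_done += process_one_rx();          /* illustrative */

            if (work_done < budget &&
                napi_complete_done(napi, work_done))
                    enable_rx_irqs();       /* only when truly completed */

            return work_done;
    }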
index 1c4d32d1a542e7e78f9e318443e4d26bf6841970..d513fac50718542061f786a75191fe1f25e711bd 100644 (file)
@@ -130,7 +130,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
                        err = -EBUSY;
                        goto exit;
                }
-               base = ioremap_nocache(mem[idx], iosize);
+               base = ioremap(mem[idx], iosize);
                if (!base) {
                        err = -ENOMEM;
                        goto exit_release;
index ff5a96f3408585cde10893135327575125e7a164..d7222ba466225cd423c9974fd65c76dac965ad03 100644 (file)
@@ -229,7 +229,7 @@ static int sp_probe(struct platform_device *pdev)
                                     resource_size(res_mem), DRV_NAME))
                return -EBUSY;
 
-       addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+       addr = devm_ioremap(&pdev->dev, res_mem->start,
                                    resource_size(res_mem));
        if (!addr)
                return -ENOMEM;
index 2e57122f02fb079c94e8e1fea197fe40286c133d..2f5c287eac95761f1a57110078875f6615b944f4 100644 (file)
@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
  */
 static void slcan_write_wakeup(struct tty_struct *tty)
 {
-       struct slcan *sl = tty->disc_data;
+       struct slcan *sl;
+
+       rcu_read_lock();
+       sl = rcu_dereference(tty->disc_data);
+       if (!sl)
+               goto out;
 
        schedule_work(&sl->tx_work);
+out:
+       rcu_read_unlock();
 }
 
 /* Send a can_frame to a TTY queue. */
@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
                return;
 
        spin_lock_bh(&sl->lock);
-       tty->disc_data = NULL;
+       rcu_assign_pointer(tty->disc_data, NULL);
        sl->tty = NULL;
        spin_unlock_bh(&sl->lock);
 
+       synchronize_rcu();
        flush_work(&sl->tx_work);
 
        /* Flush network side */
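
The slcan hunks above close a use-after-free race between slcan_close() and the TTY write wakeup: readers fetch tty->disc_data with rcu_dereference() under rcu_read_lock(), while the close path unpublishes the pointer and waits out in-flight readers before flushing the work. The writer-side retire sequence, condensed from the hunk:

    spin_lock_bh(&sl->lock);
    rcu_assign_pointer(tty->disc_data, NULL);       /* unpublish */
    sl->tty = NULL;
    spin_unlock_bh(&sl->lock);

    synchronize_rcu();              /* all rcu_read_lock() readers drained */
    flush_work(&sl->tx_work);       /* safe: no new work can be scheduled */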
index 8242fb287cbbe2c5039388de249006629636f12a..d1ddf763b188b9bca02f5fb04e8a8626cac0a9aa 100644 (file)
@@ -777,7 +777,7 @@ static int softing_pdev_probe(struct platform_device *pdev)
                goto platform_resource_failed;
        card->dpram_phys = pres->start;
        card->dpram_size = resource_size(pres);
-       card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+       card->dpram = ioremap(card->dpram_phys, card->dpram_size);
        if (!card->dpram) {
                dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
                goto ioremap_failed;
index 2f74f6704c1286dc678c323aceaa34f0c64396ab..a4b4b742c80c322a5759fdb7b62c4f57368462ac 100644 (file)
@@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf,
                             GS_USB_BREQ_HOST_FORMAT,
                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                             1,
-                            intf->altsetting[0].desc.bInterfaceNumber,
+                            intf->cur_altsetting->desc.bInterfaceNumber,
                             hconf,
                             sizeof(*hconf),
                             1000);
@@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf,
                             GS_USB_BREQ_DEVICE_CONFIG,
                             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                             1,
-                            intf->altsetting[0].desc.bInterfaceNumber,
+                            intf->cur_altsetting->desc.bInterfaceNumber,
                             dconf,
                             sizeof(*dconf),
                             1000);
index 5fc0be564274375f3d5c579521a2d3b89ecd4a88..7ab87a758754543a189672ad886ce8dbda2f19e3 100644 (file)
@@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
        struct usb_endpoint_descriptor *ep;
        int i;
 
-       iface_desc = &dev->intf->altsetting[0];
+       iface_desc = dev->intf->cur_altsetting;
 
        for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                ep = &iface_desc->endpoint[i].desc;
index 07d2f3aa2c026c99e0358266ff781e8d893c75a5..1b9957f12459a9780ac9e94a73fa773cca6483de 100644 (file)
@@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
        struct kvaser_cmd *cmd;
        int err;
 
-       cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
        if (!cmd)
                return -ENOMEM;
 
@@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
        struct kvaser_cmd *cmd;
        int rc;
 
-       cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;
 
@@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
        struct kvaser_cmd *cmd;
        int rc;
 
-       cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;
 
@@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
        struct usb_endpoint_descriptor *endpoint;
        int i;
 
-       iface_desc = &dev->intf->altsetting[0];
+       iface_desc = dev->intf->cur_altsetting;
 
        for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                endpoint = &iface_desc->endpoint[i].desc;
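
The kvaser_usb_leaf changes above switch command allocations from kmalloc() to kzalloc(); the buffer is handed to the device over USB, and zero-initializing it keeps padding and unset fields from leaking uninitialized kernel memory onto the bus. The general pattern (field and constant names illustrative):

    cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);        /* kmalloc + zeroing */
    if (!cmd)
            return -ENOMEM;
    cmd->id = CMD_EXAMPLE;  /* illustrative; only fields set here carry data */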
index 464af939cd8af53f8f00ac84268749d1ae056113..c1dbab8c896d593fb6c4da48a2757cad4cdf0ae3 100644 (file)
@@ -60,6 +60,8 @@ enum xcan_reg {
        XCAN_TXMSG_BASE_OFFSET  = 0x0100, /* TX Message Space */
        XCAN_RXMSG_BASE_OFFSET  = 0x1100, /* RX Message Space */
        XCAN_RXMSG_2_BASE_OFFSET        = 0x2100, /* RX Message Space */
+       XCAN_AFR_2_MASK_OFFSET  = 0x0A00, /* Acceptance Filter MASK */
+       XCAN_AFR_2_ID_OFFSET    = 0x0A04, /* Acceptance Filter ID */
 };
 
 #define XCAN_FRAME_ID_OFFSET(frame_base)       ((frame_base) + 0x00)
@@ -1809,6 +1811,11 @@ static int xcan_probe(struct platform_device *pdev)
 
        pm_runtime_put(&pdev->dev);
 
+       if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
+               priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
+               priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
+       }
+
        netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
                   priv->reg_base, ndev->irq, priv->can.clock.freq,
                   hw_tx_max, priv->tx_max);
index 36828f2100307a099c48a06975194dd6cf79419f..edacacfc9365ebfae88cb29f71a9c59002393a5f 100644 (file)
@@ -347,7 +347,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
         * frames should be flooded or not.
         */
        b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
-       mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN;
+       mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
        b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
 }
 
@@ -526,6 +526,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
 
        cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
 
+       b53_br_egress_floods(ds, port, true, true);
+
        if (dev->ops->irq_enable)
                ret = dev->ops->irq_enable(dev, port);
        if (ret)
@@ -641,6 +643,8 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
        b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
 
        b53_brcm_hdr_setup(dev->ds, port);
+
+       b53_br_egress_floods(dev->ds, port, true, true);
 }
 
 static void b53_enable_mib(struct b53_device *dev)
@@ -1821,19 +1825,26 @@ int b53_br_egress_floods(struct dsa_switch *ds, int port,
        struct b53_device *dev = ds->priv;
        u16 uc, mc;
 
-       b53_read16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, &uc);
+       b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
        if (unicast)
                uc |= BIT(port);
        else
                uc &= ~BIT(port);
-       b53_write16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, uc);
+       b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+
+       b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+       if (multicast)
+               mc |= BIT(port);
+       else
+               mc &= ~BIT(port);
+       b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
 
-       b53_read16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, &mc);
+       b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
        if (multicast)
                mc |= BIT(port);
        else
                mc &= ~BIT(port);
-       b53_write16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, mc);
+       b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
 
        return 0;
 
index e43040c9f9ee7f1a7e4ea9de54661a220adb357e..3e8635311d0de1517d6d63b5dc4556cf30ead192 100644 (file)
@@ -68,7 +68,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
                /* Force link status for IMP port */
                reg = core_readl(priv, offset);
-               reg |= (MII_SW_OR | LINK_STS);
+               reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
                core_writel(priv, reg, offset);
 
                /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
index f3f0c3f07391a231a34b561b77b97806a737202d..1962c8330daa06cad2f567858a294b9ea86aa8a9 100644 (file)
@@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
                return -EINVAL;
        }
 
-       ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+       ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
 
        /* Locate the first rule available */
        if (fs->location == RX_CLS_LOC_ANY)
@@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
 
                if (rule->fs.flow_type != fs->flow_type ||
                    rule->fs.ring_cookie != fs->ring_cookie ||
-                   rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+                   rule->fs.h_ext.data[0] != fs->h_ext.data[0])
                        continue;
 
                switch (fs->flow_type & ~FLOW_EXT) {
@@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
                return -EINVAL;
        }
 
-       ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+       ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
 
        layout = &udf_tcpip6_layout;
        slice_num = bcm_sf2_get_slice_number(layout, 0);
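
The two ip_frag lines above correct which half of the ethtool flow spec is consulted: h_ext holds the user-supplied value while m_ext holds its mask, and !!(... & 1) clamps the word to a single flag bit instead of letting stray bits reach the hardware. Restated as a sketch:

    __be32 raw = fs->h_ext.data[0];                 /* value, not the m_ext mask */
    bool ip_frag = !!(be32_to_cpu(raw) & 1);        /* clamp to 0 or 1 */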
index 120a65d3e3efac3dfb2b323f1a841d55c72f966b..b016cc205f81f02c1c27a43a5f7618770c7a15ae 100644 (file)
@@ -360,6 +360,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
 {
        u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
 
+       /* Use the default high priority for management frames sent to
+        * the CPU.
+        */
+       port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
        return mv88e6390_g1_monitor_write(chip, ptr, port);
 }
 
index bc5a6b2bb1e48caa933e89aa77f382f33d62cc3e..5324c6f4ae902355b18e3b3de05e5d7918556e99 100644 (file)
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST         0x2000
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST          0x2100
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST             0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI     0x00e0
 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK                        0x00ff
 
 /* Offset 0x1C: Global Control 2 */
index 7fe256c5739d0c56489af4246ad028247c260878..0b43c650e100f7d19abce9963effeedbd46abcc9 100644 (file)
@@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port)
 }
 
 static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
-                                   phy_interface_t mode)
+                                   phy_interface_t mode, bool force)
 {
        u8 lane;
        u16 cmode;
@@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                cmode = 0;
        }
 
-       /* cmode doesn't change, nothing to do for us */
-       if (cmode == chip->ports[port].cmode)
+       /* cmode doesn't change, nothing to do for us unless forced */
+       if (cmode == chip->ports[port].cmode && !force)
                return 0;
 
        lane = mv88e6xxx_serdes_get_lane(chip, port);
@@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
        if (port != 9 && port != 10)
                return -EOPNOTSUPP;
 
-       return mv88e6xxx_port_set_cmode(chip, port, mode);
+       return mv88e6xxx_port_set_cmode(chip, port, mode, false);
 }
 
 int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
@@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                break;
        }
 
-       return mv88e6xxx_port_set_cmode(chip, port, mode);
+       return mv88e6xxx_port_set_cmode(chip, port, mode, false);
 }
 
 static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
@@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       return mv88e6xxx_port_set_cmode(chip, port, mode);
+       return mv88e6xxx_port_set_cmode(chip, port, mode, true);
 }
 
 int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
index 0031ca81434688fad12af92f05336a664173fc42..6f9804093150fad30ec19b04fe43a68c5beb0e0c 100644 (file)
@@ -2,6 +2,7 @@
 config NET_DSA_MSCC_FELIX
        tristate "Ocelot / Felix Ethernet switch support"
        depends on NET_DSA && PCI
+       depends on NET_VENDOR_MICROSEMI
        select MSCC_OCELOT_SWITCH
        select NET_DSA_TAG_OCELOT
        help
index a51ac088c0bcafd4274aac22540bd97b62f8d6bb..bb91f3d17cf26b9d91a126bcde283c5dc13f5f80 100644 (file)
@@ -582,7 +582,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
        struct device *dev = &priv->spidev->dev;
        struct device_node *child;
 
-       for_each_child_of_node(ports_node, child) {
+       for_each_available_child_of_node(ports_node, child) {
                struct device_node *phy_node;
                phy_interface_t phy_mode;
                u32 index;
@@ -1569,8 +1569,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
 
        if (enabled) {
                /* Enable VLAN filtering. */
-               tpid  = ETH_P_8021AD;
-               tpid2 = ETH_P_8021Q;
+               tpid  = ETH_P_8021Q;
+               tpid2 = ETH_P_8021AD;
        } else {
                /* Disable VLAN filtering. */
                tpid  = ETH_P_SJA1105;
@@ -1579,9 +1579,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
 
        table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
        general_params = table->entries;
-       /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
-       general_params->tpid = tpid;
        /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+       general_params->tpid = tpid;
+       /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
        general_params->tpid2 = tpid2;
        /* When VLAN filtering is on, we need to at least be able to
         * decode management traffic through the "backup plan".
@@ -1855,7 +1855,7 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
        if (!clone)
                goto out;
 
-       sja1105_ptp_txtstamp_skb(ds, slot, clone);
+       sja1105_ptp_txtstamp_skb(ds, port, clone);
 
 out:
        mutex_unlock(&priv->mgmt_lock);
index 54258a25031db159a0da856a4d5b56ddd28ce1a7..43ab7589d0d0c530d976bec1437b4c94c2b00d55 100644 (file)
@@ -234,7 +234,7 @@ int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
        if (rw == SPI_WRITE)
                priv->info->ptp_cmd_packing(buf, cmd, PACK);
 
-       rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+       rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
                              SJA1105_SIZE_PTP_CMD);
 
        if (rw == SPI_READ)
@@ -659,7 +659,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
        ptp_data->clock = NULL;
 }
 
-void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
                              struct sk_buff *skb)
 {
        struct sja1105_private *priv = ds->priv;
@@ -679,7 +679,7 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
                goto out;
        }
 
-       rc = sja1105_ptpegr_ts_poll(ds, slot, &ts);
+       rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
        if (rc < 0) {
                dev_err(ds->dev, "timed out polling for tstamp\n");
                kfree_skb(skb);
index 0d03e13e99092e36c93adcb946f7f0f26f717b1b..63d2311817c474a460ac26218c93dacfe0678aa7 100644 (file)
@@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
        return size;
 }
 
+/* TPID and TPID2 are intentionally reversed so that semantic
+ * compatibility with E/T is kept.
+ */
 static size_t
 sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
                                         enum packing_op op)
@@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
        sja1105_packing(buf, &entry->mirr_port,   141, 139, size, op);
        sja1105_packing(buf, &entry->vlmarker,    138, 107, size, op);
        sja1105_packing(buf, &entry->vlmask,      106,  75, size, op);
-       sja1105_packing(buf, &entry->tpid,        74,  59, size, op);
+       sja1105_packing(buf, &entry->tpid2,       74,  59, size, op);
        sja1105_packing(buf, &entry->ignore2stf,   58,  58, size, op);
-       sja1105_packing(buf, &entry->tpid2,       57,  42, size, op);
+       sja1105_packing(buf, &entry->tpid,        57,  42, size, op);
        sja1105_packing(buf, &entry->queue_ts,     41,  41, size, op);
        sja1105_packing(buf, &entry->egrmirrvid,   40,  29, size, op);
        sja1105_packing(buf, &entry->egrmirrpcp,   28,  26, size, op);
index 26b925b5daced04b2eebb353010220d927f2e239..fa6750d973d7b1a28528062b2dbd30f39868f87e 100644 (file)
@@ -477,11 +477,6 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
        if (admin->cycle_time_extension)
                return -ENOTSUPP;
 
-       if (!ns_to_sja1105_delta(admin->base_time)) {
-               dev_err(ds->dev, "A base time of zero is not hardware-allowed\n");
-               return -ERANGE;
-       }
-
        for (i = 0; i < admin->num_entries; i++) {
                s64 delta_ns = admin->entries[i].interval;
                s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
index 80ef3e15bd225aca10c6afa9fb8ed3569499906c..9daef4c8feef2c0c4d2be3cd039e55c2110492aa 100644 (file)
@@ -1791,7 +1791,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
        sdev->pdev = pdev;
        sdev->netdev = dev;
-       sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0),
+       sdev->regs = ioremap(pci_resource_start(pdev, 0),
                                     pci_resource_len(pdev, 0));
        if (!sdev->regs) {
                dev_err(&pdev->dev, "failed to map registers\n");
index 4cd53fc338b5da69a93c01fa4510f868294a5fa2..1671c1f36691d450d6a2c38905659d511ec5fd32 100644 (file)
@@ -1332,10 +1332,10 @@ static int request_and_map(struct platform_device *pdev, const char *name,
                return -EBUSY;
        }
 
-       *ptr = devm_ioremap_nocache(device, region->start,
+       *ptr = devm_ioremap(device, region->start,
                                    resource_size(region));
        if (*ptr == NULL) {
-               dev_err(device, "ioremap_nocache of %s failed!", name);
+               dev_err(device, "ioremap of %s failed!", name);
                return -ENOMEM;
        }
 
index 7c941eba0bc9b30b373ab77d9f6b3bb4667a90ae..0ce37d54ed108f0a34167306247ba94eb9833050 100644 (file)
@@ -72,7 +72,7 @@
 /*****************************************************************************/
 /* ENA adaptive interrupt moderation settings */
 
-#define ENA_INTR_INITIAL_TX_INTERVAL_USECS             196
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS             64
 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS             0
 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION              1
 
index a3250dcf7d53b8adcbeabbc23875c73226bad309..fc96c66b44cb55bbdebb67ce5d6265ff5b7e2351 100644 (file)
@@ -315,10 +315,9 @@ static int ena_get_coalesce(struct net_device *net_dev,
                ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
                        ena_dev->intr_delay_resolution;
 
-       if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
-               coalesce->rx_coalesce_usecs =
-                       ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
-                       * ena_dev->intr_delay_resolution;
+       coalesce->rx_coalesce_usecs =
+               ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
+               * ena_dev->intr_delay_resolution;
 
        coalesce->use_adaptive_rx_coalesce =
                ena_com_get_adaptive_moderation_enabled(ena_dev);
@@ -367,12 +366,6 @@ static int ena_set_coalesce(struct net_device *net_dev,
 
        ena_update_tx_rings_intr_moderation(adapter);
 
-       if (coalesce->use_adaptive_rx_coalesce) {
-               if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
-                       ena_com_enable_adaptive_moderation(ena_dev);
-               return 0;
-       }
-
        rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
                                                               coalesce->rx_coalesce_usecs);
        if (rc)
@@ -380,10 +373,13 @@ static int ena_set_coalesce(struct net_device *net_dev,
 
        ena_update_rx_rings_intr_moderation(adapter);
 
-       if (!coalesce->use_adaptive_rx_coalesce) {
-               if (ena_com_get_adaptive_moderation_enabled(ena_dev))
-                       ena_com_disable_adaptive_moderation(ena_dev);
-       }
+       if (coalesce->use_adaptive_rx_coalesce &&
+           !ena_com_get_adaptive_moderation_enabled(ena_dev))
+               ena_com_enable_adaptive_moderation(ena_dev);
+
+       if (!coalesce->use_adaptive_rx_coalesce &&
+           ena_com_get_adaptive_moderation_enabled(ena_dev))
+               ena_com_disable_adaptive_moderation(ena_dev);
 
        return 0;
 }
index d46a912002ff2cac74029730d449e5a3eb64ad94..948583fdcc2864a8b60ac35b661a5cb27eeb8bcb 100644 (file)
@@ -1238,8 +1238,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
        struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
        struct ena_ring *tx_ring, *rx_ring;
 
-       u32 tx_work_done;
-       u32 rx_work_done;
+       int tx_work_done;
+       int rx_work_done = 0;
        int tx_budget;
        int napi_comp_call = 0;
        int ret;
@@ -1256,7 +1256,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
        }
 
        tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
-       rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+       /* On netpoll the budget is zero and the handler should only clean the
+        * tx completions.
+        */
+       if (likely(budget))
+               rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
 
        /* If the device is about to reset or down, avoid unmask
         * the interrupt and return 0 so NAPI won't reschedule
index 1793950f058270d805183635f9ce8cec1e993052..307e402db8c90d8fd00347aa9f51813faeb85598 100644 (file)
@@ -1161,7 +1161,7 @@ static int au1000_probe(struct platform_device *pdev)
 
        /* aup->mac is the base address of the MAC's registers */
        aup->mac = (struct mac_reg *)
-                       ioremap_nocache(base->start, resource_size(base));
+                       ioremap(base->start, resource_size(base));
        if (!aup->mac) {
                dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
                err = -ENXIO;
@@ -1169,7 +1169,7 @@ static int au1000_probe(struct platform_device *pdev)
        }
 
        /* Setup some variables for quick register address access */
-       aup->enable = (u32 *)ioremap_nocache(macen->start,
+       aup->enable = (u32 *)ioremap(macen->start,
                                                resource_size(macen));
        if (!aup->enable) {
                dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
@@ -1178,7 +1178,7 @@ static int au1000_probe(struct platform_device *pdev)
        }
        aup->mac_id = pdev->id;
 
-       aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
+       aup->macdma = ioremap(macdma->start, resource_size(macdma));
        if (!aup->macdma) {
                dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
                err = -ENXIO;
index a880f10e3e7039bf5e270ea373ad55b065ce7cc0..8083173f1a8f3d9ba1cb92ceb9210b8c0122e171 100644 (file)
@@ -129,13 +129,13 @@ struct xgbe_stats {
 
 #define XGMAC_MMC_STAT(_string, _var)                          \
        { _string,                                              \
-         FIELD_SIZEOF(struct xgbe_mmc_stats, _var),            \
+         sizeof_field(struct xgbe_mmc_stats, _var),            \
          offsetof(struct xgbe_prv_data, mmc_stats._var),       \
        }
 
 #define XGMAC_EXT_STAT(_string, _var)                          \
        { _string,                                              \
-         FIELD_SIZEOF(struct xgbe_ext_stats, _var),            \
+         sizeof_field(struct xgbe_ext_stats, _var),            \
          offsetof(struct xgbe_prv_data, ext_stats._var),       \
        }
 
index a17a4da7bc158466082c5dd2591a0f9ff26bf05a..c85e3e29012c0be813dde299aa8731653362a791 100644 (file)
@@ -403,6 +403,8 @@ int aq_nic_start(struct aq_nic_s *self)
        if (err < 0)
                goto err_exit;
 
+       aq_nic_set_loopback(self);
+
        err = self->aq_hw_ops->hw_start(self->aq_hw);
        if (err < 0)
                goto err_exit;
@@ -413,8 +415,6 @@ int aq_nic_start(struct aq_nic_s *self)
 
        INIT_WORK(&self->service_task, aq_nic_service_task);
 
-       aq_nic_set_loopback(self);
-
        timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
        aq_nic_service_timer_cb(&self->service_timer);
 
index 2bb329606794b0f19a94bf214ad7c3d143dc5974..6b27af0db4992888ec1d2648ef6b51f5c89aabc3 100644 (file)
@@ -253,7 +253,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
                                goto err_free_aq_hw;
                        }
 
-                       self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
+                       self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
                        if (!self->aq_hw->mmio) {
                                err = -EIO;
                                goto err_free_aq_hw;
index 58e891af6e09018682d7be6435923f9e38bf1e1b..ec041f78d0634426d76e23a23678c6be8321a69b 100644 (file)
@@ -1525,9 +1525,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
        .rx_extract_ts           = hw_atl_b0_rx_extract_ts,
        .extract_hwts            = hw_atl_b0_extract_hwts,
        .hw_set_offload          = hw_atl_b0_hw_offload_set,
-       .hw_get_hw_stats         = hw_atl_utils_get_hw_stats,
-       .hw_get_fw_version       = hw_atl_utils_get_fw_version,
-       .hw_set_offload          = hw_atl_b0_hw_offload_set,
        .hw_set_loopback         = hw_atl_b0_set_loopback,
        .hw_set_fc               = hw_atl_b0_set_fc,
 };
index 8910b62e67ed79d938696fedbe5a2a7eaf7bdc03..f547baa6c95499e5f16313ea47b2c04f18f105ca 100644 (file)
@@ -667,9 +667,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
        u32 speed;
 
        mpi_state = hw_atl_utils_mpi_get_state(self);
-       speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
-                            FW2X_RATE_2G5 | FW2X_RATE_5G |
-                            FW2X_RATE_10G);
+       speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;
 
        if (!speed) {
                link_status->mbps = 0U;
index 8f5021091eeed5a138ed33056e0e71ee23912012..60ba69db48c603432cc907855efb3f79a2e10989 100644 (file)
@@ -313,7 +313,7 @@ struct ag71xx {
        struct ag71xx_desc *stop_desc;
        dma_addr_t stop_desc_dma;
 
-       int phy_if_mode;
+       phy_interface_t phy_if_mode;
 
        struct delayed_work restart_work;
        struct timer_list oom_timer;
@@ -1687,8 +1687,7 @@ static int ag71xx_probe(struct platform_device *pdev)
                goto err_free;
        }
 
-       ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
-                                           resource_size(res));
+       ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!ag->mac_base) {
                err = -ENOMEM;
                goto err_free;
@@ -1744,7 +1743,7 @@ static int ag71xx_probe(struct platform_device *pdev)
                eth_random_addr(ndev->dev_addr);
        }
 
-       err = of_get_phy_mode(np, ag->phy_if_mode);
+       err = of_get_phy_mode(np, &ag->phy_if_mode);
        if (err) {
                netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
                goto err_free;
index 035dbb1b2c985bd6bcd45a2a7782e1381b4c7ef0..ec25fd81985d63cc2821ee5774d616d9ae696a1a 100644 (file)
@@ -1516,8 +1516,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
        int ethaddr_bytes = ETH_ALEN;
 
        memset(ppattern + offset, 0xff, magicsync);
-       for (j = 0; j < magicsync; j++)
-               set_bit(len++, (unsigned long *) pmask);
+       for (j = 0; j < magicsync; j++) {
+               pmask[len >> 3] |= BIT(len & 7);
+               len++;
+       }
 
        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
@@ -1529,7 +1531,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
                for (k = 0; k< ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                (j * ETH_ALEN) + k] = macaddr[k];
-                       set_bit(len++, (unsigned long *) pmask);
+                       pmask[len >> 3] |= BIT(len & 7);
+                       len++;
                }
        }
        return len - 1;
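
The b44 change above drops set_bit() on an (unsigned long *) cast of the u8 pattern-mask buffer: set_bit() addresses native-endian long words, so the cast misplaces bits on big-endian machines and can write past the end of a short byte array. The byte-wise equivalent used instead:

    /* Bit n of a u8 mask array lives in byte n / 8, bit n % 8. */
    static inline void mask_set_bit(u8 *mask, unsigned int n)
    {
            mask[n >> 3] |= BIT(n & 7);
    }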
index 825af709708ef7674795158be92fa7fb01fab303..d6b1a153f9df27be9716db319090263de18ca20a 100644 (file)
@@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
                ring->switch_queue = qp;
                ring->switch_port = port;
                ring->inspect = true;
-               priv->ring_map[q + port * num_tx_queues] = ring;
+               priv->ring_map[qp + port * num_tx_queues] = ring;
                qp++;
        }
 
@@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
        struct net_device *slave_dev;
        unsigned int num_tx_queues;
        struct net_device *dev;
-       unsigned int q, port;
+       unsigned int q, qp, port;
 
        priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
        if (priv->netdev != info->master)
@@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
                        continue;
 
                ring->inspect = false;
-               priv->ring_map[q + port * num_tx_queues] = NULL;
+               qp = ring->switch_queue;
+               priv->ring_map[qp + port * num_tx_queues] = NULL;
        }
 
        return 0;
index 8b08cb18e36387e8465dda447f3b42150cab28bb..3f63ffd7561bfa1ef34b43cefefbb90a8fcfe542 100644 (file)
@@ -1109,7 +1109,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
                for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                        u32 func_config =
                                MF_CFG_RD(bp,
-                                         func_mf_config[BP_PORT(bp) + 2 * i].
+                                         func_mf_config[BP_PATH(bp) + 2 * i].
                                          config);
                        func_num +=
                                ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
index 192ff8d5da3247c5f9f3b2fa0d7382adbdd83fb6..61fa32cdd3e33f4745f713e37a0fb281a3d21ecd 100644 (file)
@@ -9976,10 +9976,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
  */
 static void bnx2x_parity_recover(struct bnx2x *bp)
 {
-       bool global = false;
        u32 error_recovered, error_unrecovered;
-       bool is_parity;
+       bool is_parity, global = false;
+#ifdef CONFIG_BNX2X_SRIOV
+       int vf_idx;
+
+       for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
+               struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
 
+               if (vf)
+                       vf->state = VF_LOST;
+       }
+#endif
        DP(NETIF_MSG_HW, "Handling parity\n");
        while (1) {
                switch (bp->recovery_state) {
@@ -14045,7 +14053,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                        rc = -ENOMEM;
                        goto init_one_freemem;
                }
-               bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+               bp->doorbells = ioremap(pci_resource_start(pdev, 2),
                                                doorbell_size);
        }
        if (!bp->doorbells) {
index 7a6e82db423123585574c88765fb00f14ab3b603..bacc8552bce1c98f7e9b9733e8cd1445948cecd4 100644 (file)
@@ -1536,8 +1536,11 @@ void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
        ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
         func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
 
+#define BNX2X_VFS_VLAN_CREDIT(bp)      \
+       (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT)
+
 #define PF_VLAN_CREDIT_E2(bp, func_num)                                         \
-       ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+       ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \
         func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
 
 #endif /* BNX2X_SP_VERBS */
index b6ebd92ec565f14317b369c1bc6bc980f88d04f6..3a716c0154159135eaf78910e3c6240b4d176a80 100644 (file)
@@ -139,6 +139,7 @@ struct bnx2x_virtf {
 #define VF_ACQUIRED    1       /* VF acquired, but not initialized */
 #define VF_ENABLED     2       /* VF Enabled */
 #define VF_RESET       3       /* VF FLR'd, pending cleanup */
+#define VF_LOST                4       /* Recovery while VFs are loaded */
 
        bool flr_clnup_stage;   /* true during flr cleanup */
        bool malicious;         /* true if FW indicated so, until FLR */
index 0752b7fa4d9c005db32f7b537c43ffcd9df51f31..ea0e9394f898683d9b9564812c296a343859c94e 100644 (file)
@@ -2107,6 +2107,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 {
        int i;
 
+       if (vf->state == VF_LOST) {
+               /* Just ack the FW and return if VFs are lost
+                * in case of parity error. VFs are supposed to be timedout
+                * on waiting for PF response.
+                */
+               DP(BNX2X_MSG_IOV,
+                  "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
+
+               storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+               return;
+       }
+
        /* check if tlv type is known */
        if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
                /* Lock the per vf op mutex and note the locker's identity.
index 85983f0e3134725ee483bd04f72a617b7418d8cf..e6f18f6070ef7527b1f3552d049b08ee01f30998 100644 (file)
@@ -2001,6 +2001,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
        case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
                u32 data1 = le32_to_cpu(cmpl->event_data1);
 
+               if (!bp->fw_health)
+                       goto async_event_process_exit;
+
                bp->fw_reset_timestamp = jiffies;
                bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
                if (!bp->fw_reset_min_dsecs)
@@ -4421,8 +4424,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
                            FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 
        req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
-       flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
-               FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
+       flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+       if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+               flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
                         FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
@@ -6186,7 +6190,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
                tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
                val = clamp_t(u16, tmr, 1,
                              coal_cap->cmpl_aggr_dma_tmr_during_int_max);
-               req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+               req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
                req->enables |=
                        cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
        }
@@ -7115,14 +7119,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                goto err_recovery_out;
-       if (!fw_health) {
-               fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
-               bp->fw_health = fw_health;
-               if (!fw_health) {
-                       rc = -ENOMEM;
-                       goto err_recovery_out;
-               }
-       }
        fw_health->flags = le32_to_cpu(resp->flags);
        if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
            !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
@@ -8796,6 +8792,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
                if (fw_reset) {
                        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
                                bnxt_ulp_stop(bp);
+                       bnxt_free_ctx_mem(bp);
+                       kfree(bp->ctx);
+                       bp->ctx = NULL;
                        rc = bnxt_fw_init_one(bp);
                        if (rc) {
                                set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -9990,8 +9989,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
        struct bnxt_fw_health *fw_health = bp->fw_health;
        u32 val;
 
-       if (!fw_health || !fw_health->enabled ||
-           test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+       if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
                return;
 
        if (fw_health->tmr_counter) {
@@ -10482,6 +10480,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
        bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
 }
 
+static void bnxt_alloc_fw_health(struct bnxt *bp)
+{
+       if (bp->fw_health)
+               return;
+
+       if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
+           !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+               return;
+
+       bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
+       if (!bp->fw_health) {
+               netdev_warn(bp->dev, "Failed to allocate fw_health\n");
+               bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+               bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+       }
+}
+
 static int bnxt_fw_init_one_p1(struct bnxt *bp)
 {
        int rc;
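
bnxt_alloc_fw_health() establishes the invariant the surrounding hunks rely on: fw_health is allocated exactly once, only when a relevant capability is advertised, and the capability bits are cleared if the allocation fails, so code guarded by those bits may use bp->fw_health without a NULL check. A rough userspace analogue; only the two flag names come from the driver, everything else is illustrative.

#include <stdio.h>
#include <stdlib.h>

#define CAP_HOT_RESET		0x1
#define CAP_ERROR_RECOVERY	0x2

struct dev {
	unsigned int fw_cap;
	void *fw_health;
};

static void alloc_fw_health(struct dev *d)
{
	if (d->fw_health)
		return;		/* already allocated on an earlier pass */

	if (!(d->fw_cap & (CAP_HOT_RESET | CAP_ERROR_RECOVERY)))
		return;		/* neither feature advertised */

	d->fw_health = calloc(1, 64);	/* 64: placeholder size */
	if (!d->fw_health)		/* keep caps consistent on failure */
		d->fw_cap &= ~(CAP_HOT_RESET | CAP_ERROR_RECOVERY);
}

int main(void)
{
	struct dev d = { .fw_cap = CAP_ERROR_RECOVERY };

	alloc_fw_health(&d);
	printf("fw_health %sallocated\n", d.fw_health ? "" : "not ");
	free(d.fw_health);
	return 0;
}
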
@@ -10528,6 +10543,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
                netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
                            rc);
 
+       bnxt_alloc_fw_health(bp);
        rc = bnxt_hwrm_error_recovery_qcfg(bp);
        if (rc)
                netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
@@ -10609,6 +10625,12 @@ static int bnxt_fw_init_one(struct bnxt *bp)
        rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
        if (rc)
                return rc;
+
+       /* In case fw capabilities have changed, destroy the reporters
+        * that are no longer needed and create the newly supported ones.
+        */
+       bnxt_dl_fw_reporters_destroy(bp, false);
+       bnxt_dl_fw_reporters_create(bp);
        bnxt_fw_init_one_p3(bp);
        return 0;
 }
@@ -10751,8 +10773,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
                return;
        case BNXT_FW_RESET_STATE_ENABLE_DEV:
-               if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
-                   bp->fw_health) {
+               if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
                        u32 val;
 
                        val = bnxt_fw_health_readl(bp,
@@ -11044,11 +11065,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
        struct flow_keys *keys1 = &f1->fkeys;
        struct flow_keys *keys2 = &f2->fkeys;
 
-       if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
-           keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
-           keys1->ports.ports == keys2->ports.ports &&
-           keys1->basic.ip_proto == keys2->basic.ip_proto &&
-           keys1->basic.n_proto == keys2->basic.n_proto &&
+       if (keys1->basic.n_proto != keys2->basic.n_proto ||
+           keys1->basic.ip_proto != keys2->basic.ip_proto)
+               return false;
+
+       if (keys1->basic.n_proto == htons(ETH_P_IP)) {
+               if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+                   keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+                       return false;
+       } else {
+               if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
+                          sizeof(keys1->addrs.v6addrs.src)) ||
+                   memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
+                          sizeof(keys1->addrs.v6addrs.dst)))
+                       return false;
+       }
+
+       if (keys1->ports.ports == keys2->ports.ports &&
            keys1->control.flags == keys2->control.flags &&
            ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
            ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
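
The rewritten match above no longer compares the IPv4 union members for IPv6 flows; it branches on n_proto and memcmp()s the 128-bit addresses. A self-contained sketch of that comparison, with a simplified layout rather than the kernel's struct flow_keys:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct addrs {
	uint16_t n_proto;			/* 0x0800 means IPv4 here */
	union {
		struct { uint32_t src, dst; } v4;
		struct { uint8_t src[16], dst[16]; } v6;
	};
};

static bool addrs_match(const struct addrs *a, const struct addrs *b)
{
	if (a->n_proto != b->n_proto)
		return false;

	if (a->n_proto == 0x0800)	/* IPv4: scalar compare */
		return a->v4.src == b->v4.src && a->v4.dst == b->v4.dst;

	/* IPv6: 128-bit addresses are not scalars, so memcmp them */
	return !memcmp(a->v6.src, b->v6.src, sizeof(a->v6.src)) &&
	       !memcmp(a->v6.dst, b->v6.dst, sizeof(a->v6.dst));
}

int main(void)
{
	struct addrs a = { .n_proto = 0x0800, .v4 = { 1, 2 } };
	struct addrs b = a;

	printf("match: %d\n", addrs_match(&a, &b));
	return 0;
}
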
@@ -11340,7 +11373,7 @@ int bnxt_get_port_parent_id(struct net_device *dev,
                return -EOPNOTSUPP;
 
        /* The PF and its VF-reps only support the switchdev framework */
-       if (!BNXT_PF(bp))
+       if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
                return -EOPNOTSUPP;
 
        ppid->id_len = sizeof(bp->switch_id);
@@ -11396,11 +11429,11 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(dev);
 
-       if (BNXT_PF(bp)) {
+       if (BNXT_PF(bp))
                bnxt_sriov_disable(bp);
-               bnxt_dl_unregister(bp);
-       }
 
+       bnxt_dl_fw_reporters_destroy(bp, true);
+       bnxt_dl_unregister(bp);
        pci_disable_pcie_error_reporting(pdev);
        unregister_netdev(dev);
        bnxt_shutdown_tc(bp);
@@ -11415,6 +11448,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        bnxt_dcb_free(bp);
        kfree(bp->edev);
        bp->edev = NULL;
+       kfree(bp->fw_health);
+       bp->fw_health = NULL;
        bnxt_cleanup_pci(bp);
        bnxt_free_ctx_mem(bp);
        kfree(bp->ctx);
@@ -11711,6 +11746,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
        put_unaligned_le32(dw, &dsn[0]);
        pci_read_config_dword(pdev, pos + 4, &dw);
        put_unaligned_le32(dw, &dsn[4]);
+       bp->flags |= BNXT_FLAG_DSN_VALID;
        return 0;
 }
 
@@ -11822,9 +11858,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        if (BNXT_PF(bp)) {
                /* Read the adapter's DSN to use as the eswitch switch_id */
-               rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
-               if (rc)
-                       goto init_err_pci_clean;
+               bnxt_pcie_dsn_get(bp, bp->switch_id);
        }
 
        /* MTU range: 60 - FW defined max */
@@ -11875,8 +11909,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                goto init_err_cleanup_tc;
 
-       if (BNXT_PF(bp))
-               bnxt_dl_register(bp);
+       bnxt_dl_register(bp);
+       bnxt_dl_fw_reporters_create(bp);
 
        netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
                    board_info[ent->driver_data].name,
index 505af5cfb1bd7712998a20b14a807ca8498b49ab..f14335433a64db8763bfc1a2d50c39973b33c1f0 100644 (file)
@@ -1532,6 +1532,7 @@ struct bnxt {
        #define BNXT_FLAG_NO_AGG_RINGS  0x20000
        #define BNXT_FLAG_RX_PAGE_MODE  0x40000
        #define BNXT_FLAG_MULTI_HOST    0x100000
+       #define BNXT_FLAG_DSN_VALID     0x200000
        #define BNXT_FLAG_DOUBLE_DB     0x400000
        #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
        #define BNXT_FLAG_DIM           0x2000000
@@ -1936,9 +1937,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type)
        case HWRM_CFA_ENCAP_RECORD_FREE:
        case HWRM_CFA_DECAP_FILTER_ALLOC:
        case HWRM_CFA_DECAP_FILTER_FREE:
-       case HWRM_CFA_NTUPLE_FILTER_ALLOC:
-       case HWRM_CFA_NTUPLE_FILTER_FREE:
-       case HWRM_CFA_NTUPLE_FILTER_CFG:
        case HWRM_CFA_EM_FLOW_ALLOC:
        case HWRM_CFA_EM_FLOW_FREE:
        case HWRM_CFA_EM_FLOW_CFG:
index acb2dd64c023dcc60e5f8f766ef13c7ed1456567..3eedd44772182fc923cbd2c6fb3eb151d68e0dfa 100644 (file)
@@ -39,11 +39,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
                                     struct netlink_ext_ack *extack)
 {
        struct bnxt *bp = devlink_health_reporter_priv(reporter);
-       struct bnxt_fw_health *health = bp->fw_health;
        u32 val, health_status;
        int rc;
 
-       if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+       if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
                return 0;
 
        val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
@@ -126,21 +125,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
        .recover = bnxt_fw_fatal_recover,
 };
 
-static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+void bnxt_dl_fw_reporters_create(struct bnxt *bp)
 {
        struct bnxt_fw_health *health = bp->fw_health;
 
-       if (!health)
+       if (!bp->dl || !health)
                return;
 
-       health->fw_reporter =
-               devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
-                                              0, false, bp);
-       if (IS_ERR(health->fw_reporter)) {
-               netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
-                           PTR_ERR(health->fw_reporter));
-               health->fw_reporter = NULL;
-       }
+       if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
+               goto err_recovery;
 
        health->fw_reset_reporter =
                devlink_health_reporter_create(bp->dl,
@@ -150,8 +143,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
                netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
                            PTR_ERR(health->fw_reset_reporter));
                health->fw_reset_reporter = NULL;
+               bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+       }
+
+err_recovery:
+       if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+               return;
+
+       if (!health->fw_reporter) {
+               health->fw_reporter =
+                       devlink_health_reporter_create(bp->dl,
+                                                      &bnxt_dl_fw_reporter_ops,
+                                                      0, false, bp);
+               if (IS_ERR(health->fw_reporter)) {
+                       netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
+                                   PTR_ERR(health->fw_reporter));
+                       health->fw_reporter = NULL;
+                       bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+                       return;
+               }
        }
 
+       if (health->fw_fatal_reporter)
+               return;
+
        health->fw_fatal_reporter =
                devlink_health_reporter_create(bp->dl,
                                               &bnxt_dl_fw_fatal_reporter_ops,
@@ -160,24 +175,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
                netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
                            PTR_ERR(health->fw_fatal_reporter));
                health->fw_fatal_reporter = NULL;
+               bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
        }
 }
 
-static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
 {
        struct bnxt_fw_health *health = bp->fw_health;
 
-       if (!health)
+       if (!bp->dl || !health)
                return;
 
-       if (health->fw_reporter)
-               devlink_health_reporter_destroy(health->fw_reporter);
-
-       if (health->fw_reset_reporter)
+       if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
+           health->fw_reset_reporter) {
                devlink_health_reporter_destroy(health->fw_reset_reporter);
+               health->fw_reset_reporter = NULL;
+       }
 
-       if (health->fw_fatal_reporter)
+       if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
+               return;
+
+       if (health->fw_reporter) {
+               devlink_health_reporter_destroy(health->fw_reporter);
+               health->fw_reporter = NULL;
+       }
+
+       if (health->fw_fatal_reporter) {
                devlink_health_reporter_destroy(health->fw_fatal_reporter);
+               health->fw_fatal_reporter = NULL;
+       }
 }
 
 void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
@@ -185,9 +211,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
        struct bnxt_fw_health *fw_health = bp->fw_health;
        struct bnxt_fw_reporter_ctx fw_reporter_ctx;
 
-       if (!fw_health)
-               return;
-
        fw_reporter_ctx.sp_event = event;
        switch (event) {
        case BNXT_FW_RESET_NOTIFY_SP_EVENT:
@@ -247,6 +270,8 @@ static const struct devlink_ops bnxt_dl_ops = {
        .flash_update     = bnxt_dl_flash_update,
 };
 
+static const struct devlink_ops bnxt_vf_dl_ops;
+
 enum bnxt_dl_param_id {
        BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
        BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
@@ -460,7 +485,10 @@ int bnxt_dl_register(struct bnxt *bp)
                return -ENOTSUPP;
        }
 
-       dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+       if (BNXT_PF(bp))
+               dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+       else
+               dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
        if (!dl) {
                netdev_warn(bp->dev, "devlink_alloc failed");
                return -ENOMEM;
@@ -479,6 +507,9 @@ int bnxt_dl_register(struct bnxt *bp)
                goto err_dl_free;
        }
 
+       if (!BNXT_PF(bp))
+               return 0;
+
        rc = devlink_params_register(dl, bnxt_dl_params,
                                     ARRAY_SIZE(bnxt_dl_params));
        if (rc) {
@@ -506,8 +537,6 @@ int bnxt_dl_register(struct bnxt *bp)
 
        devlink_params_publish(dl);
 
-       bnxt_dl_fw_reporters_create(bp);
-
        return 0;
 
 err_dl_port_unreg:
@@ -530,12 +559,14 @@ void bnxt_dl_unregister(struct bnxt *bp)
        if (!dl)
                return;
 
-       bnxt_dl_fw_reporters_destroy(bp);
-       devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
-                                      ARRAY_SIZE(bnxt_dl_port_params));
-       devlink_port_unregister(&bp->dl_port);
-       devlink_params_unregister(dl, bnxt_dl_params,
-                                 ARRAY_SIZE(bnxt_dl_params));
+       if (BNXT_PF(bp)) {
+               devlink_port_params_unregister(&bp->dl_port,
+                                              bnxt_dl_port_params,
+                                              ARRAY_SIZE(bnxt_dl_port_params));
+               devlink_port_unregister(&bp->dl_port);
+               devlink_params_unregister(dl, bnxt_dl_params,
+                                         ARRAY_SIZE(bnxt_dl_params));
+       }
        devlink_unregister(dl);
        devlink_free(dl);
 }
index 665d4bdcd8c0269421003dd7ee66bc167d586017..6db6c3dac4728de4e51d513111d06a11f7f5fd39 100644 (file)
@@ -58,6 +58,8 @@ struct bnxt_dl_nvm_param {
 
 void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
 void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_fw_reporters_create(struct bnxt *bp);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
 int bnxt_dl_register(struct bnxt *bp);
 void bnxt_dl_unregister(struct bnxt *bp);
 
index 2ccf79cdcb1ef91e176fdfcc8e1f237c92de478d..08d56ec7b68a503003c6cab305d56630803687ff 100644 (file)
@@ -3071,8 +3071,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
                        }
                }
 
-               if (info->dest_buf)
-                       memcpy(info->dest_buf + off, dma_buf, len);
+               if (info->dest_buf) {
+                       if ((info->seg_start + off + len) <=
+                           BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+                               memcpy(info->dest_buf + off, dma_buf, len);
+                       } else {
+                               rc = -ENOBUFS;
+                               break;
+                       }
+               }
 
                if (cmn_req->req_type ==
                                cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
@@ -3126,7 +3133,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
 
 static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
                                           u16 segment_id, u32 *seg_len,
-                                          void *buf, u32 offset)
+                                          void *buf, u32 buf_len, u32 offset)
 {
        struct hwrm_dbg_coredump_retrieve_input req = {0};
        struct bnxt_hwrm_dbg_dma_info info = {NULL};
@@ -3141,8 +3148,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
                                seq_no);
        info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
                                     data_len);
-       if (buf)
+       if (buf) {
                info.dest_buf = buf + offset;
+               info.buf_len = buf_len;
+               info.seg_start = offset;
+       }
 
        rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
        if (!rc)
@@ -3232,14 +3242,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
 static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
 {
        u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+       u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
        struct coredump_segment_record *seg_record = NULL;
-       u32 offset = 0, seg_hdr_len, seg_record_len;
        struct bnxt_coredump_segment_hdr seg_hdr;
        struct bnxt_coredump coredump = {NULL};
        time64_t start_time;
        u16 start_utc;
        int rc = 0, i;
 
+       if (buf)
+               buf_len = *dump_len;
+
        start_time = ktime_get_real_seconds();
        start_utc = sys_tz.tz_minuteswest * 60;
        seg_hdr_len = sizeof(seg_hdr);
@@ -3272,6 +3285,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
                u32 duration = 0, seg_len = 0;
                unsigned long start, end;
 
+               if (buf && ((offset + seg_hdr_len) >
+                           BNXT_COREDUMP_BUF_LEN(buf_len))) {
+                       rc = -ENOBUFS;
+                       goto err;
+               }
+
                start = jiffies;
 
                rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
@@ -3284,9 +3303,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
 
                /* Write segment data into the buffer */
                rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
-                                                    &seg_len, buf,
+                                                    &seg_len, buf, buf_len,
                                                     offset + seg_hdr_len);
-               if (rc)
+               if (rc == -ENOBUFS)
+                       goto err;
+               else if (rc)
                        netdev_err(bp->dev,
                                   "Failed to retrieve coredump for seg = %d\n",
                                   seg_record->segment_id);
@@ -3316,7 +3337,8 @@ err:
                                          rc);
        kfree(coredump.data);
        *dump_len += sizeof(struct bnxt_coredump_record);
-
+       if (rc == -ENOBUFS)
+               netdev_err(bp->dev, "Firmware returned coredump larger than the dump buffer\n");
        return rc;
 }
 
index 4428d0abcbc1a61b45037149e944595b8ff14f0c..3576d951727b6ff6986f88d300bfb6a8294aee3b 100644 (file)
@@ -31,6 +31,8 @@ struct bnxt_coredump {
        u16             total_segs;
 };
 
+#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
+
 struct bnxt_hwrm_dbg_dma_info {
        void *dest_buf;
        int dest_buf_size;
@@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info {
        u16 seq_off;
        u16 data_len_off;
        u16 segs;
+       u32 seg_start;
+       u32 buf_len;
 };
 
 struct hwrm_dbg_cmn_input {
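
BNXT_COREDUMP_BUF_LEN() reserves room at the end of the user buffer for the trailing coredump record, and the retrieve path now refuses to copy past that watermark, returning -ENOBUFS instead of overrunning it. The bounds check in miniature; the record type and sizes below are placeholders.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct coredump_record { uint8_t raw[32]; };	/* placeholder size */

#define COREDUMP_BUF_LEN(len) ((len) - sizeof(struct coredump_record))

/* Copy one retrieved chunk into dest, honouring the reserved tail. */
static int copy_seg_chunk(uint8_t *dest, uint32_t buf_len,
			  uint32_t seg_start, uint32_t off,
			  const uint8_t *chunk, uint32_t len)
{
	if (seg_start + off + len > COREDUMP_BUF_LEN(buf_len))
		return -ENOBUFS;	/* would clobber the record area */

	memcpy(dest + seg_start + off, chunk, len);
	return 0;
}

int main(void)
{
	uint8_t buf[64];
	uint8_t chunk[16] = { 0 };

	/* 64-byte buffer keeps 32 bytes of payload + 32-byte record */
	printf("%d\n", copy_seg_chunk(buf, sizeof(buf), 0, 0, chunk, 16));
	printf("%d\n", copy_seg_chunk(buf, sizeof(buf), 0, 24, chunk, 16));
	return 0;
}
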
index c601ff7b8f61cdde62177430af3e2c61f481dd6a..4a316c4b3fa80d31b4bc0f8546e9ed291bdabd2b 100644 (file)
@@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 {
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_hw_resc *hw_resc;
        int max_idx, max_cp_rings;
        int avail_msix, idx;
+       int total_vecs;
        int rc = 0;
 
        ASSERT_RTNL();
@@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
        }
        edev->ulp_tbl[ulp_id].msix_base = idx;
        edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
-       if (bp->total_irqs < (idx + avail_msix)) {
+       hw_resc = &bp->hw_resc;
+       total_vecs = idx + avail_msix;
+       if (bp->total_irqs < total_vecs ||
+           (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
                if (netif_running(dev)) {
                        bnxt_close_nic(bp, true, false);
                        rc = bnxt_open_nic(bp, true, false);
@@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
        }
 
        if (BNXT_NEW_RM(bp)) {
-               struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
                int resv_msix;
 
                resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
index f9bf7d7250abca8565684a2afa3bd9a29f347bbe..b010b34cdaf835fdf23eea5ffceaddce4386ec98 100644 (file)
@@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
        struct net_device *dev;
        int rc, i;
 
+       if (!(bp->flags & BNXT_FLAG_DSN_VALID))
+               return -ENODEV;
+
        bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
        if (!bp->vf_reps)
                return -ENOMEM;
index 120fa05a39ff201f40baa53559de9a54c50304e0..0a8624be44a912d197f1b9ae03f994b6c0871fe6 100644 (file)
@@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
                                  DMA_END_ADDR);
 
        /* Initialize Tx NAPI */
-       netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
-                      NAPI_POLL_WEIGHT);
+       netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
+                         NAPI_POLL_WEIGHT);
 }
 
 /* Initialize a RDMA ring */
index 1604ad32e9202c7282661ccf62c99ee54ed6addb..f991537818fe939c1a66c36c19bc971ae1979de5 100644 (file)
@@ -2537,7 +2537,7 @@ static int sbmac_probe(struct platform_device *pldev)
 
        res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
        BUG_ON(!res);
-       sbm_base = ioremap_nocache(res->start, resource_size(res));
+       sbm_base = ioremap(res->start, resource_size(res));
        if (!sbm_base) {
                printk(KERN_ERR "%s: unable to map device registers\n",
                       dev_name(&pldev->dev));
index e338272931d14be95613a4c0ea97b610dd800439..01a50a4b21135fd2c347c6d80aadc3ca874bb5b6 100644 (file)
@@ -3477,7 +3477,7 @@ bnad_init(struct bnad *bnad,
        bnad->pcidev = pdev;
        bnad->mmio_start = pci_resource_start(pdev, 0);
        bnad->mmio_len = pci_resource_len(pdev, 0);
-       bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
+       bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
        if (!bnad->bar0) {
                dev_err(&pdev->dev, "ioremap for bar0 failed\n");
                return -ENOMEM;
index 9c767ee252acc53e47fd0f093d17ea7e1670d43a..f7d87c71aaa9c399ccb462ae377a2e5c8b42db10 100644 (file)
@@ -611,21 +611,24 @@ static const struct phylink_mac_ops macb_phylink_ops = {
        .mac_link_up = macb_mac_link_up,
 };
 
+static bool macb_phy_handle_exists(struct device_node *dn)
+{
+       dn = of_parse_phandle(dn, "phy-handle", 0);
+       of_node_put(dn);
+       return dn != NULL;
+}
+
 static int macb_phylink_connect(struct macb *bp)
 {
+       struct device_node *dn = bp->pdev->dev.of_node;
        struct net_device *dev = bp->dev;
        struct phy_device *phydev;
        int ret;
 
-       if (bp->pdev->dev.of_node &&
-           of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
-               ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
-                                            0);
-               if (ret) {
-                       netdev_err(dev, "Could not attach PHY (%d)\n", ret);
-                       return ret;
-               }
-       } else {
+       if (dn)
+               ret = phylink_of_phy_connect(bp->phylink, dn, 0);
+
+       if (!dn || (ret && !macb_phy_handle_exists(dn))) {
                phydev = phy_find_first(bp->mii_bus);
                if (!phydev) {
                        netdev_err(dev, "no PHY found\n");
@@ -634,10 +637,11 @@ static int macb_phylink_connect(struct macb *bp)
 
                /* attach the mac to the phy */
                ret = phylink_connect_phy(bp->phylink, phydev);
-               if (ret) {
-                       netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
-                       return ret;
-               }
+       }
+
+       if (ret) {
+               netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+               return ret;
        }
 
        phylink_start(bp->phylink);
@@ -664,9 +668,30 @@ static int macb_mii_probe(struct net_device *dev)
        return 0;
 }
 
+static int macb_mdiobus_register(struct macb *bp)
+{
+       struct device_node *child, *np = bp->pdev->dev.of_node;
+
+       /* Only create PHYs from the device tree if at least one PHY is
+        * described. Otherwise scan the entire MDIO bus. We do this to
+        * support old device trees that did not follow best practices
+        * and did not describe their network PHYs.
+        */
+       for_each_available_child_of_node(np, child)
+               if (of_mdiobus_child_is_phy(child)) {
+                       /* The loop increments the child refcount,
+                       /* The loop increments the child refcount;
+                        */
+                       of_node_put(child);
+
+                       return of_mdiobus_register(bp->mii_bus, np);
+               }
+
+       return mdiobus_register(bp->mii_bus);
+}
+
 static int macb_mii_init(struct macb *bp)
 {
-       struct device_node *np;
        int err = -ENXIO;
 
        /* Enable management port */
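
Note the of_node_put() before the early return above: for_each_available_child_of_node() holds a reference on the node it hands to the loop body. A toy version of a reference-counted child scan; the node API here is invented for illustration, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct node { const char *name; bool is_phy; int refcount; };

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n) { n->refcount--; }

/* The iterator takes a reference on each visited child, so an early
 * return must drop it explicitly, which is what the of_node_put() does.
 */
static bool any_phy_child(struct node *children, int n)
{
	for (int i = 0; i < n; i++) {
		node_get(&children[i]);
		if (children[i].is_phy) {
			node_put(&children[i]);	/* balance before return */
			return true;
		}
		node_put(&children[i]);
	}
	return false;
}

int main(void)
{
	struct node kids[] = { { "mac", false, 0 }, { "phy@0", true, 0 } };

	printf("phy described: %d\n", any_phy_child(kids, 2));
	printf("leaked refs: %d\n", kids[0].refcount + kids[1].refcount);
	return 0;
}
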
@@ -688,9 +713,7 @@ static int macb_mii_init(struct macb *bp)
 
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
-       np = bp->pdev->dev.of_node;
-
-       err = of_mdiobus_register(bp->mii_bus, np);
+       err = macb_mdiobus_register(bp);
        if (err)
                goto err_out_free_mdiobus;
 
@@ -4069,7 +4092,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
        mgmt->rate = 0;
        mgmt->hw.init = &init;
 
-       *tx_clk = clk_register(NULL, &mgmt->hw);
+       *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
        if (IS_ERR(*tx_clk))
                return PTR_ERR(*tx_clk);
 
@@ -4397,7 +4420,6 @@ err_out_free_netdev:
 
 err_disable_clocks:
        clk_disable_unprepare(tx_clk);
-       clk_unregister(tx_clk);
        clk_disable_unprepare(hclk);
        clk_disable_unprepare(pclk);
        clk_disable_unprepare(rx_clk);
@@ -4427,7 +4449,6 @@ static int macb_remove(struct platform_device *pdev)
                pm_runtime_dont_use_autosuspend(&pdev->dev);
                if (!pm_runtime_suspended(&pdev->dev)) {
                        clk_disable_unprepare(bp->tx_clk);
-                       clk_unregister(bp->tx_clk);
                        clk_disable_unprepare(bp->hclk);
                        clk_disable_unprepare(bp->pclk);
                        clk_disable_unprepare(bp->rx_clk);
index 0cc2338d8d2a81216d7a91e0d48ad704b7921fbd..dfc77507b159a8aa336a18eedded4f8ec2e01a03 100644 (file)
@@ -205,11 +205,11 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct,
        major_version = (u32)__cvmx_bootmem_desc_get(
                        oct, oct->bootmem_desc_addr,
                        offsetof(struct cvmx_bootmem_desc, major_version),
-                       FIELD_SIZEOF(struct cvmx_bootmem_desc, major_version));
+                       sizeof_field(struct cvmx_bootmem_desc, major_version));
        minor_version = (u32)__cvmx_bootmem_desc_get(
                        oct, oct->bootmem_desc_addr,
                        offsetof(struct cvmx_bootmem_desc, minor_version),
-                       FIELD_SIZEOF(struct cvmx_bootmem_desc, minor_version));
+                       sizeof_field(struct cvmx_bootmem_desc, minor_version));
 
        dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__,
                major_version);
@@ -237,13 +237,13 @@ static const struct cvmx_bootmem_named_block_desc
                                oct, named_addr,
                                offsetof(struct cvmx_bootmem_named_block_desc,
                                         base_addr),
-                               FIELD_SIZEOF(
+                               sizeof_field(
                                        struct cvmx_bootmem_named_block_desc,
                                        base_addr));
                desc->size = __cvmx_bootmem_desc_get(oct, named_addr,
                                offsetof(struct cvmx_bootmem_named_block_desc,
                                         size),
-                               FIELD_SIZEOF(
+                               sizeof_field(
                                        struct cvmx_bootmem_named_block_desc,
                                        size));
 
@@ -268,20 +268,20 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
                                        oct, oct->bootmem_desc_addr,
                                        offsetof(struct cvmx_bootmem_desc,
                                                 named_block_array_addr),
-                                       FIELD_SIZEOF(struct cvmx_bootmem_desc,
+                                       sizeof_field(struct cvmx_bootmem_desc,
                                                     named_block_array_addr));
                u32 num_blocks = (u32)__cvmx_bootmem_desc_get(
                                        oct, oct->bootmem_desc_addr,
                                        offsetof(struct cvmx_bootmem_desc,
                                                 nb_num_blocks),
-                                       FIELD_SIZEOF(struct cvmx_bootmem_desc,
+                                       sizeof_field(struct cvmx_bootmem_desc,
                                                     nb_num_blocks));
 
                u32 name_length = (u32)__cvmx_bootmem_desc_get(
                                        oct, oct->bootmem_desc_addr,
                                        offsetof(struct cvmx_bootmem_desc,
                                                 named_block_name_len),
-                                       FIELD_SIZEOF(struct cvmx_bootmem_desc,
+                                       sizeof_field(struct cvmx_bootmem_desc,
                                                     named_block_name_len));
 
                u64 named_addr = named_block_array_addr;
@@ -292,7 +292,7 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
                                         offsetof(
                                        struct cvmx_bootmem_named_block_desc,
                                        size),
-                                        FIELD_SIZEOF(
+                                        sizeof_field(
                                        struct cvmx_bootmem_named_block_desc,
                                        size));
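
FIELD_SIZEOF() was renamed sizeof_field() tree-wide; both yield the size of a struct member without needing an instance. A standalone equivalent for illustration; the macro body follows the usual null-pointer idiom, and since sizeof never evaluates its operand, nothing is actually dereferenced.

#include <stddef.h>
#include <stdio.h>

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct bootmem_desc {
	unsigned int major_version;
	unsigned int minor_version;
};

int main(void)
{
	printf("major_version: %zu bytes at offset %zu\n",
	       sizeof_field(struct bootmem_desc, major_version),
	       offsetof(struct bootmem_desc, major_version));
	return 0;
}
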
 
index 58f89f6a040fe39ef02d18d1fa65123d7cc7619a..883cfa9c4b6da4496b694e4854439eda0df0a27a 100644 (file)
@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
                if (!(adapter->flags & FULL_INIT_DONE))
                        return -EIO;    /* need the memory controllers */
                if (copy_from_user(&t, useraddr, sizeof(t)))
@@ -3265,7 +3267,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_free_adapter;
        }
 
-       adapter->regs = ioremap_nocache(mmio_start, mmio_len);
+       adapter->regs = ioremap(mmio_start, mmio_len);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
index a70ac2097892d290f6ab183c9a076bdc8f4fc996..becee29f5df7a647981c142b5987c0d0fc652cd9 100644 (file)
@@ -504,6 +504,7 @@ struct link_config {
 
        enum cc_pause  requested_fc;     /* flow control user has requested */
        enum cc_pause  fc;               /* actual link flow control */
+       enum cc_pause  advertised_fc;    /* actual advertised flow control */
 
        enum cc_fec    requested_fec;    /* Forward Error Correction: */
        enum cc_fec    fec;              /* requested and actual in use */
index 93868dca186af52f1da184cea208f9631b644099..4144c230dc97ab8cd44e661198023d90cb5df10f 100644 (file)
@@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
 static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = seq_tab_get_idx(seq->private, *pos + 1);
-       if (v)
-               ++*pos;
+       ++(*pos);
        return v;
 }
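
The ->next() fix advances *pos unconditionally: bumping the position only when a record exists leaves seq_file asking for the same offset forever once the table is exhausted. A toy iterator showing why the position must move even past the end; the seq_file machinery is only mimicked here.

#include <stdio.h>

static const int table[] = { 10, 20, 30 };

static const int *get_idx(long pos)
{
	return (pos >= 0 && pos < 3) ? &table[pos] : NULL;
}

/* Correct ->next(): advance pos even when the lookup fails, so the
 * caller's loop can terminate instead of retrying the same offset.
 */
static const int *seq_next(long *pos)
{
	const int *v = get_idx(*pos + 1);

	++(*pos);
	return v;
}

int main(void)
{
	long pos = 0;

	for (const int *v = get_idx(pos); v; v = seq_next(&pos))
		printf("%d\n", *v);
	return 0;
}
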
 
@@ -3048,6 +3047,9 @@ static int sge_queue_entries(const struct adapter *adap)
        int tot_uld_entries = 0;
        int i;
 
+       if (!is_uld(adap))
+               goto lld_only;
+
        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_TX_MAX; i++)
                tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
@@ -3058,6 +3060,7 @@ static int sge_queue_entries(const struct adapter *adap)
        }
        mutex_unlock(&uld_mutex);
 
+lld_only:
        return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
               (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
               tot_uld_entries +
index 20ab3b6285a2f750331aa14408dabf413c4c70bd..c837382ee522da107650f1c1f00c74741f7dda52 100644 (file)
@@ -807,8 +807,8 @@ static void get_pauseparam(struct net_device *dev,
        struct port_info *p = netdev_priv(dev);
 
        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
-       epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
-       epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+       epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
+       epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
 }
 
 static int set_pauseparam(struct net_device *dev,
index 12ff69b3ba91d740cd95dfef3861d82057f9f971..0dedd3e9c31e9752c3eb7b9982287159c4876f07 100644 (file)
@@ -3135,9 +3135,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
+       struct ch_sched_queue qe = { 0 };
+       struct ch_sched_params p = { 0 };
        struct sched_class *e;
-       struct ch_sched_params p;
-       struct ch_sched_queue qe;
        u32 req_rate;
        int err = 0;
 
@@ -3154,6 +3154,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
                return -EINVAL;
        }
 
+       qe.queue = index;
+       e = cxgb4_sched_queue_lookup(dev, &qe);
+       if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
+               dev_err(adap->pdev_dev,
+                       "Queue %u already bound to class %u of type: %u\n",
+                       index, e->idx, e->info.u.params.level);
+               return -EBUSY;
+       }
+
        /* Convert from Mbps to Kbps */
        req_rate = rate * 1000;
 
@@ -3183,7 +3192,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
                return 0;
 
        /* Fetch any available unused or matching scheduling class */
-       memset(&p, 0, sizeof(p));
        p.type = SCHED_CLASS_TYPE_PACKET;
        p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
        p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
index 102b370fbd3eff9ba791f15257624c2ffcf5f690..6d485803ddbe66b17b431468ca7bd3b3b3a5f9e9 100644 (file)
@@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
        struct flow_action *actions = &cls->rule->action;
        struct port_info *pi = netdev2pinfo(dev);
        struct flow_action_entry *entry;
+       struct ch_sched_queue qe;
+       struct sched_class *e;
        u64 max_link_rate;
        u32 i, speed;
        int ret;
@@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
                }
        }
 
+       for (i = 0; i < pi->nqsets; i++) {
+               memset(&qe, 0, sizeof(qe));
+               qe.queue = i;
+
+               e = cxgb4_sched_queue_lookup(dev, &qe);
+               if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Some queues are already bound to different class");
+                       return -EBUSY;
+               }
+       }
+
        return 0;
 }
 
+static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct ch_sched_queue qe;
+       int ret;
+       u32 i;
+
+       for (i = 0; i < pi->nqsets; i++) {
+               qe.queue = i;
+               qe.class = tc;
+               ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
+               if (ret)
+                       goto out_free;
+       }
+
+       return 0;
+
+out_free:
+       while (i--) {
+               qe.queue = i;
+               qe.class = SCHED_CLS_NONE;
+               cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+       }
+
+       return ret;
+}
+
+static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct ch_sched_queue qe;
+       u32 i;
+
+       for (i = 0; i < pi->nqsets; i++) {
+               qe.queue = i;
+               qe.class = SCHED_CLS_NONE;
+               cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+       }
+}
+
 static int cxgb4_matchall_alloc_tc(struct net_device *dev,
                                   struct tc_cls_matchall_offload *cls)
 {
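
cxgb4_matchall_tc_bind_queues() above binds each queue in turn and, on the first failure, uses while (i--) to unbind only the queues already bound. The rollback idiom in isolation, with stand-in bind/unbind functions:

#include <stdio.h>

#define NQ 4

static int bound[NQ];

static int bind_queue(unsigned int i, int tc)
{
	if (i == 2)			/* inject a failure for the demo */
		return -1;
	bound[i] = tc;
	return 0;
}

static void unbind_queue(unsigned int i) { bound[i] = 0; }

static int bind_all(int tc)
{
	unsigned int i;

	for (i = 0; i < NQ; i++)
		if (bind_queue(i, tc))
			goto out_free;
	return 0;

out_free:
	while (i--)			/* undo queues 0..i-1 only */
		unbind_queue(i);
	return -1;
}

int main(void)
{
	printf("bind_all: %d, q0 now %d\n", bind_all(7), bound[0]);
	return 0;
}
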
@@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
        struct adapter *adap = netdev2adap(dev);
        struct flow_action_entry *entry;
        struct sched_class *e;
+       int ret;
        u32 i;
 
        tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
@@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
                return -ENOMEM;
        }
 
+       ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
+       if (ret) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Could not bind queues to traffic class");
+               goto out_free;
+       }
+
        tc_port_matchall->egress.hwtc = e->idx;
        tc_port_matchall->egress.cookie = cls->cookie;
        tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
        return 0;
+
+out_free:
+       cxgb4_sched_class_free(dev, e->idx);
+       return ret;
 }
 
 static void cxgb4_matchall_free_tc(struct net_device *dev)
@@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
        struct adapter *adap = netdev2adap(dev);
 
        tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+       cxgb4_matchall_tc_unbind_queues(dev);
        cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
 
        tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
index 477973d2e3412446bf4e0a0dbf0927959b9ffec1..ec3eb45ee3b4896f87ee8d9f739d829aacce4ea2 100644 (file)
@@ -12,8 +12,9 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        u32 speed, qcount = 0, qoffset = 0;
+       u32 start_a, start_b, end_a, end_b;
        int ret;
-       u8 i;
+       u8 i, j;
 
        if (!mqprio->qopt.num_tc)
                return 0;
@@ -47,6 +48,31 @@ static int cxgb4_mqprio_validate(struct net_device *dev,
                qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
                qcount += mqprio->qopt.count[i];
 
+               start_a = mqprio->qopt.offset[i];
+               end_a = start_a + mqprio->qopt.count[i] - 1;
+               for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
+                       start_b = mqprio->qopt.offset[j];
+                       end_b = start_b + mqprio->qopt.count[j] - 1;
+
+                       /* If queue count is 0, then the traffic
+                        * belonging to this class will not use
+                        * ETHOFLD queues. So, no need to validate
+                        * further.
+                        */
+                       if (!mqprio->qopt.count[i])
+                               break;
+
+                       if (!mqprio->qopt.count[j])
+                               continue;
+
+                       if (max_t(u32, start_a, start_b) <=
+                           min_t(u32, end_a, end_b)) {
+                               netdev_err(dev,
+                                          "Queues can't overlap across tc\n");
+                               return -EINVAL;
+                       }
+               }
+
                /* Convert byte per second to bits per second */
                min_rate += (mqprio->min_rate[i] * 8);
                max_rate += (mqprio->max_rate[i] * 8);
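
The validation treats each class as the closed interval [offset, offset + count - 1] and flags a clash when max(start_a, start_b) <= min(end_a, end_b), skipping empty ranges. The same test on its own:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned int offset, count; };

/* Closed intervals overlap iff the larger start does not pass the
 * smaller end; a zero-count range never overlaps anything.
 */
static bool ranges_overlap(struct range a, struct range b)
{
	if (!a.count || !b.count)
		return false;

	unsigned int start = a.offset > b.offset ? a.offset : b.offset;
	unsigned int end_a = a.offset + a.count - 1;
	unsigned int end_b = b.offset + b.count - 1;

	return start <= (end_a < end_b ? end_a : end_b);
}

int main(void)
{
	struct range tc0 = { 0, 4 }, tc1 = { 4, 4 }, tc2 = { 3, 2 };

	printf("%d %d\n", ranges_overlap(tc0, tc1), ranges_overlap(tc0, tc2));
	return 0;
}
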
@@ -145,6 +171,10 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
                        kfree(adap->sge.eohw_rxq);
                        return -ENOMEM;
                }
+
+               refcount_set(&adap->tc_mqprio->refcnt, 1);
+       } else {
+               refcount_inc(&adap->tc_mqprio->refcnt);
        }
 
        if (!(adap->flags & CXGB4_USING_MSIX))
@@ -205,7 +235,6 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
                        cxgb4_enable_rx(adap, &eorxq->rspq);
        }
 
-       refcount_inc(&adap->tc_mqprio->refcnt);
        return 0;
 
 out_free_msix:
@@ -234,9 +263,10 @@ out_free_queues:
                t4_sge_free_ethofld_txq(adap, eotxq);
        }
 
-       kfree(adap->sge.eohw_txq);
-       kfree(adap->sge.eohw_rxq);
-
+       if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
+               kfree(adap->sge.eohw_txq);
+               kfree(adap->sge.eohw_rxq);
+       }
        return ret;
 }
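
The refcount fix sets the count to 1 when the shared ETHOFLD queues are first allocated, increments it for every later user, and lets only the final dec-and-test free them. A sketch with C11 atomics standing in for the kernel's refcount_t; this is not a faithful reimplementation, and the alloc-vs-inc race is ignored here.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int refcnt;
static void *shared;

static void get_resources(void)
{
	if (!shared) {
		shared = malloc(128);
		atomic_store(&refcnt, 1);	/* first user */
	} else {
		atomic_fetch_add(&refcnt, 1);
	}
}

static void put_resources(void)
{
	/* Free only when the last reference drops. */
	if (atomic_fetch_sub(&refcnt, 1) == 1) {
		free(shared);
		shared = NULL;
	}
}

int main(void)
{
	get_resources();
	get_resources();
	put_resources();
	printf("still shared: %d\n", shared != NULL);
	put_resources();
	return 0;
}
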
 
index e9e45006632da66dcb0bc49ba783343f8ad166b4..1a16449e9deb7075414618d5bbc51d7e65c0e2f9 100644 (file)
@@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
 static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = l2t_get_idx(seq, *pos);
-       if (v)
-               ++*pos;
+       ++(*pos);
        return v;
 }
 
index 3e61bd5d0c290c75b792d887e2dd2864eef13728..cebe1412d9609b8bfdb62ed5091c9dab64a5c3d2 100644 (file)
@@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
        return found;
 }
 
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+                                            struct ch_sched_queue *p)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct sched_queue_entry *qe = NULL;
+       struct adapter *adap = pi->adapter;
+       struct sge_eth_txq *txq;
+
+       if (p->queue < 0 || p->queue >= pi->nqsets)
+               return NULL;
+
+       txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+       qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+       return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
+}
+
 static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
 {
        struct sched_queue_entry *qe = NULL;
index e92ff68bdd0aab504cf5e674b8d3268c008b4ed8..5cc74a5a177463cc68306f947becfa8fe9390089 100644 (file)
@@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
        return true;
 }
 
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+                                            struct ch_sched_queue *p);
 int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
                           enum sched_bind_type type);
 int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
index 19d18acfc9a6f3fc79050b5e32bfcf5cdb4621f9..844fdcf55118b232204ab87dac45b25dba5b6d18 100644 (file)
@@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
                if (cc_pause & PAUSE_TX)
                        fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
                else
-                       fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+                       fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
+                                   FW_PORT_CAP32_802_3_PAUSE;
        } else if (cc_pause & PAUSE_TX) {
                fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
        }
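
With autoneg, IEEE 802.3 (Annex 28B) encodes pause in two advertised bits: PAUSE (symmetric) and ASM_DIR (asymmetric direction). Advertising receive-only pause needs both bits set, which is what the one-line change adds; transmit-only pause advertises ASM_DIR alone. A sketch of the mapping, with constants shortened for illustration:

#include <stdio.h>

#define PAUSE_RX	0x1
#define PAUSE_TX	0x2

#define CAP_PAUSE	0x1	/* symmetric pause */
#define CAP_ASM_DIR	0x2	/* asymmetric direction */

/* Map requested rx/tx pause to the advertised 802.3 bits (autoneg). */
static unsigned int cc_to_fwcap_pause(unsigned int cc)
{
	unsigned int fw = 0;

	if (cc & PAUSE_RX) {
		fw |= CAP_PAUSE;
		if (!(cc & PAUSE_TX))	/* rx only: PAUSE + ASM_DIR */
			fw |= CAP_ASM_DIR;
	} else if (cc & PAUSE_TX) {	/* tx only: ASM_DIR alone */
		fw |= CAP_ASM_DIR;
	}
	return fw;
}

int main(void)
{
	printf("rx only -> %#x\n", cc_to_fwcap_pause(PAUSE_RX));
	printf("rx+tx   -> %#x\n", cc_to_fwcap_pause(PAUSE_RX | PAUSE_TX));
	return 0;
}
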
@@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
 {
        const struct fw_port_cmd *cmd = (const void *)rpl;
-       int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
-       struct adapter *adapter = pi->adapter;
+       fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
        struct link_config *lc = &pi->link_cfg;
-       int link_ok, linkdnrc;
-       enum fw_port_type port_type;
+       struct adapter *adapter = pi->adapter;
+       unsigned int speed, fc, fec, adv_fc;
        enum fw_port_module_type mod_type;
-       unsigned int speed, fc, fec;
-       fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+       int action, link_ok, linkdnrc;
+       enum fw_port_type port_type;
 
        /* Extract the various fields from the Port Information message.
         */
+       action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
        switch (action) {
        case FW_PORT_ACTION_GET_PORT_INFO: {
                u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
@@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
        }
 
        fec = fwcap_to_cc_fec(acaps);
+       adv_fc = fwcap_to_cc_pause(acaps);
        fc = fwcap_to_cc_pause(linkattr);
        speed = fwcap_to_speed(linkattr);
 
@@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
        }
 
        if (link_ok != lc->link_ok || speed != lc->speed ||
-           fc != lc->fc || fec != lc->fec) {   /* something changed */
+           fc != lc->fc || adv_fc != lc->advertised_fc ||
+           fec != lc->fec) {
+               /* something changed */
                if (!link_ok && lc->link_ok) {
                        lc->link_down_rc = linkdnrc;
                        dev_warn_ratelimited(adapter->pdev_dev,
@@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
                }
                lc->link_ok = link_ok;
                lc->speed = speed;
+               lc->advertised_fc = adv_fc;
                lc->fc = fc;
                lc->fec = fec;
 
index f6fc0875d5b0a285cf4dc5695debdf96b3cf2744..f4d41f968afa270df17efe4d4f502b79dab5c01e 100644 (file)
@@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
        struct port_info *pi = netdev_priv(dev);
 
        pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
-       pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
-       pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+       pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
+       pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
 }
 
 /*
index ccca67cf4487d22d06f7c2b42dc6b16647cec8d0..57cfd10a99ecc8508735aa4d5accd96d75176ebb 100644 (file)
@@ -135,6 +135,7 @@ struct link_config {
 
        enum cc_pause   requested_fc;   /* flow control user has requested */
        enum cc_pause   fc;             /* actual link flow control */
+       enum cc_pause   advertised_fc;  /* actual advertised flow control */
 
        enum cc_fec     auto_fec;       /* Forward Error Correction: */
        enum cc_fec     requested_fec;  /*   "automatic" (IEEE 802.3), */
index 8a389d617a238048f268be38771e37c808dd84e7..9d49ff211cc1a50ed0d9404a05119fb70e63dd73 100644 (file)
@@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
 static void t4vf_handle_get_port_info(struct port_info *pi,
                                      const struct fw_port_cmd *cmd)
 {
-       int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
-       struct adapter *adapter = pi->adapter;
+       fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
        struct link_config *lc = &pi->link_cfg;
-       int link_ok, linkdnrc;
-       enum fw_port_type port_type;
+       struct adapter *adapter = pi->adapter;
+       unsigned int speed, fc, fec, adv_fc;
        enum fw_port_module_type mod_type;
-       unsigned int speed, fc, fec;
-       fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+       int action, link_ok, linkdnrc;
+       enum fw_port_type port_type;
 
        /* Extract the various fields from the Port Information message. */
+       action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
        switch (action) {
        case FW_PORT_ACTION_GET_PORT_INFO: {
                u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
@@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
        }
 
        fec = fwcap_to_cc_fec(acaps);
+       adv_fc = fwcap_to_cc_pause(acaps);
        fc = fwcap_to_cc_pause(linkattr);
        speed = fwcap_to_speed(linkattr);
 
@@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
        }
 
        if (link_ok != lc->link_ok || speed != lc->speed ||
-           fc != lc->fc || fec != lc->fec) {   /* something changed */
+           fc != lc->fc || adv_fc != lc->advertised_fc ||
+           fec != lc->fec) {
+               /* something changed */
                if (!link_ok && lc->link_ok) {
                        lc->link_down_rc = linkdnrc;
                        dev_warn_ratelimited(adapter->pdev_dev,
@@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
                }
                lc->link_ok = link_ok;
                lc->speed = speed;
+               lc->advertised_fc = adv_fc;
                lc->fc = fc;
                lc->fec = fec;
 
index a8f4c69252ff886a6a48bcdc1454489750a201a7..2814b96751b4f2e50f11e9e31dbdbcde08c3ec3e 100644 (file)
@@ -576,6 +576,8 @@ static int gmac_setup_txqs(struct net_device *netdev)
 
        if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
                dev_warn(geth->dev, "TX queue base is not aligned\n");
+               dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
+                                 desc_ring, port->txq_dma_base);
                kfree(skb_tab);
                return -ENOMEM;
        }
index f1a2da15dd0a6f00718eb68fb1eb82214c2c484d..7852a4308194258eaa9e61040d09665b81bc1218 100644 (file)
@@ -2039,7 +2039,7 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* remap CSR registers */
-       regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
+       regs = ioremap(pciaddr, DE_REGS_SIZE);
        if (!regs) {
                rc = -EIO;
                pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
index 5bb5abf99588747a5738e102e559f5e4108a8181..022a54a1805b46c85819964d79ac6ca3114cc379 100644 (file)
@@ -23,7 +23,7 @@ struct be_ethtool_stat {
 };
 
 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
-#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
+#define FIELDINFO(_struct, field) sizeof_field(_struct, field), \
                                        offsetof(_struct, field)
 #define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
                                        FIELDINFO(struct be_tx_stats, field)
index ea4f17f5cce77e6943340d5ccd69dc4042f1faef..c6e74ae0ff0d6950d2c0e7849abdbb0bff826dc4 100644 (file)
@@ -1087,7 +1087,7 @@ static int ethoc_probe(struct platform_device *pdev)
        priv = netdev_priv(netdev);
        priv->netdev = netdev;
 
-       priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
+       priv->iobase = devm_ioremap(&pdev->dev, netdev->base_addr,
                        resource_size(mmio));
        if (!priv->iobase) {
                dev_err(&pdev->dev, "cannot remap I/O memory space\n");
@@ -1096,7 +1096,7 @@ static int ethoc_probe(struct platform_device *pdev)
        }
 
        if (netdev->mem_end) {
-               priv->membase = devm_ioremap_nocache(&pdev->dev,
+               priv->membase = devm_ioremap(&pdev->dev,
                        netdev->mem_start, resource_size(mem));
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot remap memory space\n");
index 6a9d12dad5d9d7be22a8f10970d8173e13dc858e..a301f00952231f75371879b8b254df8abfeb4942 100644 (file)
@@ -1719,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
        int page_offset;
        unsigned int sz;
        int *count_ptr;
-       int i;
+       int i, j;
 
        vaddr = phys_to_virt(addr);
        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
@@ -1736,14 +1736,14 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
                                    SMP_CACHE_BYTES));
 
+               dma_unmap_page(priv->rx_dma_dev, sg_addr,
+                              DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
                /* We may use multiple Rx pools */
                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                if (!dpaa_bp)
                        goto free_buffers;
 
-               count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-               dma_unmap_page(priv->rx_dma_dev, sg_addr,
-                              DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                if (!skb) {
                        sz = dpaa_bp->size +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1786,7 +1786,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                        skb_add_rx_frag(skb, i - 1, head_page, frag_off,
                                        frag_len, dpaa_bp->size);
                }
+
                /* Update the pool count for the current {cpu x bpool} */
+               count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
                (*count_ptr)--;
 
                if (qm_sg_entry_is_final(&sgt[i]))
@@ -1800,26 +1802,25 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
        return skb;
 
 free_buffers:
-       /* compensate sw bpool counter changes */
-       for (i--; i >= 0; i--) {
-               dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-               if (dpaa_bp) {
-                       count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                       (*count_ptr)++;
-               }
-       }
        /* free all the SG entries */
-       for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
-               sg_addr = qm_sg_addr(&sgt[i]);
+       for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+               sg_addr = qm_sg_addr(&sgt[j]);
                sg_vaddr = phys_to_virt(sg_addr);
+               /* all pages 0..i were unmapped */
+               if (j > i)
+                       dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+                                      DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                free_pages((unsigned long)sg_vaddr, 0);
-               dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-               if (dpaa_bp) {
-                       count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                       (*count_ptr)--;
+               /* counters 0..i-1 were decremented */
+               if (j >= i) {
+                       dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+                       if (dpaa_bp) {
+                               count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+                               (*count_ptr)--;
+                       }
                }
 
-               if (qm_sg_entry_is_final(&sgt[i]))
+               if (qm_sg_entry_is_final(&sgt[j]))
                        break;
        }
        /* free the SGT fragment */
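
The reworked free_buffers path walks every SG entry with j while remembering i, the index where the forward loop bailed out: entries below i were already unmapped and already accounted against the per-CPU pool counter, so only j > i still needs dma_unmap_page() and only j >= i needs the counter decrement. The shape of the pattern, with hypothetical helpers standing in for the driver calls:

    /* Sketch: partial-unwind cleanup after the forward loop failed at i. */
    for (j = 0; j < nents; j++) {
            if (j > i)              /* entries 0..i were already unmapped */
                    unmap_entry(j);
            free_entry(j);          /* every entry still owns its page */
            if (j >= i)             /* counters 0..i-1 already decremented */
                    account_entry(j);
            if (entry_is_final(j))
                    break;
    }
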
index a9503aea527f54853720eb8aeb457f4720e7eb5b..6437fe6b9abf037e6ebee2aeea0cfdc76e496658 100644 (file)
@@ -160,10 +160,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
        irq = mc_dev->irqs[0];
        ptp_qoriq->irq = irq->msi_desc->irq;
 
-       err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL,
-                                       dpaa2_ptp_irq_handler_thread,
-                                       IRQF_NO_SUSPEND | IRQF_ONESHOT,
-                                       dev_name(dev), ptp_qoriq);
+       err = request_threaded_irq(ptp_qoriq->irq, NULL,
+                                  dpaa2_ptp_irq_handler_thread,
+                                  IRQF_NO_SUSPEND | IRQF_ONESHOT,
+                                  dev_name(dev), ptp_qoriq);
        if (err < 0) {
                dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
                goto err_free_mc_irq;
@@ -173,18 +173,20 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
                                   DPRTC_IRQ_INDEX, 1);
        if (err < 0) {
                dev_err(dev, "dprtc_set_irq_enable(): %d\n", err);
-               goto err_free_mc_irq;
+               goto err_free_threaded_irq;
        }
 
        err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps);
        if (err)
-               goto err_free_mc_irq;
+               goto err_free_threaded_irq;
 
        dpaa2_phc_index = ptp_qoriq->phc_index;
        dev_set_drvdata(dev, ptp_qoriq);
 
        return 0;
 
+err_free_threaded_irq:
+       free_irq(ptp_qoriq->irq, ptp_qoriq);
 err_free_mc_irq:
        fsl_mc_free_irqs(mc_dev);
 err_unmap:
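
Dropping devm_request_threaded_irq() for the plain variant is about teardown ordering: devm resources are only unwound after probe returns, which is too late when the IRQ must be released before fsl_mc_free_irqs(). A sketch of the resulting explicit error ladder (handler and helper names are illustrative):

    static int example_probe(struct fsl_mc_device *mc_dev, unsigned int irq,
                             void *cookie)
    {
            int err;

            err = request_threaded_irq(irq, NULL, example_irq_thread,
                                       IRQF_NO_SUSPEND | IRQF_ONESHOT,
                                       "example", cookie);
            if (err < 0)
                    goto err_free_mc_irq;

            err = example_enable_hw_irq();  /* device-specific, may fail */
            if (err < 0)
                    goto err_free_threaded_irq;
            return 0;

    err_free_threaded_irq:
            free_irq(irq, cookie);          /* released first, in order */
    err_free_mc_irq:
            fsl_mc_free_irqs(mc_dev);
            return err;
    }
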
index 05c1899f6628990e14e167e338fcc435c2ecea52..9294027e9d909836cf211c3955d75bc81c97de15 100644 (file)
@@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev,
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+       struct device *dev = &fep->pdev->dev;
        u32 *buf = (u32 *)regbuf;
        u32 i, off;
+       int ret;
+
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0)
+               return;
 
        regs->version = fec_enet_register_version;
 
@@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev,
                off >>= 2;
                buf[off] = readl(&theregs[off]);
        }
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
 }
 
 static int fec_enet_get_ts_info(struct net_device *ndev,
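
fec_enet_get_regs() now brackets the register loop with a runtime-PM reference so the device is guaranteed to be powered while the MMIO reads run. The general shape is below; strictly, the usage count taken by pm_runtime_get_sync() should also be dropped with pm_runtime_put_noidle() on failure, but the hunk, like this sketch, simply returns:

    /* Sketch: keep a device runtime-resumed across a register dump. */
    static void example_dump_regs(struct device *dev, void __iomem *base,
                                  u32 *buf, unsigned int n)
    {
            unsigned int i;

            if (pm_runtime_get_sync(dev) < 0)
                    return;                       /* device failed to resume */

            for (i = 0; i < n; i++)
                    buf[i] = readl(base + i * 4); /* safe: clocks are on */

            pm_runtime_mark_last_busy(dev);       /* restart autosuspend timer */
            pm_runtime_put_autosuspend(dev);
    }
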
index 41c6fa200e7467af31a05ef4c365705fe89f2329..e1901874c19f09417e47e78d5c1e366b51b21992 100644 (file)
@@ -110,7 +110,7 @@ do {                                                                        \
 /* Interface Mode Register (IF_MODE) */
 
 #define IF_MODE_MASK           0x00000003 /* 30-31 Mask on i/f mode bits */
-#define IF_MODE_XGMII          0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_10G            0x00000000 /* 30-31 10G interface */
 #define IF_MODE_GMII           0x00000002 /* 30-31 GMII (1G) interface */
 #define IF_MODE_RGMII          0x00000004
 #define IF_MODE_RGMII_AUTO     0x00008000
@@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
        tmp = 0;
        switch (phy_if) {
        case PHY_INTERFACE_MODE_XGMII:
-               tmp |= IF_MODE_XGMII;
+               tmp |= IF_MODE_10G;
                break;
        default:
                tmp |= IF_MODE_GMII;
index e03b30c60dcfda168291ce7f3a93306b3f3748de..c82c85ef5fb3407d1a0016573b325dd39e068ffa 100644 (file)
@@ -49,6 +49,7 @@ struct tgec_mdio_controller {
 struct mdio_fsl_priv {
        struct  tgec_mdio_controller __iomem *mdio_base;
        bool    is_little_endian;
+       bool    has_a011043;
 };
 
 static u32 xgmac_read32(void __iomem *regs,
@@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
                return ret;
 
        /* Return all Fs if nothing was there */
-       if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+       if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+           !priv->has_a011043) {
                dev_err(&bus->dev,
                        "Error while reading PHY%d reg at %d.%hhu\n",
                        phy_id, dev_addr, regnum);
@@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
        priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
                                                       "little-endian");
 
+       priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
+                                                 "fsl,erratum-a011043");
+
        ret = of_mdiobus_register(bus, np);
        if (ret) {
                dev_err(&pdev->dev, "cannot register MDIO bus\n");
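
The A011043 workaround is keyed off a devicetree boolean: of_property_read_bool() returns true whenever the property exists, so affected boards opt in with a bare fsl,erratum-a011043; line in the MDIO node. On the read path the spuriously set read-error bit is then ignored, as in this condensed sketch:

    /* Probe: capture the quirk flag from the device tree. */
    priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
                                              "fsl,erratum-a011043");

    /* Read path: treat MDIO_STAT_RD_ER as an error only when the
     * erratum is absent. */
    if ((stat & MDIO_STAT_RD_ER) && !priv->has_a011043)
            return -EIO;
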
index edec61dfc8687fff6f4867e4463753d28aab202e..9f52e72ff641d91bbeb43f2efe8d12c5a5184204 100644 (file)
@@ -418,8 +418,6 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
        rx->cnt = cnt;
        rx->fill_cnt += work_done;
 
-       /* restock desc ring slots */
-       dma_wmb();      /* Ensure descs are visible before ringing doorbell */
        gve_rx_write_doorbell(priv, rx);
        return gve_rx_work_pending(rx);
 }
index f4889431f9b7049f0046338a3bed0884ade84782..d0244feb030118e857ec44155128133322c3272a 100644 (file)
@@ -487,10 +487,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
                 * may have added descriptors without ringing the doorbell.
                 */
 
-               /* Ensure tx descs from a prior gve_tx are visible before
-                * ringing doorbell.
-                */
-               dma_wmb();
                gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
                return NETDEV_TX_BUSY;
        }
@@ -505,8 +501,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
        if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
                return NETDEV_TX_OK;
 
-       /* Ensure tx descs are visible before ringing doorbell */
-       dma_wmb();
        gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
        return NETDEV_TX_OK;
 }
index 3e9b6d543c77072b97bccc7aeb374a5eacdc69a3..150a8ccfb8b121c6d422214976ad86f364a8c324 100644 (file)
@@ -543,9 +543,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        skb_tx_timestamp(skb);
 
        hip04_set_xmit_desc(priv, phys);
-       priv->tx_head = TX_NEXT(tx_head);
        count++;
        netdev_sent_queue(ndev, skb->len);
+       priv->tx_head = TX_NEXT(tx_head);
 
        stats->tx_bytes += skb->len;
        stats->tx_packets++;
index 14ab20491fd02b1c867460ce42b4aa310f50a162..eb69e5c81a4d05a26a7210dc8bda00126ddbab25 100644 (file)
@@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
        skb = *out_skb = napi_alloc_skb(&ring_data->napi,
                                        HNS_RX_HEAD_SIZE);
        if (unlikely(!skb)) {
-               netdev_err(ndev, "alloc rx skb fail\n");
                ring->stats.sw_err_cnt++;
                return -ENOMEM;
        }
@@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
                container_of(napi, struct hns_nic_ring_data, napi);
        struct hnae_ring *ring = ring_data->ring;
 
-try_again:
        clean_complete += ring_data->poll_one(
                                ring_data, budget - clean_complete,
                                ring_data->ex_process);
@@ -1066,7 +1064,7 @@ try_again:
                        napi_complete(napi);
                        ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
                } else {
-                       goto try_again;
+                       return budget;
                }
        }
 
index 69545dd6c9380af7504dca623eafd59448f5930b..b3deb5e5ce29fbea66b38f84e9355c200505b9b8 100644 (file)
@@ -54,6 +54,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
 #define HNS3_INNER_VLAN_TAG    1
 #define HNS3_OUTER_VLAN_TAG    2
 
+#define HNS3_MIN_TX_LEN                33U
+
 /* hns3_pci_tbl - PCI Device ID Table
  *
  * Last entry must be all 0s
@@ -1405,6 +1407,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
        int bd_num = 0;
        int ret;
 
+       /* Hardware can only handle short frames above 32 bytes */
+       if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+               return NETDEV_TX_OK;
+
        /* Prefetch the data used later */
        prefetch(skb->data);
 
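
skb_put_padto() zero-pads the frame up to the requested length; on allocation failure it frees the skb and returns nonzero, which is why the driver can answer NETDEV_TX_OK either way: the packet is gone, not requeued. A sketch:

    /* Sketch: pad runt frames for hardware that cannot transmit
     * frames shorter than HNS3_MIN_TX_LEN (33) bytes. */
    static netdev_tx_t example_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
    {
            if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
                    return NETDEV_TX_OK;    /* skb already freed on failure */

            /* ... map and queue the now >= 33 byte frame ... */
            return NETDEV_TX_OK;
    }
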
index d862e9ba27e158e91afbc04669a3eb738bec765e..13dbd249f35fa346501457646cd8ef3ae0e992a2 100644 (file)
@@ -10240,7 +10240,7 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
                return ret;
        }
 
-       data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
+       data_len_per_desc = sizeof_field(struct hclge_desc, data);
        *len = 0;
        for (i = 0; i < dfx_reg_type_num; i++) {
                bd_num = bd_num_list[i];
index fbc39a2480d058d6bd8caa47ac4d5eb3a278ea60..180224eab1ca4a46c34e9c62061c7087cd22bfb4 100644 (file)
@@ -614,7 +614,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
        }
 
        memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
-              FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
+              sizeof_field(struct hnae3_knic_private_info, prio_tc));
 }
 
 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
index 60ec48fe4144693fb005ff6c16c00437369aaa9a..966aea949c0bdc5c8d2252de53ba1d6caed9c4c2 100644 (file)
@@ -450,7 +450,7 @@ static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
 
 #define HINIC_FUNC_STAT(_stat_item) {  \
        .name = #_stat_item, \
-       .size = FIELD_SIZEOF(struct hinic_vport_stats, _stat_item), \
+       .size = sizeof_field(struct hinic_vport_stats, _stat_item), \
        .offset = offsetof(struct hinic_vport_stats, _stat_item) \
 }
 
@@ -477,7 +477,7 @@ static struct hinic_stats hinic_function_stats[] = {
 
 #define HINIC_PORT_STAT(_stat_item) { \
        .name = #_stat_item, \
-       .size = FIELD_SIZEOF(struct hinic_phy_port_stats, _stat_item), \
+       .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
        .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
 }
 
@@ -571,7 +571,7 @@ static struct hinic_stats hinic_port_stats[] = {
 
 #define HINIC_TXQ_STAT(_stat_item) { \
        .name = "txq%d_"#_stat_item, \
-       .size = FIELD_SIZEOF(struct hinic_txq_stats, _stat_item), \
+       .size = sizeof_field(struct hinic_txq_stats, _stat_item), \
        .offset = offsetof(struct hinic_txq_stats, _stat_item) \
 }
 
@@ -586,7 +586,7 @@ static struct hinic_stats hinic_tx_queue_stats[] = {
 
 #define HINIC_RXQ_STAT(_stat_item) { \
        .name = "rxq%d_"#_stat_item, \
-       .size = FIELD_SIZEOF(struct hinic_rxq_stats, _stat_item), \
+       .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
        .offset = offsetof(struct hinic_rxq_stats, _stat_item) \
 }
 
index 6436a98c5953fcd92ad37e1436e784b53b39c709..22f5887578b2bdf1e9a30b86c0564b02391c04e6 100644 (file)
@@ -91,10 +91,10 @@ static int sni_82596_probe(struct platform_device *dev)
        idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
        if (!res || !ca || !options || !idprom)
                return -ENODEV;
-       mpu_addr = ioremap_nocache(res->start, 4);
+       mpu_addr = ioremap(res->start, 4);
        if (!mpu_addr)
                return -ENOMEM;
-       ca_addr = ioremap_nocache(ca->start, 4);
+       ca_addr = ioremap(ca->start, 4);
        if (!ca_addr)
                goto probe_failed_free_mpu;
 
@@ -110,7 +110,7 @@ static int sni_82596_probe(struct platform_device *dev)
        netdevice->base_addr = res->start;
        netdevice->irq = platform_get_irq(dev, 0);
 
-       eth_addr = ioremap_nocache(idprom->start, 0x10);
+       eth_addr = ioremap(idprom->start, 0x10);
        if (!eth_addr)
                goto probe_failed;
 
index c900807819244b0a00931fc191d72979f4520740..830791ab4619c6f36e75f917c607220344077313 100644 (file)
@@ -184,7 +184,7 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
                        netdev_err(netdev, "Device down!\n");
                        return -ENODEV;
                }
-               if (retry--)
+               if (!retry--)
                        break;
                if (wait_for_completion_timeout(comp_done, div_timeout))
                        return 0;
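
The one-character fix inverts the loop exit: with "if (retry--)" the loop broke out on the first pass, before ever waiting on the completion; "if (!retry--)" breaks out only once the budget is exhausted. In simplified general form (the driver also rechecks device state each pass):

    /* Sketch: poll a completion with a bounded retry budget. */
    static int wait_with_retries(struct completion *done, unsigned long tmo,
                                 int retry)
    {
            while (true) {
                    if (!retry--)           /* budget used up: give up */
                            return -ETIMEDOUT;
                    if (wait_for_completion_timeout(done, tmo))
                            return 0;       /* completed within this slice */
            }
    }
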
index 6c51b1bad8c423ea243587446d3e5fc6705ca9ad..37a2314d3e6b187dd061793870af2fc46552829b 100644 (file)
@@ -185,13 +185,12 @@ struct e1000_phy_regs {
 
 /* board specific private data structure */
 struct e1000_adapter {
+       struct timer_list watchdog_timer;
        struct timer_list phy_info_timer;
        struct timer_list blink_timer;
 
        struct work_struct reset_task;
-       struct delayed_work watchdog_task;
-
-       struct workqueue_struct *e1000_workqueue;
+       struct work_struct watchdog_task;
 
        const struct e1000_info *ei;
 
index fe7997c18a109a2935978ca5248cd7413e0a342b..7c5b18d87b49b6fd822beb2dd254e4e0ad196759 100644 (file)
@@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
                }
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->state))
-                       mod_delayed_work(adapter->e1000_workqueue,
-                                        &adapter->watchdog_task, HZ);
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
        /* Reset on uncorrectable ECC error */
@@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
                }
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->state))
-                       mod_delayed_work(adapter->e1000_workqueue,
-                                        &adapter->watchdog_task, HZ);
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
        /* Reset on uncorrectable ECC error */
@@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
                hw->mac.get_link_status = true;
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->state))
-                       mod_delayed_work(adapter->e1000_workqueue,
-                                        &adapter->watchdog_task, HZ);
+                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
        if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -4284,6 +4281,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
 
        napi_synchronize(&adapter->napi);
 
+       del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
        spin_lock(&adapter->stats64_lock);
@@ -5155,11 +5153,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
        }
 }
 
+/**
+ * e1000_watchdog - Timer Call-back
+ * @t: pointer to the timer_list containing the watchdog timer
+ **/
+static void e1000_watchdog(struct timer_list *t)
+{
+       struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+
+       /* Do the rest outside of interrupt context */
+       schedule_work(&adapter->watchdog_task);
+
+       /* TODO: make this use queue_delayed_work() */
+}
+
 static void e1000_watchdog_task(struct work_struct *work)
 {
        struct e1000_adapter *adapter = container_of(work,
                                                     struct e1000_adapter,
-                                                    watchdog_task.work);
+                                                    watchdog_task);
        struct net_device *netdev = adapter->netdev;
        struct e1000_mac_info *mac = &adapter->hw.mac;
        struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -5407,9 +5419,8 @@ link_up:
 
        /* Reset the timer */
        if (!test_bit(__E1000_DOWN, &adapter->state))
-               queue_delayed_work(adapter->e1000_workqueue,
-                                  &adapter->watchdog_task,
-                                  round_jiffies(2 * HZ));
+               mod_timer(&adapter->watchdog_timer,
+                         round_jiffies(jiffies + 2 * HZ));
 }
 
 #define E1000_TX_FLAGS_CSUM            0x00000001
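
The watchdog here is reverted to the classic two-stage pattern: a timer_list callback runs in softirq context and only schedules a work item, and the work item does the heavy lifting in process context before re-arming the timer. Stripped down:

    /* Sketch of the timer-schedules-work watchdog pattern. */
    struct example_adapter {
            struct timer_list watchdog_timer;
            struct work_struct watchdog_task;
    };

    static void example_watchdog(struct timer_list *t)
    {
            struct example_adapter *ad = from_timer(ad, t, watchdog_timer);

            schedule_work(&ad->watchdog_task);  /* defer to process context */
    }

    static void example_watchdog_task(struct work_struct *work)
    {
            struct example_adapter *ad =
                    container_of(work, struct example_adapter, watchdog_task);

            /* ... link checks, stats, ... */

            mod_timer(&ad->watchdog_timer,
                      round_jiffies(jiffies + 2 * HZ));  /* re-arm */
    }

Setup mirrors the probe hunk below: timer_setup() for the timer, INIT_WORK() for the task, and matching del_timer_sync()/cancel_work_sync() calls on teardown.
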
@@ -7449,21 +7460,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_eeprom;
        }
 
-       adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
-                                                  e1000e_driver_name);
-
-       if (!adapter->e1000_workqueue) {
-               err = -ENOMEM;
-               goto err_workqueue;
-       }
-
-       INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
-       queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
-                          0);
-
+       timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
        timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
 
        INIT_WORK(&adapter->reset_task, e1000_reset_task);
+       INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
        INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
        INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
        INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
@@ -7557,9 +7558,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_register:
-       flush_workqueue(adapter->e1000_workqueue);
-       destroy_workqueue(adapter->e1000_workqueue);
-err_workqueue:
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_release_hw_control(adapter);
 err_eeprom:
@@ -7604,17 +7602,15 @@ static void e1000_remove(struct pci_dev *pdev)
         * from being rescheduled.
         */
        set_bit(__E1000_DOWN, &adapter->state);
+       del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
 
        cancel_work_sync(&adapter->reset_task);
+       cancel_work_sync(&adapter->watchdog_task);
        cancel_work_sync(&adapter->downshift_task);
        cancel_work_sync(&adapter->update_phy_task);
        cancel_work_sync(&adapter->print_hang_task);
 
-       cancel_delayed_work(&adapter->watchdog_task);
-       flush_workqueue(adapter->e1000_workqueue);
-       destroy_workqueue(adapter->e1000_workqueue);
-
        if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
                cancel_work_sync(&adapter->tx_hwtstamp_work);
                if (adapter->tx_hwtstamp_skb) {
index c681d2d28107dd163415bc0b381b7d8508a5bb5c..68edf55ac90625b5e69e5e1009e8539281f604ae 100644 (file)
@@ -18,7 +18,7 @@ struct fm10k_stats {
 
 #define FM10K_STAT_FIELDS(_type, _name, _stat) { \
        .stat_string = _name, \
-       .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+       .sizeof_stat = sizeof_field(_type, _stat), \
        .stat_offset = offsetof(_type, _stat) \
 }
 
index cb6367334ca7816cbf9ae49e3e392c8f1200abef..4833187bd25911a760f3a7d05d0394353eec56d4 100644 (file)
@@ -1152,7 +1152,7 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
 
 static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
 {
-       return !!vsi->xdp_prog;
+       return !!READ_ONCE(vsi->xdp_prog);
 }
 
 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
index 9f0a4e92a2317f82a6bf521ce7610b28ba6391da..37514a75f9288c94c026daee1947b191251b44b4 100644 (file)
@@ -536,6 +536,11 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
                        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+               if (aq->api_maj_ver > 1 ||
+                   (aq->api_maj_ver == 1 &&
+                    aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+                       hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
                /* fall through */
        default:
                break;
index d4055037af89ebc7af31e6cc69a5526016c594c3..45b90eb11adbaa3f5d39f86c01e9ce82066b79c9 100644 (file)
@@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
         */
        pba_size--;
        if (pba_num_size < (((u32)pba_size * 2) + 1)) {
-               hw_dbg(hw, "Buffer to small for PBA data.\n");
+               hw_dbg(hw, "Buffer too small for PBA data.\n");
                return I40E_ERR_PARAM;
        }
 
index d24d8731bef02727decbf530df6746c33e0b9761..317f3f1458db443d89f2e626dca4cee3b8cff463 100644 (file)
@@ -43,7 +43,7 @@ struct i40e_stats {
  */
 #define I40E_STAT(_type, _name, _stat) { \
        .stat_string = _name, \
-       .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+       .sizeof_stat = sizeof_field(_type, _stat), \
        .stat_offset = offsetof(_type, _stat) \
 }
 
index be24d42280d823bbf9d20181b4731282afb604e2..a3da422ab05b6d2e46da9dc1016a351d190fca1c 100644 (file)
@@ -659,7 +659,7 @@ i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
 
 #define I40E_HMC_STORE(_struct, _ele)          \
        offsetof(struct _struct, _ele),         \
-       FIELD_SIZEOF(struct _struct, _ele)
+       sizeof_field(struct _struct, _ele)
 
 struct i40e_context_ele {
        u16 offset;
index 1ccabeafa44c4622b48167de77f8151e03d16fa9..2c5af6d4a6b1c641ea2c47f44469a530df363850 100644 (file)
@@ -6823,8 +6823,8 @@ void i40e_down(struct i40e_vsi *vsi)
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                i40e_clean_tx_ring(vsi->tx_rings[i]);
                if (i40e_enabled_xdp_vsi(vsi)) {
-                       /* Make sure that in-progress ndo_xdp_xmit
-                        * calls are completed.
+                       /* Make sure that in-progress ndo_xdp_xmit and
+                        * ndo_xsk_wakeup calls are completed.
                         */
                        synchronize_rcu();
                        i40e_clean_tx_ring(vsi->xdp_rings[i]);
@@ -12546,8 +12546,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
 
        old_prog = xchg(&vsi->xdp_prog, prog);
 
-       if (need_reset)
+       if (need_reset) {
+               if (!prog)
+                       /* Wait until ndo_xsk_wakeup completes. */
+                       synchronize_rcu();
                i40e_reset_and_rebuild(pf, true, true);
+       }
 
        for (i = 0; i < vsi->num_queue_pairs; i++)
                WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
index 6a3f0fc56c3b95406d9782ba52c8bb6b65657e7b..69523ac85639ef2204223352e86c5bedb62a9fbb 100644 (file)
@@ -2321,6 +2321,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
        return ret;
 }
 
+/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+       if ((!vqs->rx_queues && !vqs->tx_queues) ||
+           vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+           vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+               return false;
+
+       return true;
+}
+
 /**
  * i40e_vc_enable_queues_msg
  * @vf: pointer to the VF info
@@ -2346,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
                goto error_param;
        }
 
-       if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+       if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2408,9 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
                goto error_param;
        }
 
-       if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
-           vqs->rx_queues > I40E_MAX_VF_QUEUES ||
-           vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+       if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
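
Both queue-control handlers now funnel through one check. Since rx_queues and tx_queues are bitmaps, the legal upper bound is BIT(I40E_MAX_VF_QUEUES), not the queue count itself: the old "> I40E_MAX_VF_QUEUES" test in the disable path compared a bitmap against an index, so a perfectly valid selection such as BIT(0) | BIT(4) == 17 would have been rejected, while the enable path previously rejected only the empty case. Illustration:

    /* Sketch: why a queue bitmap is bounded by BIT(n), not n. */
    #define MAX_QUEUES 16

    static bool queues_valid(u32 rx_map, u32 tx_map)
    {
            if (!rx_map && !tx_map)
                    return false;           /* nothing selected */
            return rx_map < BIT(MAX_QUEUES) && tx_map < BIT(MAX_QUEUES);
    }
    /* queues_valid(BIT(0) | BIT(4), 0) -> true  (queues 0 and 4)
     * queues_valid(BIT(16), 0)         -> false (no queue 16)   */
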
index d07e1a8904283684f448e4a1346db6cfc6d4d9eb..f73cd917c44f72740c3339bd042d3c6e2f70ace9 100644 (file)
@@ -787,8 +787,12 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
        struct i40e_ring *ring;
 
+       if (test_bit(__I40E_CONFIG_BUSY, pf->state))
+               return -ENETDOWN;
+
        if (test_bit(__I40E_VSI_DOWN, vsi->state))
                return -ENETDOWN;
 
index 29de3ae96ef222d7545abead8c9004bacb312561..bd1b1ed323f4fe9e16134d31b99aad7341b5e89c 100644 (file)
@@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
 void iavf_disable_channels(struct iavf_adapter *adapter);
 void iavf_add_cloud_filter(struct iavf_adapter *adapter);
 void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+                                       const u8 *macaddr);
 #endif /* _IAVF_H_ */
index dad3eec8ccd86ec49fb543439a36ce2f3b53a717..84c3d8d97ef6f4fbc7278044afa56c1652eeadc7 100644 (file)
@@ -42,7 +42,7 @@ struct iavf_stats {
  */
 #define IAVF_STAT(_type, _name, _stat) { \
        .stat_string = _name, \
-       .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+       .sizeof_stat = sizeof_field(_type, _stat), \
        .stat_offset = offsetof(_type, _stat) \
 }
 
index 821987da5698ac8cf0468ac90e6f0fee460916ba..8e16be960e96b64196285916e268035cedf70449 100644 (file)
@@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
  *
  * Returns ptr to the filter object or NULL when no memory available.
  **/
-static struct
-iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
-                                const u8 *macaddr)
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+                                       const u8 *macaddr)
 {
        struct iavf_mac_filter *f;
 
@@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work)
        struct virtchnl_vf_resource *vfres = adapter->vf_res;
        struct net_device *netdev = adapter->netdev;
        struct iavf_hw *hw = &adapter->hw;
+       struct iavf_mac_filter *f, *ftmp;
        struct iavf_vlan_filter *vlf;
        struct iavf_cloud_filter *cf;
-       struct iavf_mac_filter *f;
        u32 reg_val;
        int i = 0, err;
        bool running;
@@ -2181,6 +2180,16 @@ continue_reset:
 
        spin_lock_bh(&adapter->mac_vlan_list_lock);
 
+       /* Delete filter for the current MAC address, as it could have
+        * been changed by the PF via administratively set MAC.
+        * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+        */
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+                       list_del(&f->list);
+                       kfree(f);
+               }
+       }
        /* re-add all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->add = true;
index c46770eba320e6b8a05d67e566db95da1636e506..1ab9cb339acb4d195c4433d9fc0254189586cc6b 100644 (file)
@@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                        ether_addr_copy(netdev->perm_addr,
                                        adapter->hw.mac.addr);
                }
+               spin_lock_bh(&adapter->mac_vlan_list_lock);
+               iavf_add_filter(adapter, adapter->hw.mac.addr);
+               spin_unlock_bh(&adapter->mac_vlan_list_lock);
                iavf_process_config(adapter);
                }
                break;
index aec3c6c379df86b7bbe503a602ff7406d0b5758c..9ebd93e79aeb64889ea4447c47a5e631ed2765d6 100644 (file)
@@ -15,7 +15,7 @@ struct ice_stats {
 
 #define ICE_STAT(_type, _name, _stat) { \
        .stat_string = _name, \
-       .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+       .sizeof_stat = sizeof_field(_type, _stat), \
        .stat_offset = offsetof(_type, _stat) \
 }
 
@@ -36,10 +36,10 @@ static int ice_q_stats_len(struct net_device *netdev)
 #define ICE_VSI_STATS_LEN      ARRAY_SIZE(ice_gstrings_vsi_stats)
 
 #define ICE_PFC_STATS_LEN ( \
-               (FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \
-                FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \
-                FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \
-                FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \
+               (sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \
+                sizeof_field(struct ice_pf, stats.priority_xon_rx) + \
+                sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \
+                sizeof_field(struct ice_pf, stats.priority_xon_tx)) \
                 / sizeof(u64))
 #define ICE_ALL_STATS_LEN(n)   (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
                                 ICE_VSI_STATS_LEN + ice_q_stats_len(n))
index ad34f22d44ef10761155c72ec44d58e4209aaf89..0997d352709b7dfcaba9383f43241e047a0391f9 100644 (file)
@@ -302,7 +302,7 @@ struct ice_ctx_ele {
 
 #define ICE_CTX_STORE(_struct, _ele, _width, _lsb) {   \
        .offset = offsetof(struct _struct, _ele),       \
-       .size_of = FIELD_SIZEOF(struct _struct, _ele),  \
+       .size_of = sizeof_field(struct _struct, _ele),  \
        .width = _width,                                \
        .lsb = _lsb,                                    \
 }
index 8a6ef351412921c729db50e686201b51ff4795ec..438b42ce2cd9aa84931c9e699dd14c2158bdb35f 100644 (file)
@@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
                dev_spec->module_plugged = true;
                if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
                        hw->phy.media_type = e1000_media_type_internal_serdes;
-               } else if (eth_flags->e100_base_fx) {
+               } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
                        dev_spec->sgmii_active = true;
                        hw->phy.media_type = e1000_media_type_internal_serdes;
                } else if (eth_flags->e1000_base_t) {
@@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                        break;
                }
 
-               /* do not change link mode for 100BaseFX */
-               if (dev_spec->eth_flags.e100_base_fx)
-                       break;
-
                /* change current link mode setting */
                ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
 
-               if (hw->phy.media_type == e1000_media_type_copper)
+               if (dev_spec->sgmii_active)
                        ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
                else
                        ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
index 3182b059bf55ce0be15487d9cbd03a894b75105f..445fbdce3e2592eed5fb2c2bf951dcacd714b85d 100644 (file)
@@ -26,7 +26,7 @@ struct igb_stats {
 
 #define IGB_STAT(_name, _stat) { \
        .stat_string = _name, \
-       .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+       .sizeof_stat = sizeof_field(struct igb_adapter, _stat), \
        .stat_offset = offsetof(struct igb_adapter, _stat) \
 }
 static const struct igb_stats igb_gstrings_stats[] = {
@@ -76,7 +76,7 @@ static const struct igb_stats igb_gstrings_stats[] = {
 
 #define IGB_NETDEV_STAT(_net_stat) { \
        .stat_string = __stringify(_net_stat), \
-       .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+       .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \
        .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
 }
 static const struct igb_stats igb_gstrings_net_stats[] = {
@@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev,
                                advertising &= ~ADVERTISED_1000baseKX_Full;
                        }
                }
-               if (eth_flags->e100_base_fx) {
+               if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
                        supported |= SUPPORTED_100baseT_Full;
                        advertising |= ADVERTISED_100baseT_Full;
                }
index ac98f1d9689218107ba72ef045ff62d2defea1b6..455c1cdceb6e2b51e30a8ab04d1aaa65177a0d88 100644 (file)
@@ -16,7 +16,7 @@ struct igc_stats {
 
 #define IGC_STAT(_name, _stat) { \
        .stat_string = _name, \
-       .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \
+       .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \
        .stat_offset = offsetof(struct igc_adapter, _stat) \
 }
 
@@ -67,7 +67,7 @@ static const struct igc_stats igc_gstrings_stats[] = {
 
 #define IGC_NETDEV_STAT(_net_stat) { \
        .stat_string = __stringify(_net_stat), \
-       .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+       .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \
        .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
 }
 
index c8c93ac436d4c8c1729c833411a415805d3cde73..c65eb1afc8fb9848cad664049a62cfabad764886 100644 (file)
@@ -19,10 +19,10 @@ struct ixgb_stats {
 };
 
 #define IXGB_STAT(m)           IXGB_STATS, \
-                               FIELD_SIZEOF(struct ixgb_adapter, m), \
+                               sizeof_field(struct ixgb_adapter, m), \
                                offsetof(struct ixgb_adapter, m)
 #define IXGB_NETDEV_STAT(m)    NETDEV_STATS, \
-                               FIELD_SIZEOF(struct net_device, m), \
+                               sizeof_field(struct net_device, m), \
                                offsetof(struct net_device, m)
 
 static struct ixgb_stats ixgb_gstrings_stats[] = {
index 25c097cd8100f8d06e554577e65a8559d6bcbccd..a2b2ad1f60b111db43e1c2c161efb199e0aeafe7 100644 (file)
@@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct hlist_node *node2;
        struct ixgbe_fdir_filter *filter;
-       u64 action;
+       u8 queue;
 
        spin_lock(&adapter->fdir_perfect_lock);
 
@@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 
        hlist_for_each_entry_safe(filter, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
-               action = filter->action;
-               if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
-                       action =
-                       (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+               if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+                       queue = IXGBE_FDIR_DROP_QUEUE;
+               } else {
+                       u32 ring = ethtool_get_flow_spec_ring(filter->action);
+                       u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+                       if (!vf && (ring >= adapter->num_rx_queues)) {
+                               e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+                                     ring);
+                               continue;
+                       } else if (vf &&
+                                  ((vf > adapter->num_vfs) ||
+                                    ring >= adapter->num_rx_queues_per_pool)) {
+                               e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+                                     vf, ring);
+                               continue;
+                       }
+
+                       /* Map the ring onto the absolute queue index */
+                       if (!vf)
+                               queue = adapter->rx_ring[ring]->reg_idx;
+                       else
+                               queue = ((vf - 1) *
+                                       adapter->num_rx_queues_per_pool) + ring;
+               }
 
                ixgbe_fdir_write_perfect_filter_82599(hw,
-                               &filter->filter,
-                               filter->sw_idx,
-                               (action == IXGBE_FDIR_DROP_QUEUE) ?
-                               IXGBE_FDIR_DROP_QUEUE :
-                               adapter->rx_ring[action]->reg_idx);
+                               &filter->filter, filter->sw_idx, queue);
        }
 
        spin_unlock(&adapter->fdir_perfect_lock);
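
Restoring a flow-director filter now maps the user-visible (vf, ring) pair onto an absolute hardware queue after validating both: VF 0 denotes the PF, whose rings carry their own reg_idx, while VF n owns a contiguous pool of num_rx_queues_per_pool queues. A condensed sketch of the mapping, bounds checks elided:

    /* Sketch: (vf, ring) -> absolute Rx queue, assuming each VF owns a
     * contiguous pool of qpp queues. */
    static u8 fdir_abs_queue(u8 vf, u32 ring, u32 qpp, const u8 *pf_reg_idx)
    {
            if (!vf)
                    return pf_reg_idx[ring];  /* PF ring: use its reg_idx */
            return (vf - 1) * qpp + ring;     /* VF pools are contiguous */
    }
    /* e.g. qpp = 4: vf 3, ring 2 -> (3 - 1) * 4 + 2 = queue 10 */
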
@@ -10261,7 +10278,12 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 
        /* If transitioning XDP modes reconfigure rings */
        if (need_reset) {
-               int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
+               int err;
+
+               if (!prog)
+                       /* Wait until ndo_xsk_wakeup completes. */
+                       synchronize_rcu();
+               err = ixgbe_setup_tc(dev, adapter->hw_tcs);
 
                if (err) {
                        rcu_assign_pointer(adapter->xdp_prog, old_prog);
index d6feaacfbf898a285036d6d48308f01558c00486..b43be9f1410533470d20dc046070124be1ad33cf 100644 (file)
@@ -709,10 +709,14 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
        if (qid >= adapter->num_xdp_queues)
                return -ENXIO;
 
-       if (!adapter->xdp_ring[qid]->xsk_umem)
+       ring = adapter->xdp_ring[qid];
+
+       if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
+               return -ENETDOWN;
+
+       if (!ring->xsk_umem)
                return -ENXIO;
 
-       ring = adapter->xdp_ring[qid];
        if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
                u64 eics = BIT_ULL(ring->q_vector->v_idx);
 
index 54459b69c948481210cfd5795bd0912b100481a7..f7f309c96fa846c3e04301c9475acd5db38bb4f0 100644 (file)
@@ -31,14 +31,14 @@ struct ixgbe_stats {
 #define IXGBEVF_STAT(_name, _stat) { \
        .stat_string = _name, \
        .type = IXGBEVF_STATS, \
-       .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+       .sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \
        .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
 }
 
 #define IXGBEVF_NETDEV_STAT(_net_stat) { \
        .stat_string = #_net_stat, \
        .type = NETDEV_STATS, \
-       .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+       .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \
        .stat_offset = offsetof(struct net_device_stats, _net_stat) \
 }
 
index 076f2da36f27825907ee817e8c919c72fde3a22d..64ec0e7c64b49504c108a1a423498e7e31808890 100644 (file)
@@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
        struct ixgbe_hw *hw = &adapter->hw;
        int count = 0;
 
-       if ((netdev_uc_count(netdev)) > 10) {
-               pr_err("Too many unicast filters - No Space\n");
-               return -ENOSPC;
-       }
-
        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;
 
index ae195f8adff588a85994d770d8c4bd8a3d91fc00..d3164537b694c2a3c5218372750660d6467206b3 100644 (file)
@@ -1043,7 +1043,7 @@ static int korina_probe(struct platform_device *pdev)
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
        dev->base_addr = r->start;
-       lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
+       lp->eth_regs = ioremap(r->start, resource_size(r));
        if (!lp->eth_regs) {
                printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
                rc = -ENXIO;
@@ -1051,7 +1051,7 @@ static int korina_probe(struct platform_device *pdev)
        }
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
-       lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+       lp->rx_dma_regs = ioremap(r->start, resource_size(r));
        if (!lp->rx_dma_regs) {
                printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
                rc = -ENXIO;
@@ -1059,7 +1059,7 @@ static int korina_probe(struct platform_device *pdev)
        }
 
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
-       lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+       lp->tx_dma_regs = ioremap(r->start, resource_size(r));
        if (!lp->tx_dma_regs) {
                printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
                rc = -ENXIO;
index 6e73ffe6f92870f0845e9d31a9dc9f59ebda4b7e..41f2f54807417c145b382dc4b403c262013469db 100644 (file)
@@ -649,7 +649,7 @@ ltq_etop_probe(struct platform_device *pdev)
                goto err_out;
        }
 
-       ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
+       ltq_etop_membase = devm_ioremap(&pdev->dev,
                res->start, resource_size(res));
        if (!ltq_etop_membase) {
                dev_err(&pdev->dev, "failed to remap etop engine %d\n",
index d5b644131cff5cba613157b3d85f665f2a29d8a5..65a093216dacfdd7509a550cff1c82235f5da6d2 100644 (file)
@@ -1432,11 +1432,11 @@ struct mv643xx_eth_stats {
 };
 
 #define SSTAT(m)                                               \
-       { #m, FIELD_SIZEOF(struct net_device_stats, m),         \
+       { #m, sizeof_field(struct net_device_stats, m),         \
          offsetof(struct net_device, stats.m), -1 }
 
 #define MIBSTAT(m)                                             \
-       { #m, FIELD_SIZEOF(struct mib_counters, m),             \
+       { #m, sizeof_field(struct mib_counters, m),             \
          -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
 
 static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
index 71a872d46bc487ea3a9113e03b8ccaec89f2c0f8..67ad8b8b127d0e1cfc41d851efd5e9d7e7b0c521 100644 (file)
@@ -2081,7 +2081,11 @@ static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
               struct bpf_prog *prog, struct xdp_buff *xdp)
 {
-       u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+       unsigned int len;
+       u32 ret, act;
+
+       len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+       act = bpf_prog_run_xdp(prog, xdp);
 
        switch (act) {
        case XDP_PASS:
@@ -2094,9 +2098,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                if (err) {
                        ret = MVNETA_XDP_DROPPED;
                        __page_pool_put_page(rxq->page_pool,
-                                       virt_to_head_page(xdp->data),
-                                       xdp->data_end - xdp->data_hard_start,
-                                       true);
+                                            virt_to_head_page(xdp->data),
+                                            len, true);
                } else {
                        ret = MVNETA_XDP_REDIR;
                }
@@ -2106,9 +2109,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                ret = mvneta_xdp_xmit_back(pp, xdp);
                if (ret != MVNETA_XDP_TX)
                        __page_pool_put_page(rxq->page_pool,
-                                       virt_to_head_page(xdp->data),
-                                       xdp->data_end - xdp->data_hard_start,
-                                       true);
+                                            virt_to_head_page(xdp->data),
+                                            len, true);
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2119,8 +2121,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
        case XDP_DROP:
                __page_pool_put_page(rxq->page_pool,
                                     virt_to_head_page(xdp->data),
-                                    xdp->data_end - xdp->data_hard_start,
-                                    true);
+                                    len, true);
                ret = MVNETA_XDP_DROPPED;
                break;
        }
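
An XDP program may move xdp->data and xdp->data_end (via bpf_xdp_adjust_head() and bpf_xdp_adjust_tail()), so the length handed back to the page_pool for DMA sync has to be snapshotted before bpf_prog_run_xdp(); otherwise a shrunken packet would sync fewer bytes than the device actually wrote. The essence of the fix, with recycle_needed() standing in for the per-verdict error handling:

    unsigned int len = xdp->data_end - xdp->data_hard_start - rx_offset;
    u32 act = bpf_prog_run_xdp(prog, xdp);

    if (recycle_needed(act))
            __page_pool_put_page(pool, virt_to_head_page(xdp->data),
                                 len /* pre-run length */, true);
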
index 62dc2f362a1699a3cfaeccdee79f33e77198f9bd..14e372cda7f41730f3d25b0f69118b6035f9e7d3 100644 (file)
@@ -3680,7 +3680,7 @@ static int mvpp2_open(struct net_device *dev)
                valid = true;
        }
 
-       if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
+       if (priv->hw_version == MVPP22 && port->link_irq) {
                err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
                                  dev->name, port);
                if (err) {
index 095f6c71b4fa1dc61476be81f49c1b5c11a5c19b..7515d079c600c7bd6d5ba9dff3317d038f52e9c5 100644 (file)
@@ -3932,7 +3932,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        spin_lock_init(&hw->phy_lock);
        tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
 
-       hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+       hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
        if (!hw->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                goto err_out_free_hw;
index 5f56ee83e3b10d3eed5b3eda9e024ad5781d5cd1..535dee35e04ec11e7913f467f4b3c5b2b5506b3d 100644 (file)
@@ -5022,7 +5022,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->pdev = pdev;
        sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
 
-       hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+       hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
        if (!hw->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                goto err_out_free_hw;
index eaf08f7ad12843b661ce6cb0571ab3ab81e0871b..64ed725aec2858b6ead1990c6cb83d0eb448a0cb 100644 (file)
@@ -182,7 +182,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
        crdump_enable_crspace_access(dev, cr_space);
 
        /* Get the available snapshot ID for the dumps */
-       id = devlink_region_shapshot_id_get(devlink);
+       id = devlink_region_snapshot_id_get(devlink);
 
        /* Try to capture dumps */
        mlx4_crdump_collect_crspace(dev, cr_space, id);
index a1202e53710cd0c7f16aa4f572ae0bc62da7cb6d..8bf1f08fdee26d0b305f4af0f4841c45b8810f5e 100644 (file)
@@ -611,7 +611,7 @@ static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
 }
 
 #define MLX4_LINK_MODES_SZ \
-       (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)
+       (sizeof_field(struct mlx4_ptys_reg, eth_proto_cap) * 8)
 
 enum ethtool_report {
        SUPPORTED = 0,
index 2c16add0b642fb1a4d13d8f9807e337835aa157b..9c8427698238feffc4e2a2d654a12331005a7177 100644 (file)
@@ -760,7 +760,7 @@ enum {
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
        MLX5E_STATE_XDP_TX_ENABLED,
-       MLX5E_STATE_XDP_OPEN,
+       MLX5E_STATE_XDP_ACTIVE,
 };
 
 struct mlx5e_rqt {
index 68d593074f6c8f9765188c192b92a0fbdadd481c..d48292ccda294b917a3ca42876b34d5b6059afe7 100644 (file)
@@ -122,6 +122,22 @@ enum {
 #endif
 };
 
+#define MLX5E_TTC_NUM_GROUPS   3
+#define MLX5E_TTC_GROUP1_SIZE  (BIT(3) + MLX5E_NUM_TUNNEL_TT)
+#define MLX5E_TTC_GROUP2_SIZE   BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE   BIT(0)
+#define MLX5E_TTC_TABLE_SIZE   (MLX5E_TTC_GROUP1_SIZE +\
+                                MLX5E_TTC_GROUP2_SIZE +\
+                                MLX5E_TTC_GROUP3_SIZE)
+
+#define MLX5E_INNER_TTC_NUM_GROUPS     3
+#define MLX5E_INNER_TTC_GROUP1_SIZE    BIT(3)
+#define MLX5E_INNER_TTC_GROUP2_SIZE    BIT(1)
+#define MLX5E_INNER_TTC_GROUP3_SIZE    BIT(0)
+#define MLX5E_INNER_TTC_TABLE_SIZE     (MLX5E_INNER_TTC_GROUP1_SIZE +\
+                                        MLX5E_INNER_TTC_GROUP2_SIZE +\
+                                        MLX5E_INNER_TTC_GROUP3_SIZE)
+
 #ifdef CONFIG_MLX5_EN_RXNFC
 
 struct mlx5e_ethtool_table {
index 1d6b58860da6d40d6b2cb0dfebe292b9bc364ade..3a975641f902adbb5da9286e39dbe3bb84c71b61 100644 (file)
@@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
                        struct devlink_health_reporter *reporter, char *err_str,
                        struct mlx5e_err_ctx *err_ctx)
 {
-       if (!reporter) {
-               netdev_err(priv->netdev, err_str);
+       netdev_err(priv->netdev, err_str);
+
+       if (!reporter)
                return err_ctx->recover(&err_ctx->ctx);
-       }
+
        return devlink_health_report(reporter, err_str, err_ctx);
 }
index 36ac1e3816b9d6856960c114a6f8134d1c1b534b..d7587f40ecaecab577545f8796c6ade7da39abaa 100644 (file)
@@ -75,12 +75,18 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
 {
        set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+
+       if (priv->channels.params.xdp_prog)
+               set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
 }
 
 static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
 {
+       if (priv->channels.params.xdp_prog)
+               clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
+
        clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
-       /* let other device's napi(s) see our new state */
+       /* Let other device's napi(s) and XSK wakeups see our new state. */
        synchronize_rcu();
 }
 
@@ -89,19 +95,9 @@ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
        return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
 }
 
-static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv)
-{
-       set_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
-}
-
-static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv)
-{
-       clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
-}
-
-static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv)
+static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv)
 {
-       return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
+       return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state);
 }
 
 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
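
MLX5E_STATE_XDP_ACTIVE replaces the open/closed bookkeeping with a bit that is flipped inside the TX enable/disable helpers, and only when an XDP program is actually installed. The synchronize_rcu() in the disable path ensures any concurrent XSK wakeup that observed the bit set has finished before teardown proceeds. The underlying pattern, with a hypothetical bit name:

    /* Sketch: an RCU-published state bit gating a wakeup fast path. */
    static void example_disable(unsigned long *state)
    {
            clear_bit(XDP_ACTIVE_BIT, state);
            synchronize_rcu();  /* wait out readers that saw the bit set */
    }

    /* Reader side, under rcu_read_lock() or in napi context: */
    static int example_wakeup(unsigned long *state)
    {
            if (unlikely(!test_bit(XDP_ACTIVE_BIT, state)))
                    return -ENETDOWN;
            /* ... safe to touch the queues: teardown waits for us ... */
            return 0;
    }
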
index 631af8dee5171d67447e5eefac4809a115bb4e3e..c28cbae4233103fda64496357525f96a2dfac98e 100644 (file)
@@ -144,6 +144,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
 {
        clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
        napi_synchronize(&c->napi);
+       synchronize_rcu(); /* Sync with the XSK wakeup. */
 
        mlx5e_close_rq(&c->xskrq);
        mlx5e_close_cq(&c->xskrq.cq);
index 87827477d38c48dc71d795a1a142634a1c3a5933..fe2d596cb361fa1ae03bfbfe28c2741965804f4b 100644 (file)
@@ -14,7 +14,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
        struct mlx5e_channel *c;
        u16 ix;
 
-       if (unlikely(!mlx5e_xdp_is_open(priv)))
+       if (unlikely(!mlx5e_xdp_is_active(priv)))
                return -ENETDOWN;
 
        if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
index 778dab1af8fc659cae40ead517bc94dc22f68a4a..f260dd96873bf3bacdb1b1ace307bc2861280d89 100644 (file)
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
 
 struct tx_sync_info {
        u64 rcd_sn;
-       s32 sync_len;
+       u32 sync_len;
        int nr_frags;
        skb_frag_t frags[MAX_SKB_FRAGS];
 };
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
 
 static enum mlx5e_ktls_sync_retval
 tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-                u32 tcp_seq, struct tx_sync_info *info)
+                u32 tcp_seq, int datalen, struct tx_sync_info *info)
 {
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
        enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
+       bool ends_before;
 
        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
                goto out;
        }
 
-       if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-               ret = tls_record_is_start_marker(record) ?
-                       MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+       /* There are the following cases:
+        * 1. packet ends before start marker: bypass offload.
+        * 2. packet starts before start marker and ends after it: drop,
+        *    not supported, breaks contract with kernel.
+        * 3. packet ends before tls record info starts: drop,
+        *    this packet was already acknowledged and its record info
+        *    was released.
+        */
+       ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+       if (unlikely(tls_record_is_start_marker(record))) {
+               ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+               goto out;
+       } else if (ends_before) {
+               ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
 
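
The three cases enumerated in the new comment hinge on wraparound-safe TCP sequence arithmetic: before(a, b) from include/net/tcp.h is true when a precedes b modulo 2^32, and ends_before asks whether the segment's end precedes the record's first byte. The idiom:

    /* Sketch: the before() idiom; signed subtraction keeps the
     * comparison correct across 32-bit sequence wraparound. */
    static inline bool seq_before(u32 seq1, u32 seq2)
    {
            return (s32)(seq1 - seq2) < 0;
    }
    /* A segment [tcp_seq, tcp_seq + datalen) ends before a record
     * starting at rec_seq iff seq_before(tcp_seq + datalen, rec_seq). */
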
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        u8 num_wqebbs;
        int i = 0;
 
-       ret = tx_sync_info_get(priv_tx, seq, &info);
+       ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
        if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
                if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
                        stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                goto err_out;
        }
 
-       if (unlikely(info.sync_len < 0)) {
-               if (likely(datalen <= -info.sync_len))
-                       return MLX5E_KTLS_SYNC_DONE;
-
-               stats->tls_drop_bypass_req++;
-               goto err_out;
-       }
-
        stats->tls_ooo++;
 
        tx_post_resync_params(sq, priv_tx, info.rcd_sn);
@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        if (unlikely(contig_wqebbs_room < num_wqebbs))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
-       tx_post_resync_params(sq, priv_tx, info.rcd_sn);
-
        for (; i < info.nr_frags; i++) {
                unsigned int orig_fsz, frag_offset = 0, n = 0;
                skb_frag_t *f = &info.frags[i];
@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                enum mlx5e_ktls_sync_retval ret =
                        mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
 
-               if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+               switch (ret) {
+               case MLX5E_KTLS_SYNC_DONE:
                        *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
-               else if (ret == MLX5E_KTLS_SYNC_FAIL)
+                       break;
+               case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+                       if (likely(!skb->decrypted))
+                               goto out;
+                       WARN_ON_ONCE(1);
+                       /* fall-through */
+               default: /* MLX5E_KTLS_SYNC_FAIL */
                        goto err_out;
-               else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
-                       goto out;
+               }
        }
 
        priv_tx->expected_seq = seq + datalen;
index 15b7f0f1427c201fe4e29e8c81304d4718e3f17c..73d3dc07331f194bccef3b32fa99d7b6e3230656 100644 (file)
@@ -904,22 +904,6 @@ del_rules:
        return err;
 }
 
-#define MLX5E_TTC_NUM_GROUPS   3
-#define MLX5E_TTC_GROUP1_SIZE  (BIT(3) + MLX5E_NUM_TUNNEL_TT)
-#define MLX5E_TTC_GROUP2_SIZE   BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE   BIT(0)
-#define MLX5E_TTC_TABLE_SIZE   (MLX5E_TTC_GROUP1_SIZE +\
-                                MLX5E_TTC_GROUP2_SIZE +\
-                                MLX5E_TTC_GROUP3_SIZE)
-
-#define MLX5E_INNER_TTC_NUM_GROUPS     3
-#define MLX5E_INNER_TTC_GROUP1_SIZE    BIT(3)
-#define MLX5E_INNER_TTC_GROUP2_SIZE    BIT(1)
-#define MLX5E_INNER_TTC_GROUP3_SIZE    BIT(0)
-#define MLX5E_INNER_TTC_TABLE_SIZE     (MLX5E_INNER_TTC_GROUP1_SIZE +\
-                                        MLX5E_INNER_TTC_GROUP2_SIZE +\
-                                        MLX5E_INNER_TTC_GROUP3_SIZE)
-
 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
                                         bool use_ipv)
 {
index 4980e80a5e85ddb58fa66cda25c6280724be1ed2..4997b8a51994bc6b23f4ef5cfa0375192975711f 100644 (file)
@@ -3000,12 +3000,9 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 int mlx5e_open_locked(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       bool is_xdp = priv->channels.params.xdp_prog;
        int err;
 
        set_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (is_xdp)
-               mlx5e_xdp_set_open(priv);
 
        err = mlx5e_open_channels(priv, &priv->channels);
        if (err)
@@ -3020,8 +3017,6 @@ int mlx5e_open_locked(struct net_device *netdev)
        return 0;
 
 err_clear_state_opened_flag:
-       if (is_xdp)
-               mlx5e_xdp_set_closed(priv);
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
        return err;
 }
@@ -3053,8 +3048,6 @@ int mlx5e_close_locked(struct net_device *netdev)
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;
 
-       if (priv->channels.params.xdp_prog)
-               mlx5e_xdp_set_closed(priv);
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
 
        netif_carrier_off(priv->netdev);
@@ -4371,16 +4364,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
        return 0;
 }
 
-static int mlx5e_xdp_update_state(struct mlx5e_priv *priv)
-{
-       if (priv->channels.params.xdp_prog)
-               mlx5e_xdp_set_open(priv);
-       else
-               mlx5e_xdp_set_closed(priv);
-
-       return 0;
-}
-
 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4415,7 +4398,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
                mlx5e_set_rq_type(priv->mdev, &new_channels.params);
                old_prog = priv->channels.params.xdp_prog;
 
-               err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state);
+               err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
                if (err)
                        goto unlock;
        } else {
index 9b32a9c0f4979100949af0f1efc02a5d02e5f847..7e32b9e3667c66b2b6992f17eb230eb907b5df36 100644 (file)
@@ -592,7 +592,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
 
-       ft_attr->max_fte = MLX5E_NUM_TT;
+       ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
        ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
        ft_attr->prio = MLX5E_TC_PRIO;
 }
@@ -2999,6 +2999,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info
        return kmemdup(tun_info, tun_size, GFP_KERNEL);
 }
 
+static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
+                                     struct mlx5e_tc_flow *flow,
+                                     int out_index,
+                                     struct mlx5e_encap_entry *e,
+                                     struct netlink_ext_ack *extack)
+{
+       int i;
+
+       for (i = 0; i < out_index; i++) {
+               if (flow->encaps[i].e != e)
+                       continue;
+               NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
+               netdev_err(priv->netdev, "can't duplicate encap action\n");
+               return true;
+       }
+
+       return false;
+}
+
 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow,
                              struct net_device *mirred_dev,
@@ -3034,6 +3053,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 
        /* must verify if encap is valid or not */
        if (e) {
+               /* Check that entry was not already attached to this flow */
+               if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
+                       err = -EOPNOTSUPP;
+                       goto out_err;
+               }
+
                mutex_unlock(&esw->offloads.encap_tbl_lock);
                wait_for_completion(&e->res_ready);
 
@@ -3220,6 +3245,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
               same_hw_devs(priv, netdev_priv(out_dev));
 }
 
+static bool is_duplicated_output_device(struct net_device *dev,
+                                       struct net_device *out_dev,
+                                       int *ifindexes, int if_count,
+                                       struct netlink_ext_ack *extack)
+{
+       int i;
+
+       for (i = 0; i < if_count; i++) {
+               if (ifindexes[i] == out_dev->ifindex) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "can't duplicate output to same device");
+                       netdev_err(dev, "can't duplicate output to same device: %s\n",
+                                  out_dev->name);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                struct flow_action *flow_action,
                                struct mlx5e_tc_flow *flow,
@@ -3231,11 +3276,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        const struct ip_tunnel_info *info = NULL;
+       int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
        bool ft_flow = mlx5e_is_ft_flow(flow);
        const struct flow_action_entry *act;
+       int err, i, if_count = 0;
        bool encap = false;
        u32 action = 0;
-       int err, i;
 
        if (!flow_action_has_entries(flow_action))
                return -EINVAL;
@@ -3312,6 +3358,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
                                struct net_device *uplink_upper;
 
+                               if (is_duplicated_output_device(priv->netdev,
+                                                               out_dev,
+                                                               ifindexes,
+                                                               if_count,
+                                                               extack))
+                                       return -EOPNOTSUPP;
+
+                               ifindexes[if_count] = out_dev->ifindex;
+                               if_count++;
+
                                rcu_read_lock();
                                uplink_upper =
                                        netdev_master_upper_dev_get_rcu(uplink_dev);
@@ -3980,6 +4036,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
        u32 rate_mbps;
        int err;
 
+       vport_num = rpriv->rep->vport;
+       if (vport_num >= MLX5_VPORT_ECPF) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Ingress rate limit is supported only for Eswitch ports connected to VFs");
+               return -EOPNOTSUPP;
+       }
+
        esw = priv->mdev->priv.eswitch;
        /* rate is given in bytes/sec.
         * First convert to bits/sec and then round to the nearest mbit/sec.
@@ -3988,8 +4051,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
         * 1 mbit/sec.
         */
        rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
-       vport_num = rpriv->rep->vport;
-
        err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
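
The conversion in the comment above is easy to verify by hand. A standalone
sketch of the same bytes/sec to mbit/sec rounding (helper name hypothetical;
the intermediate is widened to 64 bits here purely for clarity):

    #include <stdint.h>

    static uint32_t rate_to_mbps(uint32_t rate_bytes_per_sec)
    {
            uint64_t bits, mbps;

            if (!rate_bytes_per_sec)
                    return 0;                 /* zero means "no limit" */
            bits = (uint64_t)rate_bytes_per_sec * 8;
            mbps = (bits + 500000) / 1000000; /* round to nearest mbit */
            return mbps ? (uint32_t)mbps : 1; /* floor at 1 mbit/sec */
    }

    /* rate_to_mbps(125000) == 1 (exactly 1 Mbit/s)
     * rate_to_mbps(1)      == 1 (rounds to 0, clamped up)
     */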
index 2c965ad0d74421e85c46b3e5d65a9dbcee9a7551..3df3604e8929a1621c951484fe17820822f5a4e6 100644 (file)
@@ -1928,8 +1928,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
        struct mlx5_vport *vport;
        int i;
 
-       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
                memset(&vport->info, 0, sizeof(vport->info));
+               vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+       }
 }
 
 /* Public E-Switch API */
index 243a5440867e19b5378e4dbb5d0cdce64623db13..3e6412783078fecb1985166ef1a48a3f19e930a3 100644 (file)
@@ -866,7 +866,7 @@ out:
  */
 #define ESW_SIZE (16 * 1024 * 1024)
 const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
-                                   64 * 1024, 4 * 1024 };
+                                   64 * 1024, 128 };
 
 static int
 get_sz_from_pool(struct mlx5_eswitch *esw)
@@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
                return -EINVAL;
        }
 
-       mlx5_eswitch_disable(esw, false);
+       mlx5_eswitch_disable(esw, true);
        mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
        err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
        if (err) {
@@ -2220,7 +2220,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
 
 int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
-       int err;
+       struct mlx5_vport *vport;
+       int err, i;
 
        if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
            MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2237,6 +2238,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        if (err)
                goto err_vport_metadata;
 
+       /* Representor will control the vport link state */
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+               vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
        err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
        if (err)
                goto err_vports;
@@ -2266,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 {
        int err, err1;
 
-       mlx5_eswitch_disable(esw, false);
+       mlx5_eswitch_disable(esw, true);
        err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
index c76da309506b257307ba1de0582176bc60780d83..e4ec0e03c289490f2c96f93e5a73757e51d3deef 100644 (file)
@@ -87,10 +87,10 @@ static const struct rhashtable_params rhash_sa = {
         * value is not constant during the lifetime
         * of the key object.
         */
-       .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
-                  FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+       .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
+                  sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
        .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
-                     FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+                     sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
        .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
        .automatic_shrinking = true,
        .min_size = 1,
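
FIELD_SIZEOF() was renamed to sizeof_field() with identical semantics, so the
key geometry above is unchanged. A tiny compilable sketch of the macro and
the "skip the leading cmd word" arithmetic (demo struct hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

    struct hw_sa_demo {                /* stand-in for the real hw_sa */
            uint32_t cmd;              /* mutable, excluded from the key */
            unsigned char rest[28];    /* stable bytes used as hash key */
    };

    int main(void)
    {
            printf("key_len = %zu\n",
                   sizeof(struct hw_sa_demo) -
                   sizeof_field(struct hw_sa_demo, cmd)); /* key_len = 28 */
            return 0;
    }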
index d6057748456768566478d7022b378c8ae879f185..8c5df6c7d7b6b98fc73c1c9750433f11190df01c 100644 (file)
@@ -209,7 +209,7 @@ enum fs_i_lock_class {
 };
 
 static const struct rhashtable_params rhash_fte = {
-       .key_len = FIELD_SIZEOF(struct fs_fte, val),
+       .key_len = sizeof_field(struct fs_fte, val),
        .key_offset = offsetof(struct fs_fte, val),
        .head_offset = offsetof(struct fs_fte, hash),
        .automatic_shrinking = true,
@@ -217,7 +217,7 @@ static const struct rhashtable_params rhash_fte = {
 };
 
 static const struct rhashtable_params rhash_fg = {
-       .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
+       .key_len = sizeof_field(struct mlx5_flow_group, mask),
        .key_offset = offsetof(struct mlx5_flow_group, mask),
        .head_offset = offsetof(struct mlx5_flow_group, hash),
        .automatic_shrinking = true,
@@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node)
        }
 }
 
-static void del_sw_fte_rcu(struct rcu_head *head)
-{
-       struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
-       struct mlx5_flow_steering *steering = get_steering(&fte->node);
-
-       kmem_cache_free(steering->ftes_cache, fte);
-}
-
 static void del_sw_fte(struct fs_node *node)
 {
+       struct mlx5_flow_steering *steering = get_steering(node);
        struct mlx5_flow_group *fg;
        struct fs_fte *fte;
        int err;
@@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node)
                                     rhash_fte);
        WARN_ON(err);
        ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
-
-       call_rcu(&fte->rcu, del_sw_fte_rcu);
+       kmem_cache_free(steering->ftes_cache, fte);
 }
 
 static void del_hw_flow_group(struct fs_node *node)
@@ -1633,47 +1625,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
 }
 
 static struct fs_fte *
-lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
+lookup_fte_locked(struct mlx5_flow_group *g,
+                 const u32 *match_value,
+                 bool take_write)
 {
        struct fs_fte *fte_tmp;
 
-       nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-
-       fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
-       if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
-               fte_tmp = NULL;
-               goto out;
-       }
-
-       if (!fte_tmp->node.active) {
-               tree_put_node(&fte_tmp->node, false);
-               fte_tmp = NULL;
-               goto out;
-       }
-       nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
-out:
-       up_write_ref_node(&g->node, false);
-       return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
-{
-       struct fs_fte *fte_tmp;
-
-       if (!tree_get_node(&g->node))
-               return NULL;
-
-       rcu_read_lock();
-       fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
+       if (take_write)
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+       else
+               nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+       fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+                                        rhash_fte);
        if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
-               rcu_read_unlock();
                fte_tmp = NULL;
                goto out;
        }
-       rcu_read_unlock();
-
        if (!fte_tmp->node.active) {
                tree_put_node(&fte_tmp->node, false);
                fte_tmp = NULL;
@@ -1681,19 +1648,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
        }
 
        nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
 out:
-       tree_put_node(&g->node, false);
-       return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
-{
-       if (write)
-               return lookup_fte_for_write_locked(g, match_value);
+       if (take_write)
+               up_write_ref_node(&g->node, false);
        else
-               return lookup_fte_for_read_locked(g, match_value);
+               up_read_ref_node(&g->node);
+       return fte_tmp;
 }
 
 static struct mlx5_flow_handle *
index e8cd997f413eb86cf5f3e5e75e7a75801a8c4f12..c2621b9115633b53fe21d738bcd909f53d1bf108 100644 (file)
@@ -203,7 +203,6 @@ struct fs_fte {
        enum fs_fte_status              status;
        struct mlx5_fc                  *counter;
        struct rhash_head               hash;
-       struct rcu_head rcu;
        int                             modify_mask;
 };
 
index 173e2c12e1c782ea02099920a5857a8206e72bb9..f554cfddcf4e88e59a230743e351385899542c27 100644 (file)
@@ -1193,6 +1193,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
        if (err)
                goto err_load;
 
+       if (boot) {
+               err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+               if (err)
+                       goto err_devlink_reg;
+       }
+
        if (mlx5_device_registered(dev)) {
                mlx5_attach_device(dev);
        } else {
@@ -1210,6 +1216,9 @@ out:
        return err;
 
 err_reg_dev:
+       if (boot)
+               mlx5_devlink_unregister(priv_to_devlink(dev));
+err_devlink_reg:
        mlx5_unload(dev);
 err_load:
        if (boot)
@@ -1347,10 +1356,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        request_module_nowait(MLX5_IB_MOD);
 
-       err = mlx5_devlink_register(devlink, &pdev->dev);
-       if (err)
-               goto clean_load;
-
        err = mlx5_crdump_enable(dev);
        if (err)
                dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
@@ -1358,9 +1363,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        pci_save_state(pdev);
        return 0;
 
-clean_load:
-       mlx5_unload_one(dev, true);
-
 err_load_one:
        mlx5_pci_close(dev);
 pci_init_err:
@@ -1561,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x101d) },                      /* ConnectX-6 Dx */
        { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},   /* ConnectX Family mlx5Gen Virtual Function */
        { PCI_VDEVICE(MELLANOX, 0x101f) },                      /* ConnectX-6 LX */
+       { PCI_VDEVICE(MELLANOX, 0x1021) },                      /* ConnectX-7 */
        { PCI_VDEVICE(MELLANOX, 0xa2d2) },                      /* BlueField integrated ConnectX-5 network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},   /* BlueField integrated ConnectX-5 network controller VF */
        { PCI_VDEVICE(MELLANOX, 0xa2d6) },                      /* BlueField-2 integrated ConnectX-6 Dx network controller */
index 32e94d2ee5e4ab0381f4c904a0b9031f3709126e..e4cff7abb348e1087ed85bfef733a990912ac947 100644 (file)
@@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
        /* We need to copy the refcount since this ste
         * may have been traversed several times
         */
-       refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
+       new_ste->refcount = cur_ste->refcount;
 
        /* Link old STEs rule_mem list to the new ste */
        mlx5dr_rule_update_rule_member(cur_ste, new_ste);
@@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
        if (!rule_mem)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&rule_mem->list);
+       INIT_LIST_HEAD(&rule_mem->use_ste_list);
+
        rule_mem->ste = ste;
        list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
 
index 51803eef13ddc5cc59453d671e9be53603187fd3..c7f10d4f8f8d70561a4bb58e00309f99093260cd 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2019 Mellanox Technologies. */
 
+#include <linux/smp.h>
 #include "dr_types.h"
 
 #define QUEUE_SIZE 128
@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        if (!in)
                goto err_cqwq;
 
-       vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+       vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
        err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
        if (err) {
                kvfree(in);
index a5a266983dd33395536e1b73f48fb63c19d3f020..c6c7d1defbd788ee657f45c8e3914c6e8393243e 100644 (file)
@@ -348,7 +348,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
        if (dst->next_htbl)
                dst->next_htbl->pointing_ste = dst;
 
-       refcount_set(&dst->refcount, refcount_read(&src->refcount));
+       dst->refcount = src->refcount;
 
        INIT_LIST_HEAD(&dst->rule_list);
        list_splice_tail_init(&src->rule_list, &dst->rule_list);
@@ -565,7 +565,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
 
 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
 {
-       return !refcount_read(&ste->refcount);
+       return !ste->refcount;
 }
 
 /* Init one ste as a pattern for ste data array */
@@ -689,14 +689,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
        htbl->ste_arr = chunk->ste_arr;
        htbl->hw_ste_arr = chunk->hw_ste_arr;
        htbl->miss_list = chunk->miss_list;
-       refcount_set(&htbl->refcount, 0);
+       htbl->refcount = 0;
 
        for (i = 0; i < chunk->num_of_entries; i++) {
                struct mlx5dr_ste *ste = &htbl->ste_arr[i];
 
                ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
                ste->htbl = htbl;
-               refcount_set(&ste->refcount, 0);
+               ste->refcount = 0;
                INIT_LIST_HEAD(&ste->miss_list_node);
                INIT_LIST_HEAD(&htbl->miss_list[i]);
                INIT_LIST_HEAD(&ste->rule_list);
@@ -713,7 +713,7 @@ out_free_htbl:
 
 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
 {
-       if (refcount_read(&htbl->refcount))
+       if (htbl->refcount)
                return -EBUSY;
 
        mlx5dr_icm_free_chunk(htbl->chunk);
index 290fe61c33d0bc37152880c17647870c24365a59..3fdf4a5eb031b72d67f2290cf61a2d4152a8fc92 100644 (file)
@@ -123,7 +123,7 @@ struct mlx5dr_matcher_rx_tx;
 struct mlx5dr_ste {
        u8 *hw_ste;
        /* refcount: indicates the number of rules that are using this ste */
-       refcount_t refcount;
+       u32 refcount;
 
        /* attached to the miss_list head at each htbl entry */
        struct list_head miss_list_node;
@@ -155,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
 struct mlx5dr_ste_htbl {
        u8 lu_type;
        u16 byte_mask;
-       refcount_t refcount;
+       u32 refcount;
        struct mlx5dr_icm_chunk *chunk;
        struct mlx5dr_ste *ste_arr;
        u8 *hw_ste_arr;
@@ -206,13 +206,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
 
 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
 {
-       if (refcount_dec_and_test(&htbl->refcount))
+       htbl->refcount--;
+       if (!htbl->refcount)
                mlx5dr_ste_htbl_free(htbl);
 }
 
 static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
 {
-       refcount_inc(&htbl->refcount);
+       htbl->refcount++;
 }
 
 /* STE utils */
@@ -254,14 +255,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
                                  struct mlx5dr_matcher *matcher,
                                  struct mlx5dr_matcher_rx_tx *nic_matcher)
 {
-       if (refcount_dec_and_test(&ste->refcount))
+       ste->refcount--;
+       if (!ste->refcount)
                mlx5dr_ste_free(ste, matcher, nic_matcher);
 }
 
 /* initialized to 0, increased only when the ste appears in a new rule */
 static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
 {
-       refcount_inc(&ste->refcount);
+       ste->refcount++;
 }
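
Dropping refcount_t for a plain u32 across these files is sound only if every
get/put runs under one lock; in SW steering the rule insert/remove paths are
serialized by the domain mutex, which this diff relies on but does not show.
Schematically (assuming that serialization):

    mutex_lock(&dmn->mutex);        /* domain-wide lock taken by callers */
    mlx5dr_ste_get(ste);            /* plain ++ is safe under the mutex  */
    /* ... build and attach the rule ... */
    mlx5dr_ste_put(ste, matcher, nic_matcher); /* plain --, free at zero */
    mutex_unlock(&dmn->mutex);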
 
 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
index 3d587d0bdbbe3f9457d64c024f4403cd248ff6e4..1e32e2443f7378dd2b63b2df83052184f1895321 100644 (file)
@@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                list_for_each_entry(dst, &fte->node.children, node.list) {
                        enum mlx5_flow_destination_type type = dst->dest_attr.type;
-                       u32 id;
 
                        if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
                                err = -ENOSPC;
                                goto free_actions;
                        }
 
-                       switch (type) {
-                       case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
-                               id = dst->dest_attr.counter_id;
+                       if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
 
-                               tmp_action =
-                                       mlx5dr_action_create_flow_counter(id);
-                               if (!tmp_action) {
-                                       err = -ENOMEM;
-                                       goto free_actions;
-                               }
-                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                               actions[num_actions++] = tmp_action;
-                               break;
+                       switch (type) {
                        case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
                                tmp_action = create_ft_action(dev, dst);
                                if (!tmp_action) {
@@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                }
        }
 
+       if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               list_for_each_entry(dst, &fte->node.children, node.list) {
+                       u32 id;
+
+                       if (dst->dest_attr.type !=
+                           MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
+                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -ENOSPC;
+                               goto free_actions;
+                       }
+
+                       id = dst->dest_attr.counter_id;
+                       tmp_action =
+                               mlx5dr_action_create_flow_counter(id);
+                       if (!tmp_action) {
+                               err = -ENOMEM;
+                               goto free_actions;
+                       }
+
+                       fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                       actions[num_actions++] = tmp_action;
+               }
+       }
+
        params.match_sz = match_sz;
        params.match_buf = (u64 *)fte->val;
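
Note the shape of the rewrite above: destinations are now built in two
passes, forwarding entries first and counters second (and only when the
COUNT action flag is set), so counter actions always land after the
forwarding actions in actions[]. Reduced to its skeleton (the emit helpers
are hypothetical):

    /* pass 1: every destination except counters */
    list_for_each_entry(dst, &fte->node.children, node.list) {
            if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
                    continue;
            emit_fwd_action(dst);
    }

    /* pass 2: counters only, appended after all forwarding actions */
    list_for_each_entry(dst, &fte->node.children, node.list) {
            if (dst->dest_attr.type != MLX5_FLOW_DESTINATION_TYPE_COUNTER)
                    continue;
            emit_counter_action(dst);
    }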
 
index 544344ac4894cb5d24335940afd703fe2fcd0d9a..79057af4fe997a9760d58af40b40ba0f19b9d647 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netlink.h>
+#include <linux/vmalloc.h>
 #include <linux/xz.h>
 #include "mlxfw_mfa2.h"
 #include "mlxfw_mfa2_file.h"
@@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
        comp_size = be32_to_cpu(comp->size);
        comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
 
-       comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+       comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
        if (!comp_data)
                return ERR_PTR(-ENOMEM);
        comp_data->comp.data_size = comp_size;
@@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
        comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
        return &comp_data->comp;
 err_out:
-       kfree(comp_data);
+       vfree(comp_data);
        return ERR_PTR(err);
 }
 
@@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
        const struct mlxfw_mfa2_comp_data *comp_data;
 
        comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
-       kfree(comp_data);
+       vfree(comp_data);
 }
 
 void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
index 5294a1622643ead2c662c4915bb670234cace2cb..af30e8a76682093b00d8fca0c7d3d06ca9eb0aba 100644 (file)
@@ -5472,6 +5472,7 @@ enum mlxsw_reg_htgt_trap_group {
        MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+       MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
 
        __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
        MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
index 556dca328bb514b84ce6e34f8e01f719d24b6408..8ed15199eb4f3ffba26beebf34fbe16caaa0550d 100644 (file)
@@ -860,23 +860,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
        u64 len;
        int err;
 
+       if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+               this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
 
        if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
                return NETDEV_TX_BUSY;
 
-       if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
-               struct sk_buff *skb_orig = skb;
-
-               skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
-               if (!skb) {
-                       this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
-                       dev_kfree_skb_any(skb_orig);
-                       return NETDEV_TX_OK;
-               }
-               dev_consume_skb_any(skb_orig);
-       }
-
        if (eth_skb_pad(skb)) {
                this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
                return NETDEV_TX_OK;
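
skb_cow_head() subsumes the old headroom check: it reallocates the header not
only when headroom is short but also when the header is cloned, a case the
skb_headroom() test alone could miss before the driver writes its TX header
in place. The idiom in isolation (MY_HDR_LEN hypothetical):

    static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (skb_cow_head(skb, MY_HDR_LEN)) {
                    dev_kfree_skb_any(skb);   /* no writable headroom: drop */
                    return NETDEV_TX_OK;      /* consumed, do not requeue  */
            }
            skb_push(skb, MY_HDR_LEN);        /* guaranteed writable now   */
            memset(skb->data, 0, MY_HDR_LEN); /* build the TX header here  */
            /* ... hand the frame to the hardware ... */
            return NETDEV_TX_OK;
    }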
@@ -1215,6 +1209,9 @@ static void update_stats_cache(struct work_struct *work)
                             periodic_hw_stats.update_dw.work);
 
        if (!netif_carrier_ok(mlxsw_sp_port->dev))
+               /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
+                * necessary when port goes down.
+                */
                goto out;
 
        mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
@@ -4324,6 +4321,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
        return 0;
 }
 
+static void
+mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       int i;
+
+       for (i = 0; i < TC_MAX_QUEUE; i++)
+               mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
+}
+
 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
                                     char *pude_pl, void *priv)
 {
@@ -4345,6 +4351,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
        } else {
                netdev_info(mlxsw_sp_port->dev, "link down\n");
                netif_carrier_off(mlxsw_sp_port->dev);
+               mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
        }
 }
 
@@ -4542,8 +4549,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
-       MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
-       MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+       MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
+       MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
        /* PKT Sample trap */
        MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
                  false, SP_IP2ME, DISCARD),
@@ -4626,6 +4633,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
                        rate = 19 * 1024;
                        burst_size = 12;
                        break;
+               case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
+                       rate = 360;
+                       burst_size = 7;
+                       break;
                default:
                        continue;
                }
@@ -4665,6 +4676,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
+               case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
                        priority = 5;
                        tc = 5;
                        break;
@@ -5127,6 +5139,27 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
        return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
 }
 
+static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
+                         const struct mlxsw_bus_info *mlxsw_bus_info,
+                         struct netlink_ext_ack *extack)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+       mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+       mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+       mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+       mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+       mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+       mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+       mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+       mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+       mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+       mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+       mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+
+       return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -5629,7 +5662,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
 static struct mlxsw_driver mlxsw_sp3_driver = {
        .kind                           = mlxsw_sp3_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
-       .init                           = mlxsw_sp2_init,
+       .init                           = mlxsw_sp3_init,
        .fini                           = mlxsw_sp_fini,
        .basic_trap_groups_set          = mlxsw_sp_basic_trap_groups_set,
        .port_split                     = mlxsw_sp_port_split,
index 150b3a144b83e8c609a212867ed4870504f504eb..3d3cca5961163aec45a66bd96d3435ff1e598d7a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
+#include <linux/mutex.h>
 #include <net/net_namespace.h>
 #include <net/tc_act/tc_vlan.h>
 
@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
        struct mlxsw_sp_fid *dummy_fid;
        struct rhashtable ruleset_ht;
        struct list_head rules;
+       struct mutex rules_lock; /* Protects rules list */
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
                        goto err_ruleset_block_bind;
        }
 
+       mutex_lock(&mlxsw_sp->acl->rules_lock);
        list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+       mutex_unlock(&mlxsw_sp->acl->rules_lock);
        block->rule_count++;
        block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
        return 0;
@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
 
        block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
        ruleset->ht_key.block->rule_count--;
+       mutex_lock(&mlxsw_sp->acl->rules_lock);
        list_del(&rule->list);
+       mutex_unlock(&mlxsw_sp->acl->rules_lock);
        if (!ruleset->ht_key.chain_index &&
            mlxsw_sp_acl_ruleset_is_singular(ruleset))
                mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
        struct mlxsw_sp_acl_rule *rule;
        int err;
 
-       /* Protect internal structures from changes */
-       rtnl_lock();
+       mutex_lock(&acl->rules_lock);
        list_for_each_entry(rule, &acl->rules, list) {
                err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
                                                        rule);
                if (err)
                        goto err_rule_update;
        }
-       rtnl_unlock();
+       mutex_unlock(&acl->rules_lock);
        return 0;
 
 err_rule_update:
-       rtnl_unlock();
+       mutex_unlock(&acl->rules_lock);
        return err;
 }
 
@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        acl->dummy_fid = fid;
 
        INIT_LIST_HEAD(&acl->rules);
+       mutex_init(&acl->rules_lock);
        err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
        if (err)
                goto err_acl_ops_init;
@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        return 0;
 
 err_acl_ops_init:
+       mutex_destroy(&acl->rules_lock);
        mlxsw_sp_fid_put(fid);
 err_fid_get:
        rhashtable_destroy(&acl->ruleset_ht);
@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
 
        cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
        mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
+       mutex_destroy(&acl->rules_lock);
        WARN_ON(!list_empty(&acl->rules));
        mlxsw_sp_fid_put(acl->dummy_fid);
        rhashtable_destroy(&acl->ruleset_ht);
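
The acl changes above replace rtnl_lock() with a mutex scoped to the rules
list, so the periodic activity walk stops contending with unrelated rtnl
users. The skeleton of the pattern (rule type and walker hypothetical):

    static LIST_HEAD(rules);
    static DEFINE_MUTEX(rules_lock);        /* protects 'rules' only */

    static void rule_add(struct my_rule *rule)
    {
            mutex_lock(&rules_lock);
            list_add_tail(&rule->list, &rules);
            mutex_unlock(&rules_lock);
    }

    static void rules_walk(void)
    {
            struct my_rule *rule;

            mutex_lock(&rules_lock);        /* sleepable, so firmware
                                             * queries may run under it */
            list_for_each_entry(rule, &rules, list)
                    update_activity(rule);  /* hypothetical per-rule work */
            mutex_unlock(&rules_lock);
    }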
index 68cc6737d45ce375af72e29f25c3a51fde6b66dc..0124bfe1963b7fc08b8b9732daa4ca410bba3427 100644 (file)
@@ -195,6 +195,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
        return -EOPNOTSUPP;
 }
 
+static u64
+mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+       return xstats->backlog[tclass_num] +
+              xstats->backlog[tclass_num + 8];
+}
+
+static u64
+mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+       return xstats->tail_drop[tclass_num] +
+              xstats->tail_drop[tclass_num + 8];
+}
+
 static void
 mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
                                       u8 prio_bitmap, u64 *tx_packets,
@@ -269,7 +283,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                               &stats_base->tx_bytes);
        red_base->prob_mark = xstats->ecn;
        red_base->prob_drop = xstats->wred_drop[tclass_num];
-       red_base->pdrop = xstats->tail_drop[tclass_num];
+       red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
 
        stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
        stats_base->drops = red_base->prob_drop + red_base->pdrop;
@@ -370,7 +384,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
 
        early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
        marks = xstats->ecn - xstats_base->prob_mark;
-       pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+       pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
+                xstats_base->pdrop;
 
        res->pdrop += pdrops;
        res->prob_drop += early_drops;
@@ -403,9 +418,10 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 
        overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
                     stats_base->overlimits;
-       drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
+       drops = xstats->wred_drop[tclass_num] +
+               mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
                stats_base->drops;
-       backlog = xstats->backlog[tclass_num];
+       backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num);
 
        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->overlimits += overlimits;
@@ -576,9 +592,9 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
        tx_packets = stats->tx_packets - stats_base->tx_packets;
 
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               drops += xstats->tail_drop[i];
+               drops += mlxsw_sp_xstats_tail_drop(xstats, i);
                drops += xstats->wred_drop[i];
-               backlog += xstats->backlog[i];
+               backlog += mlxsw_sp_xstats_backlog(xstats, i);
        }
        drops = drops - stats_base->drops;
 
@@ -614,7 +630,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 
        stats_base->drops = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               stats_base->drops += xstats->tail_drop[i];
+               stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
                stats_base->drops += xstats->wred_drop[i];
        }
 
@@ -651,6 +667,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
            mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
                return 0;
 
+       if (!p->child_handle) {
+               /* This is an invisible FIFO replacing the original Qdisc.
+                * Ignore it--the original Qdisc's destroy will follow.
+                */
+               return 0;
+       }
+
        /* See if the grafted qdisc is already offloaded on any tclass. If so,
         * unoffload it.
         */
index 30bfe3880fafca38ef028ee35d52d463735d73b6..8290e82240fc0046ed073193512dd48ad51dd859 100644 (file)
@@ -5742,8 +5742,13 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
        if (mlxsw_sp_fib6_rt_should_ignore(rt))
                return;
 
+       /* Multipath routes are first added to the FIB trie and only then
+        * notified. If we vetoed the addition, we will get a delete
+        * notification for a route we do not have. Therefore, do not warn if
+        * route was not found.
+        */
        fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
-       if (WARN_ON(!fib6_entry))
+       if (!fib6_entry)
                return;
 
        /* If not all the nexthops are deleted, then only reduce the nexthop
@@ -7074,6 +7079,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
                rif = mlxsw_sp->router->rifs[i];
+               if (rif && rif->ops &&
+                   rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
+                       continue;
                if (rif && rif->dev && rif->dev != dev &&
                    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
                                             mlxsw_sp->mac_mask)) {
index de6cb22f68b1fd0a17a08d7470a6d8afac02fadf..f0e98ec8f1eeac8b48a5cebb1986e98574fe1bf3 100644 (file)
@@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
        u64 len;
        int err;
 
+       if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+               this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
        memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
 
        if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
                return NETDEV_TX_BUSY;
 
-       if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
-               struct sk_buff *skb_orig = skb;
-
-               skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
-               if (!skb) {
-                       this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
-                       dev_kfree_skb_any(skb_orig);
-                       return NETDEV_TX_OK;
-               }
-               dev_consume_skb_any(skb_orig);
-       }
        mlxsw_sx_txhdr_construct(skb, &tx_info);
        /* TX header is consumed by HW on the way so we shouldn't count its
         * bytes as being sent.
index 6af9a7eee114969ed874161ffe059bfba57f40ac..09125406105258af591e860c540c343abd06d7fa 100644 (file)
@@ -1937,7 +1937,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
 
        pci_set_master(pci_dev);
        addr = pci_resource_start(pci_dev, 1);
-       dev->base = ioremap_nocache(addr, PAGE_SIZE);
+       dev->base = ioremap(addr, PAGE_SIZE);
        dev->tx_descs = pci_alloc_consistent(pci_dev,
                        4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
        dev->rx_info.descs = pci_alloc_consistent(pci_dev,
index b339125b2f097fcfdafd7e45f9ee8c5eeac5190a..05e760444a92c4c1473f3e426dacdd73f13cda17 100644 (file)
@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
 
        netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
 
+       spin_lock_init(&lp->lock);
+
        for (i = 0; i < SONIC_NUM_RRS; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
                if (skb == NULL) {
@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
        return 0;
 }
 
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+       struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+       int i;
+       u16 bits;
+
+       for (i = 0; i < 1000; ++i) {
+               bits = SONIC_READ(SONIC_CMD) & mask;
+               if (!bits)
+                       return;
+               if (irqs_disabled() || in_interrupt())
+                       udelay(20);
+               else
+                       usleep_range(100, 200);
+       }
+       WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
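
sonic_quiesce() picks its delay primitive by context: udelay() busy-waits and
is legal with interrupts off, while usleep_range() sleeps and may only be
used in process context. The same shape in isolation (bound, register read
and mask hypothetical; in purely sleepable paths the readx_poll_timeout()
helpers from <linux/iopoll.h> express this loop declaratively):

    for (i = 0; i < MAX_TRIES; i++) {
            if (!(read_status() & mask))
                    return 0;                /* device went idle */
            if (irqs_disabled() || in_interrupt())
                    udelay(20);              /* atomic context: spin */
            else
                    usleep_range(100, 200);  /* process context: sleep */
    }
    return -ETIMEDOUT;                       /* deadline expired */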
 
 /*
  * Close the SONIC device
@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
        /*
         * stop the SONIC, disable interrupts
         */
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       sonic_quiesce(dev, SONIC_CR_ALL);
+
        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
         * put the Sonic into software-reset mode and
         * disable all interrupts before releasing DMA buffers
         */
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       sonic_quiesce(dev, SONIC_CR_ALL);
+
        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
  *   wake the tx queue
  * Concurrently with all of this, the SONIC is potentially writing to
  * the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
  */
 
 static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
        struct sonic_local *lp = netdev_priv(dev);
        dma_addr_t laddr;
        int length;
-       int entry = lp->next_tx;
+       int entry;
+       unsigned long flags;
 
        netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
 
@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       spin_lock_irqsave(&lp->lock, flags);
+
+       entry = lp->next_tx;
+
        sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
        sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
        sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
        sonic_tda_put(dev, entry, SONIC_TD_LINK,
                sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
 
-       /*
-        * Must set tx_skb[entry] only after clearing status, and
-        * before clearing EOL and before stopping queue
-        */
        wmb();
        lp->tx_len[entry] = length;
        lp->tx_laddr[entry] = laddr;
@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
 
        SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
 
+       spin_unlock_irqrestore(&lp->lock, flags);
+
        return NETDEV_TX_OK;
 }
 
@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
        struct net_device *dev = dev_id;
        struct sonic_local *lp = netdev_priv(dev);
        int status;
+       unsigned long flags;
+
+       /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+        * with sonic_send_packet() so that the two functions can share state.
+        * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+        * by macsonic which must use two IRQs with different priority levels.
+        */
+       spin_lock_irqsave(&lp->lock, flags);
+
+       status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+       if (!status) {
+               spin_unlock_irqrestore(&lp->lock, flags);
 
-       if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
                return IRQ_NONE;
+       }
 
        do {
+               SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
+
                if (status & SONIC_INT_PKTRX) {
                        netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
                        sonic_rx(dev);  /* got packet(s) */
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
                }
 
                if (status & SONIC_INT_TXDN) {
@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        int td_status;
                        int freed_some = 0;
 
-                       /* At this point, cur_tx is the index of a TD that is one of:
-                        *   unallocated/freed                          (status set   & tx_skb[entry] clear)
-                        *   allocated and sent                         (status set   & tx_skb[entry] set  )
-                        *   allocated and not yet sent                 (status clear & tx_skb[entry] set  )
-                        *   still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+                       /* The state of a Transmit Descriptor may be inferred
+                        * from { tx_skb[entry], td_status } as follows.
+                        * { clear, clear } => the TD has never been used
+                        * { set,   clear } => the TD was handed to SONIC
+                        * { set,   set   } => the TD was handed back
+                        * { clear, set   } => the TD is available for re-use
                         */
 
                        netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                                if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
                                        break;
 
-                               if (td_status & 0x0001) {
+                               if (td_status & SONIC_TCR_PTX) {
                                        lp->stats.tx_packets++;
                                        lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
                                } else {
-                                       lp->stats.tx_errors++;
-                                       if (td_status & 0x0642)
+                                       if (td_status & (SONIC_TCR_EXD |
+                                           SONIC_TCR_EXC | SONIC_TCR_BCM))
                                                lp->stats.tx_aborted_errors++;
-                                       if (td_status & 0x0180)
+                                       if (td_status &
+                                           (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
                                                lp->stats.tx_carrier_errors++;
-                                       if (td_status & 0x0020)
+                                       if (td_status & SONIC_TCR_OWC)
                                                lp->stats.tx_window_errors++;
-                                       if (td_status & 0x0004)
+                                       if (td_status & SONIC_TCR_FU)
                                                lp->stats.tx_fifo_errors++;
                                }
 
@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        if (freed_some || lp->tx_skb[entry] == NULL)
                                netif_wake_queue(dev);  /* The ring is no longer full */
                        lp->cur_tx = entry;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
                }
 
                /*
@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                if (status & SONIC_INT_RFO) {
                        netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
                                  __func__);
-                       lp->stats.rx_fifo_errors++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
                }
                if (status & SONIC_INT_RDE) {
                        netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
                                  __func__);
-                       lp->stats.rx_dropped++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
                }
                if (status & SONIC_INT_RBAE) {
                        netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
                                  __func__);
-                       lp->stats.rx_dropped++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
                }
 
                /* counter overruns; all counters are 16bit wide */
-               if (status & SONIC_INT_FAE) {
+               if (status & SONIC_INT_FAE)
                        lp->stats.rx_frame_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
-               }
-               if (status & SONIC_INT_CRC) {
+               if (status & SONIC_INT_CRC)
                        lp->stats.rx_crc_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
-               }
-               if (status & SONIC_INT_MP) {
+               if (status & SONIC_INT_MP)
                        lp->stats.rx_missed_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
-               }
 
                /* transmit error */
                if (status & SONIC_INT_TXER) {
-                       if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
-                               netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
-                                         __func__);
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+                       u16 tcr = SONIC_READ(SONIC_TCR);
+
+                       netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
+                                 __func__, tcr);
+
+                       if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
+                                  SONIC_TCR_FU | SONIC_TCR_BCM)) {
+                               /* Aborted transmission. Try again. */
+                               netif_stop_queue(dev);
+                               SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+                       }
                }
 
                /* bus retry */
@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        /* ... to help debug DMA problems causing endless interrupts. */
                        /* Bounce the eth interface to turn on the interrupt again. */
                        SONIC_WRITE(SONIC_IMR, 0);
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
                }
 
-               /* load CAM done */
-               if (status & SONIC_INT_LCD)
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
-       } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+               status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+       } while (status);
+
+       spin_unlock_irqrestore(&lp->lock, flags);
+
        return IRQ_HANDLED;
 }
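
The handler above now runs under lp->lock and, rather than acknowledging each source separately, re-reads the ISR at the bottom of the loop and exits only when no enabled status bits remain. A minimal userspace sketch of that drain pattern, with read_isr() and handle_sources() as hypothetical stand-ins for the SONIC register accessors:

    #include <stdio.h>

    #define IMR_DEFAULT 0x7fff

    static unsigned int pending = 0x0241;   /* pretend a few sources fired */

    static unsigned int read_isr(void)
    {
            return pending;
    }

    static void handle_sources(unsigned int status)
    {
            pending &= ~status;             /* handling clears the sources */
            printf("handled %#06x\n", status);
    }

    int main(void)
    {
            unsigned int status = read_isr() & IMR_DEFAULT;

            do {
                    handle_sources(status);
                    /* Re-check before leaving so a source that fired while
                     * we were working gets another pass instead of being
                     * lost until the next interrupt. */
                    status = read_isr() & IMR_DEFAULT;
            } while (status);

            return 0;
    }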
 
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+                          unsigned int last)
+{
+       unsigned int i = last;
+
+       do {
+               i = (i + 1) & SONIC_RRS_MASK;
+               if (addr == lp->rx_laddr[i])
+                       return i;
+       } while (i != last);
+
+       return -ENOENT;
+}
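
index_from_addr() begins the search one slot past 'last' because the buffer being looked up is usually the next one in ring order. A hypothetical standalone model of the masked scan:

    #include <stdio.h>

    #define RING_SIZE 16                    /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    static unsigned long rx_laddr[RING_SIZE];

    /* Scan the whole ring once, beginning just after 'last'. */
    static int index_of(unsigned long addr, unsigned int last)
    {
            unsigned int i = last;

            do {
                    i = (i + 1) & RING_MASK;
                    if (rx_laddr[i] == addr)
                            return i;
            } while (i != last);

            return -1;                      /* stands in for -ENOENT */
    }

    int main(void)
    {
            rx_laddr[4] = 0xd0000000ul;
            printf("%d\n", index_of(0xd0000000ul, 3));    /* prints 4 */
            return 0;
    }

The (i + 1) & mask wrap relies on the ring size being a power of two, which is why this patch adds SONIC_RRS_MASK alongside the existing RDS/TDS masks further down.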
+
+/* Allocate and map a new skb to be used as a receive buffer. */
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+                          struct sk_buff **new_skb, dma_addr_t *new_addr)
+{
+       *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+       if (!*new_skb)
+               return false;
+
+       if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+               skb_reserve(*new_skb, 2);
+
+       *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+                                  SONIC_RBSIZE, DMA_FROM_DEVICE);
+       if (!*new_addr) {
+               dev_kfree_skb(*new_skb);
+               *new_skb = NULL;
+               return false;
+       }
+
+       return true;
+}
+
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
+                            dma_addr_t old_addr, dma_addr_t new_addr)
+{
+       unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
+       unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
+       u32 buf;
+
+       /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
+        * scans the other resources in the RRA, those in the range [RWP, RRP).
+        */
+       do {
+               buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
+                     sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
+
+               if (buf == old_addr)
+                       break;
+
+               entry = (entry + 1) & SONIC_RRS_MASK;
+       } while (entry != end);
+
+       WARN_ONCE(buf != old_addr, "failed to find resource!\n");
+
+       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
+       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
+
+       entry = (entry + 1) & SONIC_RRS_MASK;
+
+       SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
+}
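
The comment in sonic_update_rra() splits the RRA into two wrapping half-open ranges: [RRP, RWP) belongs to the chip, [RWP, RRP) to the driver. The helper below does not exist in the driver; it only illustrates how modular subtraction makes that membership test wrap correctly:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_MASK 15                    /* 16-entry ring */

    /* Is 'i' inside the half-open, possibly wrapping range [start, end)? */
    static bool in_range(unsigned int i, unsigned int start, unsigned int end)
    {
            return ((i - start) & RING_MASK) < ((end - start) & RING_MASK);
    }

    int main(void)
    {
            /* [14, 2) wraps around: it covers entries 14, 15, 0 and 1. */
            printf("%d %d %d\n",
                   in_range(15, 14, 2),     /* 1 */
                   in_range(0, 14, 2),      /* 1 */
                   in_range(2, 14, 2));     /* 0 */
            return 0;
    }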
+
 /*
  * We have a good packet(s), pass it/them up the network stack.
  */
 static void sonic_rx(struct net_device *dev)
 {
        struct sonic_local *lp = netdev_priv(dev);
-       int status;
        int entry = lp->cur_rx;
+       int prev_entry = lp->eol_rx;
+       bool rbe = false;
 
        while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
-               struct sk_buff *used_skb;
-               struct sk_buff *new_skb;
-               dma_addr_t new_laddr;
-               u16 bufadr_l;
-               u16 bufadr_h;
-               int pkt_len;
-
-               status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
-               if (status & SONIC_RCR_PRX) {
-                       /* Malloc up new buffer. */
-                       new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
-                       if (new_skb == NULL) {
-                               lp->stats.rx_dropped++;
+               u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+               /* If the RD has LPKT set, the chip has finished with the RB */
+               if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+                       struct sk_buff *new_skb;
+                       dma_addr_t new_laddr;
+                       u32 addr = (sonic_rda_get(dev, entry,
+                                                 SONIC_RD_PKTPTR_H) << 16) |
+                                  sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+                       int i = index_from_addr(lp, addr, entry);
+
+                       if (i < 0) {
+                               WARN_ONCE(1, "failed to find buffer!\n");
                                break;
                        }
-                       /* provide 16 byte IP header alignment unless DMA requires otherwise */
-                       if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
-                               skb_reserve(new_skb, 2);
-
-                       new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
-                                              SONIC_RBSIZE, DMA_FROM_DEVICE);
-                       if (!new_laddr) {
-                               dev_kfree_skb(new_skb);
-                               printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+
+                       if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
+                               struct sk_buff *used_skb = lp->rx_skb[i];
+                               int pkt_len;
+
+                               /* Pass the used buffer up the stack */
+                               dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
+                                                DMA_FROM_DEVICE);
+
+                               pkt_len = sonic_rda_get(dev, entry,
+                                                       SONIC_RD_PKTLEN);
+                               skb_trim(used_skb, pkt_len);
+                               used_skb->protocol = eth_type_trans(used_skb,
+                                                                   dev);
+                               netif_rx(used_skb);
+                               lp->stats.rx_packets++;
+                               lp->stats.rx_bytes += pkt_len;
+
+                               lp->rx_skb[i] = new_skb;
+                               lp->rx_laddr[i] = new_laddr;
+                       } else {
+                               /* Failed to obtain a new buffer so re-use it */
+                               new_laddr = addr;
                                lp->stats.rx_dropped++;
-                               break;
                        }
-
-                       /* now we have a new skb to replace it, pass the used one up the stack */
-                       dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
-                       used_skb = lp->rx_skb[entry];
-                       pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
-                       skb_trim(used_skb, pkt_len);
-                       used_skb->protocol = eth_type_trans(used_skb, dev);
-                       netif_rx(used_skb);
-                       lp->stats.rx_packets++;
-                       lp->stats.rx_bytes += pkt_len;
-
-                       /* and insert the new skb */
-                       lp->rx_laddr[entry] = new_laddr;
-                       lp->rx_skb[entry] = new_skb;
-
-                       bufadr_l = (unsigned long)new_laddr & 0xffff;
-                       bufadr_h = (unsigned long)new_laddr >> 16;
-                       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
-                       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
-               } else {
-                       /* This should only happen, if we enable accepting broken packets. */
-                       lp->stats.rx_errors++;
-                       if (status & SONIC_RCR_FAER)
-                               lp->stats.rx_frame_errors++;
-                       if (status & SONIC_RCR_CRCR)
-                               lp->stats.rx_crc_errors++;
-               }
-               if (status & SONIC_RCR_LPKT) {
-                       /*
-                        * this was the last packet out of the current receive buffer
-                        * give the buffer back to the SONIC
+                       /* If RBE is already asserted when RWP advances then
+                        * it's safe to clear RBE after processing this packet.
                         */
-                       lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
-                       if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
-                       SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
-                       if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
-                               netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
-                                         __func__);
-                               SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
-                       }
-               } else
-                       printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
-                            dev->name);
+                       rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
+                       sonic_update_rra(dev, lp, addr, new_laddr);
+               }
                /*
                 * give back the descriptor
                 */
-               sonic_rda_put(dev, entry, SONIC_RD_LINK,
-                       sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+               sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
                sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
-               sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
-                       sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
-               lp->eol_rx = entry;
-               lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+
+               prev_entry = entry;
+               entry = (entry + 1) & SONIC_RDS_MASK;
+       }
+
+       lp->cur_rx = entry;
+
+       if (prev_entry != lp->eol_rx) {
+               /* Advance the EOL flag to put descriptors back into service */
+               sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
+                             sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
+               sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
+                             sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+               lp->eol_rx = prev_entry;
        }
+
+       if (rbe)
+               SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
        /*
         * If any worth-while packets have been received, netif_rx()
         * has done a mark_bh(NET_BH) for us and will work on them
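
sonic_rx() now moves the end-of-list marker once per poll, after the receive loop: SONIC_EOL is set on the new tail descriptor before it is cleared on the old one, so the chip always sees a terminated list. A toy model of that ordering:

    #include <stdio.h>

    #define EOL 0x1

    static unsigned int link_field[16];

    /* Move the EOL marker from 'from' to 'to', new end first. */
    static void advance_eol(unsigned int from, unsigned int to)
    {
            if (to == from)
                    return;
            link_field[to] |= EOL;          /* terminate at the new tail... */
            link_field[from] &= ~EOL;       /* ...then release the old one */
    }

    int main(void)
    {
            link_field[3] = EOL;
            advance_eol(3, 7);
            printf("%x %x\n", link_field[3], link_field[7]);   /* 0 1 */
            return 0;
    }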
@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
                    (netdev_mc_count(dev) > 15)) {
                        rcr |= SONIC_RCR_AMC;
                } else {
+                       unsigned long flags;
+
                        netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
                                  netdev_mc_count(dev));
                        sonic_set_cam_enable(dev, 1);  /* always enable our own address */
@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
                                i++;
                        }
                        SONIC_WRITE(SONIC_CDC, 16);
-                       /* issue Load CAM command */
                        SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+
+                       /* LCAM and TXP commands can't be used simultaneously */
+                       spin_lock_irqsave(&lp->lock, flags);
+                       sonic_quiesce(dev, SONIC_CR_TXP);
                        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+                       sonic_quiesce(dev, SONIC_CR_LCAM);
+                       spin_unlock_irqrestore(&lp->lock, flags);
                }
        }
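
sonic_quiesce() itself is introduced earlier in this patch, outside this excerpt; from its call sites it evidently waits for the given command-register bits to clear before the next command is issued. A purely hypothetical userspace model of such a poll (the real helper presumably bounds or relaxes the wait):

    #include <stdio.h>

    static unsigned int cmd_reg = 0x0200;   /* pretend LCAM is still running */
    static int polls;

    static unsigned int read_cmd(void)
    {
            if (++polls == 3)               /* hardware finishes eventually */
                    cmd_reg = 0;
            return cmd_reg;
    }

    /* Busy-wait until none of 'bits' remain set in the command register. */
    static void quiesce_model(unsigned int bits)
    {
            while (read_cmd() & bits)
                    ;
    }

    int main(void)
    {
            quiesce_model(0x0200);
            printf("quiesced after %d polls\n", polls);
            return 0;
    }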
 
@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
  */
 static int sonic_init(struct net_device *dev)
 {
-       unsigned int cmd;
        struct sonic_local *lp = netdev_priv(dev);
        int i;
 
@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
 
+       /* While in reset mode, clear the CAM Enable register */
+       SONIC_WRITE(SONIC_CE, 0);
+
        /*
         * clear software reset flag, disable receiver, clear and
         * enable interrupts, then completely initialize the SONIC
         */
        SONIC_WRITE(SONIC_CMD, 0);
-       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+       sonic_quiesce(dev, SONIC_CR_ALL);
 
        /*
         * initialize the receive resource area
@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
        }
 
        /* initialize all RRA registers */
-       lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
-                                       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-       lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
-                                       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-
-       SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
-       SONIC_WRITE(SONIC_REA, lp->rra_end);
-       SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
-       SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+       SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
+       SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
+       SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
+       SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
        SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
        SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
 
@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
        netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
 
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
-       i = 0;
-       while (i++ < 100) {
-               if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
-                       break;
-       }
-
-       netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
-                 SONIC_READ(SONIC_CMD), i);
+       sonic_quiesce(dev, SONIC_CR_RRRA);
 
        /*
         * Initialize the receive descriptors so that they
@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
         * load the CAM
         */
        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
-
-       i = 0;
-       while (i++ < 100) {
-               if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
-                       break;
-       }
-       netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
-                 SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+       sonic_quiesce(dev, SONIC_CR_LCAM);
 
        /*
         * enable receiver, disable loopback
         * and enable all interrupts
         */
-       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
        SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
        SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
-
-       cmd = SONIC_READ(SONIC_CMD);
-       if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
-               printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
 
        netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
                  SONIC_READ(SONIC_CMD));
index 2b27f7049acb8a5a5d011c29bfe4c5ba8484f46b..1df6d2f06cc42ccd01a8c61c338863828f1fa3eb 100644 (file)
 #define SONIC_CR_TXP            0x0002
 #define SONIC_CR_HTX            0x0001
 
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
+                     SONIC_CR_RXEN | SONIC_CR_TXP)
+
 /*
  * SONIC data configuration bits
  */
 #define SONIC_TCR_NCRS          0x0100
 #define SONIC_TCR_CRLS          0x0080
 #define SONIC_TCR_EXC           0x0040
+#define SONIC_TCR_OWC           0x0020
 #define SONIC_TCR_PMB           0x0008
 #define SONIC_TCR_FU            0x0004
 #define SONIC_TCR_BCM           0x0002
 #define SONIC_NUM_RDS   SONIC_NUM_RRS /* number of receive descriptors */
 #define SONIC_NUM_TDS   16            /* number of transmit descriptors */
 
-#define SONIC_RDS_MASK  (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK  (SONIC_NUM_TDS-1)
+#define SONIC_RRS_MASK  (SONIC_NUM_RRS - 1)
+#define SONIC_RDS_MASK  (SONIC_NUM_RDS - 1)
+#define SONIC_TDS_MASK  (SONIC_NUM_TDS - 1)
 
 #define SONIC_RBSIZE   1520          /* size of one resource buffer */
 
@@ -312,8 +317,6 @@ struct sonic_local {
        u32 rda_laddr;              /* logical DMA address of RDA */
        dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
        dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
-       unsigned int rra_end;
-       unsigned int cur_rwp;
        unsigned int cur_rx;
        unsigned int cur_tx;           /* first unacked transmit packet */
        unsigned int eol_rx;
@@ -322,6 +325,7 @@ struct sonic_local {
        int msg_enable;
        struct device *device;         /* generic device */
        struct net_device_stats stats;
+       spinlock_t lock;
 };
 
 #define TX_TIMEOUT (3 * HZ)
@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
    as far as we can tell. */
 /* OpenBSD calls this "SWO".  I'd like to think that sonic_buf_put()
    is a much better name. */
-static inline void sonic_buf_put(void* base, int bitmode,
+static inline void sonic_buf_put(u16 *base, int bitmode,
                                 int offset, __u16 val)
 {
        if (bitmode)
 #ifdef __BIG_ENDIAN
-               ((__u16 *) base + (offset*2))[1] = val;
+               __raw_writew(val, base + (offset * 2) + 1);
 #else
-               ((__u16 *) base + (offset*2))[0] = val;
+               __raw_writew(val, base + (offset * 2) + 0);
 #endif
        else
-               ((__u16 *) base)[offset] = val;
+               __raw_writew(val, base + (offset * 1) + 0);
 }
 
-static inline __u16 sonic_buf_get(void* base, int bitmode,
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
                                  int offset)
 {
        if (bitmode)
 #ifdef __BIG_ENDIAN
-               return ((volatile __u16 *) base + (offset*2))[1];
+               return __raw_readw(base + (offset * 2) + 1);
 #else
-               return ((volatile __u16 *) base + (offset*2))[0];
+               return __raw_readw(base + (offset * 2) + 0);
 #endif
        else
-               return ((volatile __u16 *) base)[offset];
+               return __raw_readw(base + (offset * 1) + 0);
 }
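
The switch to __raw_writew()/__raw_readw() keeps the existing addressing: in 32-bit bitmode every 16-bit descriptor field occupies a 32-bit slot, and on big-endian machines the live half is the second 16-bit word of that slot. The raw accessors also make the volatile, exactly-16-bit access explicit, where the old pointer casts only got it by convention. The slot arithmetic, modelled standalone:

    #include <stdio.h>

    /* Which 16-bit word holds field 'offset'? */
    static int word_index(int bitmode, int big_endian, int offset)
    {
            if (bitmode)                    /* 32-bit slots per field */
                    return offset * 2 + (big_endian ? 1 : 0);
            return offset;                  /* packed 16-bit fields */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   word_index(0, 0, 3),     /* 3: 16-bit mode       */
                   word_index(1, 0, 3),     /* 6: little-endian 32  */
                   word_index(1, 1, 3));    /* 7: big-endian 32     */
            return 0;
    }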
 
 /* Inlines that you should actually use for reading/writing DMA buffers */
@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
                             (entry * SIZEOF_SONIC_RR) + offset);
 }
 
+static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
+{
+       struct sonic_local *lp = netdev_priv(dev);
+
+       return lp->rra_laddr +
+              entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+}
+
+static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
+{
+       struct sonic_local *lp = netdev_priv(dev);
+
+       return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
+                                             SONIC_BUS_SCALE(lp->dma_bitmode));
+}
+
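
sonic_rr_addr() and sonic_rr_entry() are inverses: one maps an RRA entry index to the low 16 bits of its DMA address, the other maps such an address back to an index, which is what lets the patch drop the cached rra_end and cur_rwp fields. A quick round-trip check, with illustrative values standing in for SIZEOF_SONIC_RR and SONIC_BUS_SCALE():

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SIZEOF_RR 4          /* assumed: four 16-bit fields per resource */
    #define BUS_SCALE 2          /* assumed: 32-bit bus mode scaling */

    static const uint16_t rra_base = 0x8000;

    static uint16_t rr_addr(int entry)
    {
            return rra_base + entry * SIZEOF_RR * BUS_SCALE;
    }

    static uint16_t rr_entry(uint16_t addr)
    {
            return (addr - rra_base) / (SIZEOF_RR * BUS_SCALE);
    }

    int main(void)
    {
            for (int i = 0; i < 16; i++)
                    assert(rr_entry(rr_addr(i)) == i);
            puts("round-trip ok");
            return 0;
    }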
 static const char version[] =
     "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
 
index c80bb83c8ac9236e12821a61e9f2e03317b39762..0a721f6e8676e22f73fa8ade0345d765c3d30d82 100644 (file)
@@ -2652,17 +2652,17 @@ static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
        switch (meta->insn.off) {
        case offsetof(struct __sk_buff, len):
-               if (size != FIELD_SIZEOF(struct __sk_buff, len))
+               if (size != sizeof_field(struct __sk_buff, len))
                        return -EOPNOTSUPP;
                wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
                break;
        case offsetof(struct __sk_buff, data):
-               if (size != FIELD_SIZEOF(struct __sk_buff, data))
+               if (size != sizeof_field(struct __sk_buff, data))
                        return -EOPNOTSUPP;
                wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
                break;
        case offsetof(struct __sk_buff, data_end):
-               if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
+               if (size != sizeof_field(struct __sk_buff, data_end))
                        return -EOPNOTSUPP;
                emit_alu(nfp_prog, dst,
                         plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
@@ -2683,12 +2683,12 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
        switch (meta->insn.off) {
        case offsetof(struct xdp_md, data):
-               if (size != FIELD_SIZEOF(struct xdp_md, data))
+               if (size != sizeof_field(struct xdp_md, data))
                        return -EOPNOTSUPP;
                wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
                break;
        case offsetof(struct xdp_md, data_end):
-               if (size != FIELD_SIZEOF(struct xdp_md, data_end))
+               if (size != sizeof_field(struct xdp_md, data_end))
                        return -EOPNOTSUPP;
                emit_alu(nfp_prog, dst,
                         plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
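
This hunk and several below are part of the tree-wide FIELD_SIZEOF() → sizeof_field() rename; the two macros are equivalent, both yielding the size of a named struct member without needing an instance. The kernel's definition is essentially the following (the demo struct here is made up):

    #include <stdio.h>

    #define sizeof_field(TYPE, MEMBER) sizeof(((TYPE *)0)->MEMBER)

    struct demo {
            unsigned int len;
            void *data;
    };

    int main(void)
    {
            /* Same result sizeof() would give on a real instance. */
            printf("%zu %zu\n",
                   sizeof_field(struct demo, len),
                   sizeof_field(struct demo, data));
            return 0;
    }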
index 8f732771d3fad8965318dca81a5e292a5310d8e6..11c83a99b0140dfb9c11e8246336d6aad0fe8278 100644 (file)
@@ -15,7 +15,7 @@
 
 const struct rhashtable_params nfp_bpf_maps_neutral_params = {
        .nelem_hint             = 4,
-       .key_len                = FIELD_SIZEOF(struct bpf_map, id),
+       .key_len                = sizeof_field(struct bpf_map, id),
        .key_offset             = offsetof(struct nfp_bpf_neutral_map, map_id),
        .head_offset            = offsetof(struct nfp_bpf_neutral_map, l),
        .automatic_shrinking    = true,
index 95a0d3910e316b42f42c82b66b7256e671432ccf..ac02369174a991aef59d03c448928ef643a4a482 100644 (file)
@@ -374,7 +374,7 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
        }
 
        use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
-                      FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);
+                      sizeof_field(struct nfp_bpf_map, use_map[0]);
 
        nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
        if (!nfp_map)
index 31d94592a7c02b1da63855d33b8896fde4bfeb24..e0c985fcaec128c6757cc69ecf16946bfb9da9d3 100644 (file)
@@ -24,7 +24,7 @@ struct nfp_app;
 #define NFP_FL_STAT_ID_MU_NUM          GENMASK(31, 22)
 #define NFP_FL_STAT_ID_STAT            GENMASK(21, 0)
 
-#define NFP_FL_STATS_ELEM_RS           FIELD_SIZEOF(struct nfp_fl_stats_id, \
+#define NFP_FL_STATS_ELEM_RS           sizeof_field(struct nfp_fl_stats_id, \
                                                     init_unalloc)
 #define NFP_FLOWER_MASK_ENTRY_RS       256
 #define NFP_FLOWER_MASK_ELEMENT_RS     1
index 7c4a15e967df337b9f1591b78073a626f27b9586..5defd31d481c26a6bb650301bc4cd16c977f5cb5 100644 (file)
@@ -65,17 +65,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
        freed_stats_id = priv->stats_ring_size;
        /* Check for unallocated entries first. */
        if (priv->stats_ids.init_unalloc > 0) {
-               if (priv->active_mem_unit == priv->total_mem_units) {
-                       priv->stats_ids.init_unalloc--;
-                       priv->active_mem_unit = 0;
-               }
-
                *stats_context_id =
                        FIELD_PREP(NFP_FL_STAT_ID_STAT,
                                   priv->stats_ids.init_unalloc - 1) |
                        FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
                                   priv->active_mem_unit);
-               priv->active_mem_unit++;
+
+               if (++priv->active_mem_unit == priv->total_mem_units) {
+                       priv->stats_ids.init_unalloc--;
+                       priv->active_mem_unit = 0;
+               }
+
                return 0;
        }
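
The reordering above builds the stat ID from the current init_unalloc/active_mem_unit pair and only then advances the cursor; with the old sequence the init_unalloc decrement arrived one allocation late, apparently letting the pool hand out one entry too many with an underflowed ID. A simplified model of the corrected sequencing, using linear IDs instead of the driver's FIELD_PREP() packing:

    #include <stdio.h>

    #define TOTAL_MU 2

    static unsigned int init_unalloc = 2;
    static unsigned int active_mu;

    static int alloc_id(unsigned int *id)
    {
            if (!init_unalloc)
                    return -1;

            /* Build the ID from the current state... */
            *id = (init_unalloc - 1) * TOTAL_MU + active_mu;

            /* ...and advance the cursor afterwards. */
            if (++active_mu == TOTAL_MU) {
                    init_unalloc--;
                    active_mu = 0;
            }
            return 0;
    }

    int main(void)
    {
            unsigned int id;

            while (!alloc_id(&id))
                    printf("id %u\n", id);  /* 2 3 0 1 - each given out once */
            return 0;
    }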
 
index e4977cdf767899a3a664919aee2f1687893b884a..c0e2f4394aef843fb41d8697f08ac03b4d840c4e 100644 (file)
@@ -106,7 +106,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
         * first NFP_NET_CFG_BAR_SZ of the BAR.  This keeps the code
         * the identical for PF and VF drivers.
         */
-       ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
+       ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
                                   NFP_NET_CFG_BAR_SZ);
        if (!ctrl_bar) {
                dev_err(&pdev->dev,
@@ -200,7 +200,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
                        bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;
 
                map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
-               vf->q_bar = ioremap_nocache(map_addr, bar_sz);
+               vf->q_bar = ioremap(map_addr, bar_sz);
                if (!vf->q_bar) {
                        nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
                        err = -EIO;
@@ -216,7 +216,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 
                /* TX queues */
                map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
-               nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz);
+               nn->tx_bar = ioremap(map_addr, tx_bar_sz);
                if (!nn->tx_bar) {
                        nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
                        err = -EIO;
@@ -225,7 +225,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 
                /* RX queues */
                map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
-               nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz);
+               nn->rx_bar = ioremap(map_addr, rx_bar_sz);
                if (!nn->rx_bar) {
                        nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
                        err = -EIO;
index 85d46f206b3c260d163e0c28e27d62f8f42ac722..b454db283aefce409ed1db14ebe638c6e6d420b0 100644 (file)
@@ -611,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
        /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
        bar = &nfp->bar[0];
        if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
-               bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+               bar->iomem = ioremap(nfp_bar_resource_start(bar),
                                             nfp_bar_resource_len(bar));
        if (bar->iomem) {
                int pf;
@@ -677,7 +677,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
                }
 
                bar = &nfp->bar[4 + i];
-               bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+               bar->iomem = ioremap(nfp_bar_resource_start(bar),
                                             nfp_bar_resource_len(bar));
                if (bar->iomem) {
                        msg += snprintf(msg, end - msg,
@@ -858,7 +858,7 @@ static int nfp6000_area_acquire(struct nfp_cpp_area *area)
                priv->iomem = priv->bar->iomem + priv->bar_offset;
        else
                /* Must have been too big. Sub-allocate. */
-               priv->iomem = ioremap_nocache(priv->phys, priv->size);
+               priv->iomem = ioremap(priv->phys, priv->size);
 
        if (IS_ERR_OR_NULL(priv->iomem)) {
                dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
index 1a3008e331824c80a0aa0184d9f9e58b1d51291d..b36aa5bf3c5fa564b6c535204f7021962088b10c 100644 (file)
@@ -20,7 +20,7 @@ struct pch_gbe_stats {
 #define PCH_GBE_STAT(m)                                                \
 {                                                              \
        .string = #m,                                           \
-       .size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m),       \
+       .size = sizeof_field(struct pch_gbe_hw_stats, m),       \
        .offset = offsetof(struct pch_gbe_hw_stats, m),         \
 }
 
index c303a92d5b06de883bc619ded419ca8587d41732..e8a1b27db84debab0aad020a869660bc310b2b80 100644 (file)
@@ -464,7 +464,7 @@ struct qede_fastpath {
        struct qede_tx_queue    *txq;
        struct qede_tx_queue    *xdp_tx;
 
-#define VEC_NAME_SIZE  (FIELD_SIZEOF(struct net_device, name) + 8)
+#define VEC_NAME_SIZE  (sizeof_field(struct net_device, name) + 8)
        char    name[VEC_NAME_SIZE];
 };
 
index d6cfe4ffbaf3d88390b96ac63481e150978f88db..d1ce4531d01a2f4b0c49835db33285868600c801 100644 (file)
@@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev,
        netif_addr_lock_bh(ndev);
 
        mc_count = netdev_mc_count(ndev);
-       if (mc_count < 64) {
+       if (mc_count <= 64) {
                netdev_for_each_mc_addr(ha, ndev) {
                        ether_addr_copy(temp, ha->addr);
                        temp += ETH_ALEN;
index 481b096e984de3903668e0d3651e874486d882db..34fa3917eb33070acede16175b5d3dd670bd6e7e 100644 (file)
@@ -1406,6 +1406,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
                rxq->rx_buf_seg_size = roundup_pow_of_two(size);
        } else {
                rxq->rx_buf_seg_size = PAGE_SIZE;
+               edev->ndev->features &= ~NETIF_F_GRO_HW;
        }
 
        /* Allocate the parallel driver ring for Rx buffers */
@@ -1450,6 +1451,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
                }
        }
 
+       edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
        if (!edev->gro_disable)
                qede_set_tpa_param(rxq);
 err:
@@ -1702,8 +1704,6 @@ static void qede_init_fp(struct qede_dev *edev)
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
                         edev->ndev->name, queue_id);
        }
-
-       edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
 }
 
 static int qede_set_real_num_queues(struct qede_dev *edev)
index b4b8ba00ee0151083ca33a14ebf860c7fb7e103b..986f26578d344ee6a37dec1864c3c0dc7c10e3a2 100644 (file)
@@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
        int err;
 
        for (i = 0; i < qdev->num_large_buffers; i++) {
+               lrg_buf_cb = &qdev->lrg_buf[i];
+               memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+
                skb = netdev_alloc_skb(qdev->ndev,
                                       qdev->lrg_buffer_len);
                if (unlikely(!skb)) {
@@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                        ql_free_large_buffers(qdev);
                        return -ENOMEM;
                } else {
-
-                       lrg_buf_cb = &qdev->lrg_buf[i];
-                       memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
                        lrg_buf_cb->index = i;
-                       lrg_buf_cb->skb = skb;
                        /*
                         * We save some space to copy the ethhdr from first
                         * buffer
@@ -2792,6 +2791,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                return -ENOMEM;
                        }
 
+                       lrg_buf_cb->skb = skb;
                        dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
                        dma_unmap_len_set(lrg_buf_cb, maplen,
                                          qdev->lrg_buffer_len -
index a496390b8632fa3f9dfa9f5ca18bf64d0b9e7f31..07f9067affc65ea4d73543c18016174217c1971a 100644 (file)
@@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
                        break;
                }
                entry += p_hdr->size;
+               cond_resched();
        }
        p_dev->ahw->reset.seq_index = index;
 }
index a4cd6f2cfb862cb25315823d155b5497e59f5c2f..75d83c3cbf27f2dbf867c69888137f6b17a973c3 100644 (file)
@@ -20,7 +20,7 @@ struct qlcnic_stats {
        int stat_offset;
 };
 
-#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_SIZEOF(m) sizeof_field(struct qlcnic_adapter, m)
 #define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
 static const u32 qlcnic_fw_dump_level[] = {
        0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
index afa10a163da1fc1a43ea5dde6f8c6da297aa0f48..f34ae8c75bc5e11f4e2a4ee02b20e87983c656f7 100644 (file)
@@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
                addr += 16;
                reg_read -= 16;
                ret += 16;
+               cond_resched();
        }
 out:
        mutex_unlock(&adapter->ahw->mem_lock);
@@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
                buf_offset += entry->hdr.cap_size;
                entry_offset += entry->hdr.offset;
                buffer = fw_dump->data + buf_offset;
+               cond_resched();
        }
 
        fw_dump->clr = 1;
index 355cc810e32243361de5cfc233f0d844c5cd685a..cbc6b846ded54e0e95b3d088ec952514797c556d 100644 (file)
@@ -37,7 +37,7 @@ struct fw_info {
        u8      chksum;
 } __packed;
 
-#define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0])
+#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0])
 
 static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
 {
index e19b49c4013ed321e53c0bc684676697342be6b8..3591285250e19745d016d607749d6b408e8e9372 100644 (file)
@@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
        if (cd->tsu) {
                add_tsu_reg(ARSTR);
                add_tsu_reg(TSU_CTRST);
-               add_tsu_reg(TSU_FWEN0);
-               add_tsu_reg(TSU_FWEN1);
-               add_tsu_reg(TSU_FCM);
-               add_tsu_reg(TSU_BSYSL0);
-               add_tsu_reg(TSU_BSYSL1);
-               add_tsu_reg(TSU_PRISL0);
-               add_tsu_reg(TSU_PRISL1);
-               add_tsu_reg(TSU_FWSL0);
-               add_tsu_reg(TSU_FWSL1);
+               if (cd->dual_port) {
+                       add_tsu_reg(TSU_FWEN0);
+                       add_tsu_reg(TSU_FWEN1);
+                       add_tsu_reg(TSU_FCM);
+                       add_tsu_reg(TSU_BSYSL0);
+                       add_tsu_reg(TSU_BSYSL1);
+                       add_tsu_reg(TSU_PRISL0);
+                       add_tsu_reg(TSU_PRISL1);
+                       add_tsu_reg(TSU_FWSL0);
+                       add_tsu_reg(TSU_FWSL1);
+               }
                add_tsu_reg(TSU_FWSLC);
-               add_tsu_reg(TSU_QTAGM0);
-               add_tsu_reg(TSU_QTAGM1);
-               add_tsu_reg(TSU_FWSR);
-               add_tsu_reg(TSU_FWINMK);
-               add_tsu_reg(TSU_ADQT0);
-               add_tsu_reg(TSU_ADQT1);
-               add_tsu_reg(TSU_VTAG0);
-               add_tsu_reg(TSU_VTAG1);
+               if (cd->dual_port) {
+                       add_tsu_reg(TSU_QTAGM0);
+                       add_tsu_reg(TSU_QTAGM1);
+                       add_tsu_reg(TSU_FWSR);
+                       add_tsu_reg(TSU_FWINMK);
+                       add_tsu_reg(TSU_ADQT0);
+                       add_tsu_reg(TSU_ADQT1);
+                       add_tsu_reg(TSU_VTAG0);
+                       add_tsu_reg(TSU_VTAG1);
+               }
                add_tsu_reg(TSU_ADSBSY);
                add_tsu_reg(TSU_TEN);
                add_tsu_reg(TSU_POST1);
index 0775b9464b4ea9e20701f52a2397f8a9966be08b..466483c4ac672d877c532f6e5bd637bb64a13e94 100644 (file)
@@ -30,7 +30,7 @@ struct sxgbe_stats {
 #define SXGBE_STAT(m)                                          \
 {                                                              \
        #m,                                                     \
-       FIELD_SIZEOF(struct sxgbe_extra_stats, m),              \
+       sizeof_field(struct sxgbe_extra_stats, m),              \
        offsetof(struct sxgbe_priv_data, xstats.m)              \
 }
 
index c56fcbb370665de398bf72bb8887a9e6874a61d8..52ed111d98f4d6a0638fd1cff1304246993eeb05 100644 (file)
@@ -2296,7 +2296,7 @@ __setup("sxgbeeth=", sxgbe_cmdline_opt);
 
 
 
-MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");
 
 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
 MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
index 4d9bbccc6f89cd477f7d56fb4ada54a6a23f17e6..a6ae2cdc19862be9f80a1c4d215c420d718bb44a 100644 (file)
@@ -1401,7 +1401,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
        }
 
        /* Shrink the original UC mapping of the memory BAR */
-       membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+       membase = ioremap(efx->membase_phys, uc_mem_map_size);
        if (!membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not shrink memory BAR to %x\n",
index 992c773620ecfa4d5a7bca066ce5091495ca1992..6891df471538cd391adb200dc04cbdab0ae9a67f 100644 (file)
@@ -1338,7 +1338,7 @@ static int efx_init_io(struct efx_nic *efx)
                rc = -EIO;
                goto fail3;
        }
-       efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
+       efx->membase = ioremap(efx->membase_phys, mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not map memory BAR at %llx+%x\n",
@@ -1472,6 +1472,12 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
        n_xdp_tx = num_possible_cpus();
        n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
 
+       vec_count = pci_msix_vec_count(efx->pci_dev);
+       if (vec_count < 0)
+               return vec_count;
+
+       max_channels = min_t(unsigned int, vec_count, max_channels);
+
        /* Check resources.
         * We need a channel per event queue, plus a VI per tx queue.
         * This may be more pessimistic than it needs to be.
@@ -1493,11 +1499,6 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
                          n_xdp_tx, n_xdp_ev);
        }
 
-       n_channels = min(n_channels, max_channels);
-
-       vec_count = pci_msix_vec_count(efx->pci_dev);
-       if (vec_count < 0)
-               return vec_count;
        if (vec_count < n_channels) {
                netif_err(efx, drv, efx->net_dev,
                          "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
@@ -1507,11 +1508,9 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
                n_channels = vec_count;
        }
 
-       efx->n_channels = n_channels;
+       n_channels = min(n_channels, max_channels);
 
-       /* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
-       if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
-               n_channels--;
+       efx->n_channels = n_channels;
 
        /* Ignore XDP tx channels when creating rx channels. */
        n_channels -= efx->n_xdp_channels;
@@ -1531,11 +1530,10 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
                efx->n_rx_channels = n_channels;
        }
 
-       if (efx->n_xdp_channels)
-               efx->xdp_channel_offset = efx->tx_channel_offset +
-                                         efx->n_tx_channels;
-       else
-               efx->xdp_channel_offset = efx->n_channels;
+       efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+       efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+       efx->xdp_channel_offset = n_channels;
 
        netif_dbg(efx, drv, efx->net_dev,
                  "Allocating %u RX channels\n",
@@ -1550,6 +1548,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 static int efx_probe_interrupts(struct efx_nic *efx)
 {
        unsigned int extra_channels = 0;
+       unsigned int rss_spread;
        unsigned int i, j;
        int rc;
 
@@ -1631,8 +1630,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
        for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
                if (!efx->extra_channel_type[i])
                        continue;
-               if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
-                   efx->n_channels <= extra_channels) {
+               if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
                        efx->extra_channel_type[i]->handle_no_channel(efx);
                } else {
                        --j;
@@ -1643,16 +1641,17 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                }
        }
 
+       rss_spread = efx->n_rx_channels;
        /* RSS might be usable on VFs even if it is disabled on the PF */
 #ifdef CONFIG_SFC_SRIOV
        if (efx->type->sriov_wanted) {
-               efx->rss_spread = ((efx->n_rx_channels > 1 ||
+               efx->rss_spread = ((rss_spread > 1 ||
                                    !efx->type->sriov_wanted(efx)) ?
-                                  efx->n_rx_channels : efx_vf_size(efx));
+                                  rss_spread : efx_vf_size(efx));
                return 0;
        }
 #endif
-       efx->rss_spread = efx->n_rx_channels;
+       efx->rss_spread = rss_spread;
 
        return 0;
 }
index eecc348b1c32f136a5a90405881315b80355bc90..53ae9faeb4c31d930dc5eff2dac0ac5455d5d6a3 100644 (file)
@@ -1265,7 +1265,7 @@ static int ef4_init_io(struct ef4_nic *efx)
                rc = -EIO;
                goto fail3;
        }
-       efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
+       efx->membase = ioremap(efx->membase_phys, mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not map memory BAR at %llx+%x\n",
index 1f88212be085fb91b310434609a2d419c40b54e1..dfd5182d9e471a34d0c0fed5827f9c3ad3cb3b67 100644 (file)
@@ -1533,9 +1533,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
 
 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
 {
-       return efx_channel_is_xdp_tx(channel) ||
-              (channel->type && channel->type->want_txqs &&
-               channel->type->want_txqs(channel));
+       return true;
 }
 
 static inline struct efx_tx_queue *
index ef52b24ad9e72c8489e5108a8bc4ff4d4e3b9364..c29bf862a94c545082ab215543231310caf54989 100644 (file)
@@ -96,11 +96,12 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
 
 void efx_rx_config_page_split(struct efx_nic *efx)
 {
-       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
+       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+                                     XDP_PACKET_HEADROOM,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
-               (efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
+               efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
@@ -190,14 +191,13 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
                page_offset = sizeof(struct efx_rx_page_state);
 
                do {
-                       page_offset += XDP_PACKET_HEADROOM;
-                       dma_addr += XDP_PACKET_HEADROOM;
-
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
-                       rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
+                       rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+                                          XDP_PACKET_HEADROOM;
                        rx_buf->page = page;
-                       rx_buf->page_offset = page_offset + efx->rx_ip_align;
+                       rx_buf->page_offset = page_offset + efx->rx_ip_align +
+                                             XDP_PACKET_HEADROOM;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
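
Folding XDP_PACKET_HEADROOM into rx_page_buf_step means the per-buffer stride and the buffers-per-page divisor now come from the same quantity, where the old code added the headroom in two separate places and let them drift. Back-of-envelope with illustrative numbers (not sfc's actual constants):

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int page = 4096, state = 64;    /* page, state struct */
            unsigned int dma_len = 1536, ip_align = 2;
            unsigned int headroom = 256, buf_align = 64;

            /* Patched layout: headroom is part of the stride itself. */
            unsigned int step = ALIGN_UP(dma_len + ip_align + headroom,
                                         buf_align);

            printf("step %u, bufs/page %u\n", step, (page - state) / step);
            return 0;
    }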
index 38068fc341410f1237eec25b61a9d9542d7dc1e9..6d90a097ce4e74ad2b4257828d02358e1b60e30c 100644 (file)
@@ -2454,7 +2454,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        pdata = netdev_priv(dev);
        dev->irq = irq;
-       pdata->ioaddr = ioremap_nocache(res->start, res_size);
+       pdata->ioaddr = ioremap(res->start, res_size);
        if (!pdata->ioaddr) {
                retval = -ENOMEM;
                goto out_ioremap_fail;
index f7e927ad67fac36ec5c02eaeeb0972a39b351a7b..b7032422393f69bd8c3ecd97bd8bee83258fdfff 100644 (file)
@@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev,
                phy_ethtool_get_wol(ndev->phydev, wol);
 }
 
-static int ave_ethtool_set_wol(struct net_device *ndev,
-                              struct ethtool_wolinfo *wol)
+static int __ave_ethtool_set_wol(struct net_device *ndev,
+                                struct ethtool_wolinfo *wol)
 {
-       int ret;
-
        if (!ndev->phydev ||
            (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
                return -EOPNOTSUPP;
 
-       ret = phy_ethtool_set_wol(ndev->phydev, wol);
+       return phy_ethtool_set_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+                              struct ethtool_wolinfo *wol)
+{
+       int ret;
+
+       ret = __ave_ethtool_set_wol(ndev, wol);
        if (!ret)
                device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
 
@@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev)
 
        /* set wol initial state disabled */
        wol.wolopts = 0;
-       ave_ethtool_set_wol(ndev, &wol);
+       __ave_ethtool_set_wol(ndev, &wol);
 
        if (!phy_interface_is_rgmii(phydev))
                phy_set_max_speed(phydev, SPEED_100);
@@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev)
 
        ave_ethtool_get_wol(ndev, &wol);
        wol.wolopts = priv->wolopts;
-       ave_ethtool_set_wol(ndev, &wol);
+       __ave_ethtool_set_wol(ndev, &wol);
 
        if (ndev->phydev) {
                ret = phy_resume(ndev->phydev);
index b210e987a1dbd3675d91cb9043b90e7e2a73498d..94f94686cf7de5d25759636d88e1220e6a98f32e 100644 (file)
@@ -365,9 +365,8 @@ struct dma_features {
        unsigned int arpoffsel;
 };
 
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
+/* RX Buffer size must be multiple of 4/8/16 bytes */
+#define BUF_SIZE_16KiB 16368
 #define BUF_SIZE_8KiB 8188
 #define BUF_SIZE_4KiB 4096
 #define BUF_SIZE_2KiB 2048
index bd6c01004913a0853348cefd7a93d1f9285c1472..0e2fa14f142377b0c47751d2eb99fa11cb4e445d 100644 (file)
@@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
        struct device *dev = dwmac->dev;
        const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
        struct meson8b_dwmac_clk_configs *clk_configs;
+       static const struct clk_div_table div_table[] = {
+               { .div = 2, .val = 2, },
+               { .div = 3, .val = 3, },
+               { .div = 4, .val = 4, },
+               { .div = 5, .val = 5, },
+               { .div = 6, .val = 6, },
+               { .div = 7, .val = 7, },
+       };
 
        clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
        if (!clk_configs)
@@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
        clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
        clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
        clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
-       clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
-                               CLK_DIVIDER_ALLOW_ZERO |
-                               CLK_DIVIDER_ROUND_CLOSEST;
+       clk_configs->m250_div.table = div_table;
+       clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
+                                     CLK_DIVIDER_ROUND_CLOSEST;
        clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
                                         &clk_divider_ops,
                                         &clk_configs->m250_div.hw);
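
Replacing CLK_DIVIDER_ONE_BASED with an explicit clk_div_table restricts the m250 divider to /2 through /7, presumably because a divide-by-1 setting is not usable here. In miniature, a table-based divider lookup behaves like this:

    #include <stdio.h>

    struct div_entry {
            unsigned int div, val;
    };

    static const struct div_entry table[] = {
            { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
            { 0, 0 },                       /* sentinel */
    };

    /* Register value for a divider, or -1 if the table forbids it. */
    static int val_for_div(unsigned int div)
    {
            const struct div_entry *e;

            for (e = table; e->div; e++)
                    if (e->div == div)
                            return e->val;
            return -1;
    }

    int main(void)
    {
            printf("%d %d\n", val_for_div(4), val_for_div(1));   /* 4 -1 */
            return 0;
    }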
index 1c8d84ed841020936129aaaf83a9bd9b8a2c8364..01b484cb177e107718cd5d9f080a2293b6efa4f9 100644 (file)
@@ -957,6 +957,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                /* default */
                break;
        case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
                reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
                break;
        case PHY_INTERFACE_MODE_RMII:
index 26353ef616b8bf1329f332ce01d8ae692ab617f1..7d40760e9ba887693e5b6b2a925eee73bd7ad92d 100644 (file)
@@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
         * rate, which then uses the auto-reparenting feature of the
         * clock driver, and enabling/disabling the clock.
         */
-       if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+       if (phy_interface_mode_is_rgmii(gmac->interface)) {
                clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
                clk_prepare_enable(gmac->tx_clk);
                gmac->clk_enabled = 1;
index 3b6e559aa0b999cb1b86edfba8db2466706df468..ef8a07c68ca794751589a5834da8affb26cd2f89 100644 (file)
 #define XGMAC_DMA_CH_RX_CONTROL(x)     (0x00003108 + (0x80 * (x)))
 #define XGMAC_RxPBL                    GENMASK(21, 16)
 #define XGMAC_RxPBL_SHIFT              16
+#define XGMAC_RBSZ                     GENMASK(14, 1)
+#define XGMAC_RBSZ_SHIFT               1
 #define XGMAC_RXST                     BIT(0)
 #define XGMAC_DMA_CH_TxDESC_HADDR(x)   (0x00003110 + (0x80 * (x)))
 #define XGMAC_DMA_CH_TxDESC_LADDR(x)   (0x00003114 + (0x80 * (x)))
index 22a7f0cc1b904cf2b7bf197762d674f250fdfdec..f3f08ccc379bc1524df4e0ecb4105d8a1ad7c118 100644 (file)
@@ -482,7 +482,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
        u32 value;
 
        value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
-       value |= bfsize << 1;
+       value &= ~XGMAC_RBSZ;
+       value |= bfsize << XGMAC_RBSZ_SHIFT;
        writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
 }
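
The dwxgmac2_set_bfsize() fix is the classic read-modify-write rule: clear the field before OR-ing in the new value, otherwise a second call leaves stale bits behind. Demonstrated standalone with the same bit layout (RBSZ occupies bits 14:1):

    #include <stdint.h>
    #include <stdio.h>

    #define RBSZ_MASK   0x7ffeu             /* bits 14:1 */
    #define RBSZ_SHIFT  1

    static uint32_t set_rbsz(uint32_t reg, uint32_t bfsize)
    {
            reg &= ~RBSZ_MASK;                      /* clear the old field */
            return reg | (bfsize << RBSZ_SHIFT);    /* insert the new one  */
    }

    int main(void)
    {
            uint32_t reg = set_rbsz(0, 0x600);      /* first buffer size */

            reg = set_rbsz(reg, 0x100);             /* now shrink it */
            /* Prints 0x100; the unfixed code would read back 0x700. */
            printf("%#x\n", (reg & RBSZ_MASK) >> RBSZ_SHIFT);
            return 0;
    }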
 
index 1a768837ca728af9a7047db590a6ad40446f3ca5..b29603ec744c4225ad5bddd4e04dbb0f8eeba4ac 100644 (file)
@@ -34,7 +34,7 @@ struct stmmac_stats {
 };
 
 #define STMMAC_STAT(m) \
-       { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),       \
+       { #m, sizeof_field(struct stmmac_extra_stats, m),       \
        offsetof(struct stmmac_priv, xstats.m)}
 
 static const struct stmmac_stats stmmac_gstrings_stats[] = {
@@ -163,7 +163,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
 
 /* HW MAC Management counters (if supported) */
 #define STMMAC_MMC_STAT(m)     \
-       { #m, FIELD_SIZEOF(struct stmmac_counters, m),  \
+       { #m, sizeof_field(struct stmmac_counters, m),  \
        offsetof(struct stmmac_priv, mmc.m)}
 
 static const struct stmmac_stats stmmac_mmc[] = {
index bbc65bd332a8ba9e0cf5f8a4ac353434dfb8e0cc..80d59b77590797c6d091528676687e2a2b320455 100644 (file)
@@ -46,7 +46,7 @@
 #include "dwxgmac2.h"
 #include "hwif.h"
 
-#define        STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
+#define        STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
 #define        TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
 
 /* Module parameters */
@@ -106,6 +106,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 #ifdef CONFIG_DEBUG_FS
+static const struct net_device_ops stmmac_netdev_ops;
 static void stmmac_init_fs(struct net_device *dev);
 static void stmmac_exit_fs(struct net_device *dev);
 #endif
@@ -1109,7 +1110,9 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
 {
        int ret = bufsize;
 
-       if (mtu >= BUF_SIZE_4KiB)
+       if (mtu >= BUF_SIZE_8KiB)
+               ret = BUF_SIZE_16KiB;
+       else if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
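
stmmac_set_bfsize() gains a 16KiB tier on top of the existing ladder, paired with the BUF_SIZE_16KiB change above it to 16368, a multiple of 16 just under 16K. A sketch of the selection; the branches below BUF_SIZE_2KiB are reconstructed from the visible pattern and DEFAULT_BUFSIZE of 1536 is an assumption:

    #include <stdio.h>

    #define BUF_SIZE_16KiB  16368
    #define BUF_SIZE_8KiB   8188
    #define BUF_SIZE_4KiB   4096
    #define BUF_SIZE_2KiB   2048
    #define DEFAULT_BUFSIZE 1536            /* assumed */

    static int set_bfsize(int mtu, int dflt)
    {
            if (mtu >= BUF_SIZE_8KiB)
                    return BUF_SIZE_16KiB;  /* the new top tier */
            if (mtu >= BUF_SIZE_4KiB)
                    return BUF_SIZE_8KiB;
            if (mtu >= BUF_SIZE_2KiB)
                    return BUF_SIZE_4KiB;
            if (mtu > DEFAULT_BUFSIZE)      /* reconstructed branch */
                    return BUF_SIZE_2KiB;
            return dflt;
    }

    int main(void)
    {
            printf("%d %d\n",
                   set_bfsize(9000, DEFAULT_BUFSIZE),    /* 16368 */
                   set_bfsize(1500, DEFAULT_BUFSIZE));   /* 1536  */
            return 0;
    }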
@@ -1293,19 +1296,9 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_count = priv->plat->rx_queues_to_use;
        int ret = -ENOMEM;
-       int bfsize = 0;
        int queue;
        int i;
 
-       bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
-       if (bfsize < 0)
-               bfsize = 0;
-
-       if (bfsize < BUF_SIZE_16KiB)
-               bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
-       priv->dma_buf_sz = bfsize;
-
        /* RX INITIALIZATION */
        netif_dbg(priv, probe, priv->dev,
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");
@@ -1347,8 +1340,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
                }
        }
 
-       buf_sz = bfsize;
-
        return 0;
 
 err_init_rx_buffers:
@@ -2658,6 +2649,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
 static int stmmac_open(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       int bfsize = 0;
        u32 chan;
        int ret;
 
@@ -2677,7 +2669,16 @@ static int stmmac_open(struct net_device *dev)
        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
        priv->xstats.threshold = tc;
 
-       priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+       bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+       if (bfsize < 0)
+               bfsize = 0;
+
+       if (bfsize < BUF_SIZE_16KiB)
+               bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+
+       priv->dma_buf_sz = bfsize;
+       buf_sz = bfsize;
+
        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
 
        ret = alloc_dma_desc_resources(priv);
@@ -3053,8 +3054,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                tx_q->tx_count_frames = 0;
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
-       } else {
-               stmmac_tx_timer_arm(priv, queue);
        }
 
        /* We've used all descriptors we need for this skb, however,
@@ -3125,6 +3124,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+       stmmac_tx_timer_arm(priv, queue);
 
        return NETDEV_TX_OK;
 
@@ -3276,8 +3276,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                tx_q->tx_count_frames = 0;
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
-       } else {
-               stmmac_tx_timer_arm(priv, queue);
        }
 
        /* We've used all descriptors we need for this skb, however,
@@ -3366,6 +3364,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+       stmmac_tx_timer_arm(priv, queue);
 
        return NETDEV_TX_OK;
 
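Both xmit paths now rearm the coalescing timer unconditionally after ringing the doorbell, rather than only when the interrupt-on-completion bit was skipped, so descriptors submitted without an IC bit are still reclaimed when the timer fires. The resulting pattern, as a sketch:

        /* TX coalescing after this change (sketch):
         *
         *      if (should_interrupt)
         *              stmmac_set_tx_ic(priv, desc);    - IRQ on this desc
         *      ...
         *      stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tail, queue);
         *      stmmac_tx_timer_arm(priv, queue);        - always rearm
         */
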
@@ -3646,8 +3645,9 @@ read_again:
                 * feature is always disabled and packets need to be
                 * stripped manually.
                 */
-               if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
-                   unlikely(status != llc_snap)) {
+               if (likely(!(status & rx_not_ls)) &&
+                   (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+                    unlikely(status != llc_snap))) {
                        if (buf2_len)
                                buf2_len -= ETH_FCS_LEN;
                        else
@@ -3829,12 +3829,24 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       int txfifosz = priv->plat->tx_fifo_size;
+
+       if (txfifosz == 0)
+               txfifosz = priv->dma_cap.tx_fifo_size;
+
+       txfifosz /= priv->plat->tx_queues_to_use;
 
        if (netif_running(dev)) {
                netdev_err(priv->dev, "must be stopped to change its MTU\n");
                return -EBUSY;
        }
 
+       new_mtu = STMMAC_ALIGN(new_mtu);
+
+       /* Reject if the per-queue FIFO is too small or the MTU too large */
+       if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+               return -EINVAL;
+
        dev->mtu = new_mtu;
 
        netdev_update_features(dev);
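
Worked example for the new check: with a 16384-byte TX FIFO shared by four queues, txfifosz becomes 16384 / 4 = 4096, so an aligned MTU above 4096 is now rejected with -EINVAL even though the BUF_SIZE_16KiB cap alone would have allowed it.
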
@@ -4245,6 +4257,34 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
 }
 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
 
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       if (dev->netdev_ops != &stmmac_netdev_ops)
+               goto done;
+
+       switch (event) {
+       case NETDEV_CHANGENAME:
+               if (priv->dbgfs_dir)
+                       priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+                                                        priv->dbgfs_dir,
+                                                        stmmac_fs_dir,
+                                                        dev->name);
+               break;
+       }
+done:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block stmmac_notifier = {
+       .notifier_call = stmmac_device_event,
+};
+
 static void stmmac_init_fs(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
@@ -4259,12 +4299,15 @@ static void stmmac_init_fs(struct net_device *dev)
        /* Entry to report the DMA HW features */
        debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
                            &stmmac_dma_cap_fops);
+
+       register_netdevice_notifier(&stmmac_notifier);
 }
 
 static void stmmac_exit_fs(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
+       unregister_netdevice_notifier(&stmmac_notifier);
        debugfs_remove_recursive(priv->dbgfs_dir);
 }
 #endif /* CONFIG_DEBUG_FS */
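
The NETDEV_CHANGENAME handler above is the stock netdevice-notifier pattern. A minimal self-contained sketch of that pattern; the demo_* names are illustrative, not from the driver:

        #include <linux/netdevice.h>
        #include <linux/notifier.h>

        static int demo_netdev_event(struct notifier_block *nb,
                                     unsigned long event, void *ptr)
        {
                struct net_device *dev = netdev_notifier_info_to_dev(ptr);

                if (event == NETDEV_CHANGENAME)
                        pr_info("device renamed to %s\n", dev->name);
                return NOTIFY_DONE;
        }

        static struct notifier_block demo_nb = {
                .notifier_call = demo_netdev_event,
        };

        /* register_netdevice_notifier(&demo_nb) at init time;
         * unregister_netdevice_notifier(&demo_nb) on teardown. */
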
index bedaff0c13bded337997b21b87d076104f16de49..d10ac54bf385adb1e968a28cc9593397d56c1156 100644 (file)
@@ -320,7 +320,7 @@ out:
 static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
                         struct device_node *np, struct device *dev)
 {
-       bool mdio = true;
+       bool mdio = !of_phy_is_fixed_link(np);
        static const struct of_device_id need_mdio_ids[] = {
                { .compatible = "snps,dwc-qos-ethernet-4.10" },
                {},
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
                *mac = NULL;
        }
 
-       rc = of_get_phy_mode(np, &plat->phy_interface);
-       if (rc)
-               return ERR_PTR(rc);
+       plat->phy_interface = device_get_phy_mode(&pdev->dev);
+       if (plat->phy_interface < 0)
+               return ERR_PTR(plat->phy_interface);
 
        plat->interface = stmmac_of_get_mac_mode(np);
        if (plat->interface < 0)
index f3d8b9336b8e377f3eadc13031cf043f88f1a4d2..450d7dac3ea6f195fdd8cf083b86022a1cfa648f 100644 (file)
@@ -80,7 +80,7 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
        if (attr->max_size && (attr->max_size > size))
                size = attr->max_size;
 
-       skb = netdev_alloc_skb_ip_align(priv->dev, size);
+       skb = netdev_alloc_skb(priv->dev, size);
        if (!skb)
                return NULL;
 
@@ -244,6 +244,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
                                         struct net_device *orig_ndev)
 {
        struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+       unsigned char *src = tpriv->packet->src;
+       unsigned char *dst = tpriv->packet->dst;
        struct stmmachdr *shdr;
        struct ethhdr *ehdr;
        struct udphdr *uhdr;
@@ -260,15 +262,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
                goto out;
 
        ehdr = (struct ethhdr *)skb_mac_header(skb);
-       if (tpriv->packet->dst) {
-               if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+       if (dst) {
+               if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
                        goto out;
        }
        if (tpriv->packet->sarc) {
-               if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+               if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
                        goto out;
-       } else if (tpriv->packet->src) {
-               if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
+       } else if (src) {
+               if (!ether_addr_equal_unaligned(ehdr->h_source, src))
                        goto out;
        }
 
@@ -624,6 +626,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
                return -EOPNOTSUPP;
        if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
                return -EOPNOTSUPP;
+       if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+               return -EOPNOTSUPP;
 
        while (--tries) {
                /* We only need to check the mc_addr for collisions */
@@ -666,6 +670,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
 
        if (stmmac_filter_check(priv))
                return -EOPNOTSUPP;
+       if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
+               return -EOPNOTSUPP;
        if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
                return -EOPNOTSUPP;
 
@@ -710,7 +716,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
        struct ethhdr *ehdr;
 
        ehdr = (struct ethhdr *)skb_mac_header(skb);
-       if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+       if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
                goto out;
        if (ehdr->h_proto != htons(ETH_P_PAUSE))
                goto out;
@@ -847,12 +853,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb,
        if (tpriv->vlan_id) {
                if (skb->vlan_proto != htons(proto))
                        goto out;
-               if (skb->vlan_tci != tpriv->vlan_id)
+               if (skb->vlan_tci != tpriv->vlan_id) {
+                       /* The VLAN filter did not work. */
+                       tpriv->ok = false;
+                       complete(&tpriv->comp);
                        goto out;
+               }
        }
 
        ehdr = (struct ethhdr *)skb_mac_header(skb);
-       if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+       if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
                goto out;
 
        ihdr = ip_hdr(skb);
@@ -961,6 +971,9 @@ static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
 {
        int ret, prev_cap = priv->dma_cap.vlhash;
 
+       if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+               return -EOPNOTSUPP;
+
        priv->dma_cap.vlhash = 0;
        ret = __stmmac_test_vlanfilt(priv);
        priv->dma_cap.vlhash = prev_cap;
@@ -1053,6 +1066,9 @@ static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
 {
        int ret, prev_cap = priv->dma_cap.vlhash;
 
+       if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
+               return -EOPNOTSUPP;
+
        priv->dma_cap.vlhash = 0;
        ret = __stmmac_test_dvlanfilt(priv);
        priv->dma_cap.vlhash = prev_cap;
@@ -1319,16 +1335,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
        struct stmmac_packet_attrs attr = { };
        struct flow_dissector *dissector;
        struct flow_cls_offload *cls;
+       int ret, old_enable = 0;
        struct flow_rule *rule;
-       int ret;
 
        if (!tc_can_offload(priv->dev))
                return -EOPNOTSUPP;
        if (!priv->dma_cap.l3l4fnum)
                return -EOPNOTSUPP;
-       if (priv->rss.enable)
+       if (priv->rss.enable) {
+               old_enable = priv->rss.enable;
+               priv->rss.enable = false;
                stmmac_rss_configure(priv, priv->hw, NULL,
                                     priv->plat->rx_queues_to_use);
+       }
 
        dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
        if (!dissector) {
@@ -1395,7 +1414,8 @@ cleanup_cls:
 cleanup_dissector:
        kfree(dissector);
 cleanup_rss:
-       if (priv->rss.enable) {
+       if (old_enable) {
+               priv->rss.enable = old_enable;
                stmmac_rss_configure(priv, priv->hw, &priv->rss,
                                     priv->plat->rx_queues_to_use);
        }
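
Both the L3 and L4 filter tests now remember whether RSS was active, actually clear priv->rss.enable while the filter is under test, and restore the saved value in cleanup, so a failing test can no longer leave RSS half-disabled. The save/restore skeleton (nqueues is illustrative):

        old_enable = priv->rss.enable;
        if (old_enable) {
                priv->rss.enable = false;
                stmmac_rss_configure(priv, priv->hw, NULL, nqueues);
        }
        /* ... run the filter test ... */
        if (old_enable) {
                priv->rss.enable = old_enable;
                stmmac_rss_configure(priv, priv->hw, &priv->rss, nqueues);
        }
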
@@ -1440,16 +1460,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
        struct stmmac_packet_attrs attr = { };
        struct flow_dissector *dissector;
        struct flow_cls_offload *cls;
+       int ret, old_enable = 0;
        struct flow_rule *rule;
-       int ret;
 
        if (!tc_can_offload(priv->dev))
                return -EOPNOTSUPP;
        if (!priv->dma_cap.l3l4fnum)
                return -EOPNOTSUPP;
-       if (priv->rss.enable)
+       if (priv->rss.enable) {
+               old_enable = priv->rss.enable;
+               priv->rss.enable = false;
                stmmac_rss_configure(priv, priv->hw, NULL,
                                     priv->plat->rx_queues_to_use);
+       }
 
        dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
        if (!dissector) {
@@ -1521,7 +1544,8 @@ cleanup_cls:
 cleanup_dissector:
        kfree(dissector);
 cleanup_rss:
-       if (priv->rss.enable) {
+       if (old_enable) {
+               priv->rss.enable = old_enable;
                stmmac_rss_configure(priv, priv->hw, &priv->rss,
                                     priv->plat->rx_queues_to_use);
        }
@@ -1574,7 +1598,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb,
        struct arphdr *ahdr;
 
        ehdr = (struct ethhdr *)skb_mac_header(skb);
-       if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
+       if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
                goto out;
 
        ahdr = arp_hdr(skb);
index 7d972e0fd2b04d8759a016b023e07b30381e3dd3..9ffae12a212273c8c3d85844ff0a1e0dc6efcfd3 100644 (file)
@@ -577,6 +577,10 @@ static int tc_setup_cls(struct stmmac_priv *priv,
 {
        int ret = 0;
 
+       /* When RSS is enabled, filtering is bypassed */
+       if (priv->rss.enable)
+               return -EBUSY;
+
        switch (cls->command) {
        case FLOW_CLS_REPLACE:
                ret = tc_add_flow(priv, cls);
index a46f4189fde37ba1b2102b80e1415f6351cb03bf..bf98e0fa7d8be8ccf2c4b4cb60b1a70df073e276 100644 (file)
@@ -63,6 +63,7 @@ config TI_CPSW_SWITCHDEV
        tristate "TI CPSW Switch Support with switchdev"
        depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
        depends on NET_SWITCHDEV
+       select PAGE_POOL
        select TI_DAVINCI_MDIO
        select MFD_SYSCON
        select REGMAP
index d34df8e5cf941b8ff4e466fdb1731e39db1ee658..ecf776ad86895317479e782d2bb5704b550d3357 100644 (file)
@@ -5,6 +5,7 @@
 
 obj-$(CONFIG_TI_CPSW) += cpsw-common.o
 obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
+obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
 
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_CPMAC) += cpmac.o
index 31248a6cc642e99f990cdd96b038724320b007b3..fa54efe3be63544dcd7f59142358854cbc7e6f40 100644 (file)
@@ -73,13 +73,13 @@ enum {
 };
 
 #define CPSW_STAT(m)           CPSW_STATS,                             \
-                               FIELD_SIZEOF(struct cpsw_hw_stats, m), \
+                               sizeof_field(struct cpsw_hw_stats, m), \
                                offsetof(struct cpsw_hw_stats, m)
 #define CPDMA_RX_STAT(m)       CPDMA_RX_STATS,                            \
-                               FIELD_SIZEOF(struct cpdma_chan_stats, m), \
+                               sizeof_field(struct cpdma_chan_stats, m), \
                                offsetof(struct cpdma_chan_stats, m)
 #define CPDMA_TX_STAT(m)       CPDMA_TX_STATS,                            \
-                               FIELD_SIZEOF(struct cpdma_chan_stats, m), \
+                               sizeof_field(struct cpdma_chan_stats, m), \
                                offsetof(struct cpdma_chan_stats, m)
 
 static const struct cpsw_stats cpsw_gstrings_stats[] = {
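
The FIELD_SIZEOF() occurrences here (and in the netcp, fjes, geneve, and netvsc hunks further down) are part of the tree-wide rename to sizeof_field(); the semantics are unchanged. A minimal sketch, assuming <linux/stddef.h> and <linux/types.h>:

        #include <linux/stddef.h>       /* sizeof_field() */
        #include <linux/types.h>

        struct demo {
                u32 addr;
                u16 port;
        };

        /* sizeof_field(T, m) expands to sizeof(((T *)0)->m) */
        static const size_t demo_port_len = sizeof_field(struct demo, port); /* 2 */
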
index 37ba708ac78136c70df9897999fd8c2c28e96302..6614fa3089b2ce6124f78f8b050d9c00e5762431 100644 (file)
@@ -1018,7 +1018,6 @@ static int cpdma_chan_submit_si(struct submit_info *si)
        struct cpdma_chan               *chan = si->chan;
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        int                             len = si->len;
-       int                             swlen = len;
        struct cpdma_desc __iomem       *desc;
        dma_addr_t                      buffer;
        u32                             mode;
@@ -1046,7 +1045,6 @@ static int cpdma_chan_submit_si(struct submit_info *si)
        if (si->data_dma) {
                buffer = si->data_dma;
                dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
-               swlen |= CPDMA_DMA_EXT_MAP;
        } else {
                buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
                ret = dma_mapping_error(ctlr->dev, buffer);
@@ -1065,7 +1063,8 @@ static int cpdma_chan_submit_si(struct submit_info *si)
        writel_relaxed(mode | len, &desc->hw_mode);
        writel_relaxed((uintptr_t)si->token, &desc->sw_token);
        writel_relaxed(buffer, &desc->sw_buffer);
-       writel_relaxed(swlen, &desc->sw_len);
+       writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
+                      &desc->sw_len);
        desc_read(desc, sw_len);
 
        __cpdma_chan_submit(chan, desc);
index 1b2702f7445520234187ec66230fe1dacd0d2a26..675f31de59ddc7204f79bf36afa5c5dce736cc33 100644 (file)
@@ -2019,7 +2019,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
                        goto quit;
                }
 
-               efuse = devm_ioremap_nocache(dev, res.start, size);
+               efuse = devm_ioremap(dev, res.start, size);
                if (!efuse) {
                        dev_err(dev, "could not map resource\n");
                        devm_release_mem_region(dev, res.start, size);
index 86a3f42a3dcc01371db5996747b1764c3e09843c..d6a192c1f3378e46277eb77e9851e3d9df1c0fec 100644 (file)
@@ -783,28 +783,28 @@ struct netcp_ethtool_stat {
 #define GBE_STATSA_INFO(field)                                         \
 {                                                                      \
        "GBE_A:"#field, GBE_STATSA_MODULE,                              \
-       FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
+       sizeof_field(struct gbe_hw_stats, field),                       \
        offsetof(struct gbe_hw_stats, field)                            \
 }
 
 #define GBE_STATSB_INFO(field)                                         \
 {                                                                      \
        "GBE_B:"#field, GBE_STATSB_MODULE,                              \
-       FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
+       sizeof_field(struct gbe_hw_stats, field),                       \
        offsetof(struct gbe_hw_stats, field)                            \
 }
 
 #define GBE_STATSC_INFO(field)                                         \
 {                                                                      \
        "GBE_C:"#field, GBE_STATSC_MODULE,                              \
-       FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
+       sizeof_field(struct gbe_hw_stats, field),                       \
        offsetof(struct gbe_hw_stats, field)                            \
 }
 
 #define GBE_STATSD_INFO(field)                                         \
 {                                                                      \
        "GBE_D:"#field, GBE_STATSD_MODULE,                              \
-       FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
+       sizeof_field(struct gbe_hw_stats, field),                       \
        offsetof(struct gbe_hw_stats, field)                            \
 }
 
@@ -957,7 +957,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
 #define GBENU_STATS_HOST(field)                                        \
 {                                                              \
        "GBE_HOST:"#field, GBENU_STATS0_MODULE,                 \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
@@ -967,56 +967,56 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
 #define GBENU_STATS_P1(field)                                  \
 {                                                              \
        "GBE_P1:"#field, GBENU_STATS1_MODULE,                   \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
 #define GBENU_STATS_P2(field)                                  \
 {                                                              \
        "GBE_P2:"#field, GBENU_STATS2_MODULE,                   \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
 #define GBENU_STATS_P3(field)                                  \
 {                                                              \
        "GBE_P3:"#field, GBENU_STATS3_MODULE,                   \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
 #define GBENU_STATS_P4(field)                                  \
 {                                                              \
        "GBE_P4:"#field, GBENU_STATS4_MODULE,                   \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
 #define GBENU_STATS_P5(field)                                  \
 {                                                              \
        "GBE_P5:"#field, GBENU_STATS5_MODULE,                   \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
 #define GBENU_STATS_P6(field)                                  \
 {                                                              \
        "GBE_P6:"#field, GBENU_STATS6_MODULE,                   \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
 #define GBENU_STATS_P7(field)                                  \
 {                                                              \
        "GBE_P7:"#field, GBENU_STATS7_MODULE,                   \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
 #define GBENU_STATS_P8(field)                                  \
 {                                                              \
        "GBE_P8:"#field, GBENU_STATS8_MODULE,                   \
-       FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
+       sizeof_field(struct gbenu_hw_stats, field),             \
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
@@ -1607,21 +1607,21 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
 #define XGBE_STATS0_INFO(field)                                \
 {                                                      \
        "GBE_0:"#field, XGBE_STATS0_MODULE,             \
-       FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
+       sizeof_field(struct xgbe_hw_stats, field),      \
        offsetof(struct xgbe_hw_stats, field)           \
 }
 
 #define XGBE_STATS1_INFO(field)                                \
 {                                                      \
        "GBE_1:"#field, XGBE_STATS1_MODULE,             \
-       FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
+       sizeof_field(struct xgbe_hw_stats, field),      \
        offsetof(struct xgbe_hw_stats, field)           \
 }
 
 #define XGBE_STATS2_INFO(field)                                \
 {                                                      \
        "GBE_2:"#field, XGBE_STATS2_MODULE,             \
-       FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
+       sizeof_field(struct xgbe_hw_stats, field),      \
        offsetof(struct xgbe_hw_stats, field)           \
 }
 
index 21c1b4322ea78941d0ee58801dbe62f3442f1acc..c66aab78dcac801e234c80c7ac4b438f0da538d9 100644 (file)
@@ -1202,7 +1202,7 @@ static int temac_probe(struct platform_device *pdev)
 
        /* map device registers */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
+       lp->regs = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));
        if (IS_ERR(lp->regs)) {
                dev_err(&pdev->dev, "could not map TEMAC registers\n");
@@ -1296,7 +1296,7 @@ static int temac_probe(struct platform_device *pdev)
        } else if (pdata) {
                /* 2nd memory resource specifies DMA registers */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
+               lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
                                                     resource_size(res));
                if (IS_ERR(lp->sdma_regs)) {
                        dev_err(&pdev->dev,
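
ioremap_nocache() and devm_ioremap_nocache() had long been aliases of the plain variants, hence the mechanical rename in this and the following fddi/fjes hunks. Note that devm_ioremap() reports failure as NULL, not as an ERR_PTR value; the IS_ERR() checks visible in the surrounding context predate this hunk. The idiomatic check, as a sketch:

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!base)                      /* NULL on failure, not ERR_PTR */
                return -ENOMEM;
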
index 56b7791911bfc3b32d87922ca45632202e7963a4..077c68498f048a57ac798f665a2ee043f43cf06c 100644 (file)
@@ -614,7 +614,7 @@ static int dfx_register(struct device *bdev)
 
        /* Set up I/O base address. */
        if (dfx_use_mmio) {
-               bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]);
+               bp->base.mem = ioremap(bar_start[0], bar_len[0]);
                if (!bp->base.mem) {
                        printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
                        err = -ENOMEM;
index 060712c666bf432f0570a98532a655984144e9b1..eaf85db53a5efe0de0ae6ae4cf13282499b6adc0 100644 (file)
@@ -1318,7 +1318,7 @@ static int fza_probe(struct device *bdev)
        }
 
        /* MMIO mapping setup. */
-       mmio = ioremap_nocache(start, len);
+       mmio = ioremap(start, len);
        if (!mmio) {
                pr_err("%s: cannot map MMIO\n", fp->name);
                ret = -ENOMEM;
index 09f3604cfbf8fe3a73b409fc9021a4df686440f2..746736c83873702ee30114f5bce45b32ce7352eb 100644 (file)
@@ -21,7 +21,7 @@ struct fjes_stats {
 
 #define FJES_STAT(name, stat) { \
        .stat_string = name, \
-       .sizeof_stat = FIELD_SIZEOF(struct fjes_adapter, stat), \
+       .sizeof_stat = sizeof_field(struct fjes_adapter, stat), \
        .stat_offset = offsetof(struct fjes_adapter, stat) \
 }
 
index 8a4fbfacad7ef09e0463bf1a6a0fa76b75393210..065bb0a40b1d1e3343bfaeadc3eb85b8a129f551 100644 (file)
@@ -40,7 +40,7 @@ static u8 *fjes_hw_iomap(struct fjes_hw *hw)
                return NULL;
        }
 
-       base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
+       base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size);
 
        return base;
 }
index b517c1af9de052d6d5fcc2d8b249d8544d5b49b9..91a1059517f55adc66cbeaed328a6077fa47b0d1 100644 (file)
@@ -166,6 +166,9 @@ static int fjes_acpi_add(struct acpi_device *device)
        /* create platform_device */
        plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
                                                   ARRAY_SIZE(fjes_resource));
+       if (IS_ERR(plat_dev))
+               return PTR_ERR(plat_dev);
+
        device->driver_data = plat_dev;
 
        return 0;
index c611b6a80b20d590266c699fe86902187b812490..9237b69d8e217671a532e9af01809a277ea2c3dc 100644 (file)
@@ -28,7 +28,7 @@ TRACE_EVENT(fjes_hw_issue_request_command,
                __field(u8, cs_busy)
                __field(u8, cs_complete)
                __field(int, timeout)
-               __field(int, ret);
+               __field(int, ret)
        ),
        TP_fast_assign(
                __entry->cr_req = cr->bits.req_code;
index 5c6b7fc04ea6294b6592b23b278d3b70600ea05a..75757e9954ba24cda3aa8e9252a60be1bf9cd14e 100644 (file)
@@ -1156,7 +1156,7 @@ static void geneve_setup(struct net_device *dev)
 
 static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
        [IFLA_GENEVE_ID]                = { .type = NLA_U32 },
-       [IFLA_GENEVE_REMOTE]            = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+       [IFLA_GENEVE_REMOTE]            = { .len = sizeof_field(struct iphdr, daddr) },
        [IFLA_GENEVE_REMOTE6]           = { .len = sizeof(struct in6_addr) },
        [IFLA_GENEVE_TTL]               = { .type = NLA_U8 },
        [IFLA_GENEVE_TOS]               = { .type = NLA_U8 },
index ecfe26215935d3c5df02a9de58693f492b50befc..9b3ba98726d7c98ed67acdda9fbb5a4bc7f3aa9f 100644 (file)
@@ -38,7 +38,6 @@ struct pdp_ctx {
        struct hlist_node       hlist_addr;
 
        union {
-               u64             tid;
                struct {
                        u64     tid;
                        u16     flow;
@@ -541,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
                mtu = dst_mtu(&rt->dst);
        }
 
-       rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+       rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
 
        if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
            mtu < ntohs(iph->tot_len)) {
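
The dst_ops->update_pmtu() hook grew a confirm_neigh flag in this cycle; tunnel drivers that merely relay a path-MTU hint pass false so the neighbour entry is not spuriously treated as freshly confirmed. The call shape:

        /* new signature: update_pmtu(dst, sk, skb, mtu, confirm_neigh) */
        rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
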
@@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev)
 }
 
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
-static void gtp_hashtable_free(struct gtp_dev *gtp);
 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
 
+static void gtp_destructor(struct net_device *dev)
+{
+       struct gtp_dev *gtp = netdev_priv(dev);
+
+       kfree(gtp->addr_hash);
+       kfree(gtp->tid_hash);
+}
+
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[],
                       struct netlink_ext_ack *extack)
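
Freeing the hash tables from dev->priv_destructor means they are released only after the last netdev reference drops, which is what makes the synchronize_rcu()-based gtp_hashtable_free() below removable. The pattern, with hypothetical demo_* names:

        static void demo_destructor(struct net_device *dev)
        {
                struct demo_priv *p = netdev_priv(dev); /* hypothetical priv */

                kfree(p->addr_hash);    /* safe: no RCU readers remain */
                kfree(p->tid_hash);
        }

        /* in newlink, after registration succeeds:
         *      dev->priv_destructor = demo_destructor;
         */
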
@@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
        if (err < 0)
                return err;
 
-       if (!data[IFLA_GTP_PDP_HASHSIZE])
+       if (!data[IFLA_GTP_PDP_HASHSIZE]) {
                hashsize = 1024;
-       else
+       } else {
                hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+               if (!hashsize)
+                       hashsize = 1024;
+       }
 
        err = gtp_hashtable_new(gtp, hashsize);
        if (err < 0)
@@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 
        gn = net_generic(dev_net(dev), gtp_net_id);
        list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+       dev->priv_destructor = gtp_destructor;
 
        netdev_dbg(dev, "registered new GTP interface\n");
 
        return 0;
 
 out_hashtable:
-       gtp_hashtable_free(gtp);
+       kfree(gtp->addr_hash);
+       kfree(gtp->tid_hash);
 out_encap:
        gtp_encap_disable(gtp);
        return err;
@@ -693,8 +704,13 @@ out_encap:
 static void gtp_dellink(struct net_device *dev, struct list_head *head)
 {
        struct gtp_dev *gtp = netdev_priv(dev);
+       struct pdp_ctx *pctx;
+       int i;
+
+       for (i = 0; i < gtp->hash_size; i++)
+               hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+                       pdp_context_delete(pctx);
 
-       gtp_hashtable_free(gtp);
        list_del_rcu(&gtp->list);
        unregister_netdevice_queue(dev, head);
 }
@@ -772,20 +788,6 @@ err1:
        return -ENOMEM;
 }
 
-static void gtp_hashtable_free(struct gtp_dev *gtp)
-{
-       struct pdp_ctx *pctx;
-       int i;
-
-       for (i = 0; i < gtp->hash_size; i++)
-               hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
-                       pdp_context_delete(pctx);
-
-       synchronize_rcu();
-       kfree(gtp->addr_hash);
-       kfree(gtp->tid_hash);
-}
-
 static struct sock *gtp_encap_enable_socket(int fd, int type,
                                            struct gtp_dev *gtp)
 {
@@ -802,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
                return NULL;
        }
 
-       if (sock->sk->sk_protocol != IPPROTO_UDP) {
+       sk = sock->sk;
+       if (sk->sk_protocol != IPPROTO_UDP ||
+           sk->sk_type != SOCK_DGRAM ||
+           (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
                pr_debug("socket fd=%d not UDP\n", fd);
                sk = ERR_PTR(-EINVAL);
                goto out_sock;
        }
 
-       lock_sock(sock->sk);
-       if (sock->sk->sk_user_data) {
+       lock_sock(sk);
+       if (sk->sk_user_data) {
                sk = ERR_PTR(-EBUSY);
-               goto out_sock;
+               goto out_rel_sock;
        }
 
-       sk = sock->sk;
        sock_hold(sk);
 
        tuncfg.sk_user_data = gtp;
@@ -824,8 +828,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
 
        setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
 
-out_sock:
+out_rel_sock:
        release_sock(sock->sk);
+out_sock:
        sockfd_put(sock);
        return sk;
 }
@@ -926,24 +931,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
        }
 }
 
-static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
-                       struct genl_info *info)
+static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+                      struct genl_info *info)
 {
+       struct pdp_ctx *pctx, *pctx_tid = NULL;
        struct net_device *dev = gtp->dev;
        u32 hash_ms, hash_tid = 0;
-       struct pdp_ctx *pctx;
+       unsigned int version;
        bool found = false;
        __be32 ms_addr;
 
        ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
        hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+       version = nla_get_u32(info->attrs[GTPA_VERSION]);
 
-       hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
-               if (pctx->ms_addr_ip4.s_addr == ms_addr) {
-                       found = true;
-                       break;
-               }
-       }
+       pctx = ipv4_pdp_find(gtp, ms_addr);
+       if (pctx)
+               found = true;
+       if (version == GTP_V0)
+               pctx_tid = gtp0_pdp_find(gtp,
+                                        nla_get_u64(info->attrs[GTPA_TID]));
+       else if (version == GTP_V1)
+               pctx_tid = gtp1_pdp_find(gtp,
+                                        nla_get_u32(info->attrs[GTPA_I_TEI]));
+       if (pctx_tid)
+               found = true;
 
        if (found) {
                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
@@ -951,6 +963,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
                if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
 
+               if (pctx && pctx_tid)
+                       return -EEXIST;
+               if (!pctx)
+                       pctx = pctx_tid;
+
                ipv4_pdp_fill(pctx, info);
 
                if (pctx->gtp_version == GTP_V0)
@@ -1074,7 +1091,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
                goto out_unlock;
        }
 
-       err = ipv4_pdp_add(gtp, sk, info);
+       err = gtp_pdp_add(gtp, sk, info);
 
 out_unlock:
        rcu_read_unlock();
@@ -1232,43 +1249,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
                                struct netlink_callback *cb)
 {
        struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+       int i, j, bucket = cb->args[0], skip = cb->args[1];
        struct net *net = sock_net(skb->sk);
-       struct gtp_net *gn = net_generic(net, gtp_net_id);
-       unsigned long tid = cb->args[1];
-       int i, k = cb->args[0], ret;
        struct pdp_ctx *pctx;
+       struct gtp_net *gn;
+
+       gn = net_generic(net, gtp_net_id);
 
        if (cb->args[4])
                return 0;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
                if (last_gtp && last_gtp != gtp)
                        continue;
                else
                        last_gtp = NULL;
 
-               for (i = k; i < gtp->hash_size; i++) {
-                       hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
-                               if (tid && tid != pctx->u.tid)
-                                       continue;
-                               else
-                                       tid = 0;
-
-                               ret = gtp_genl_fill_info(skb,
-                                                        NETLINK_CB(cb->skb).portid,
-                                                        cb->nlh->nlmsg_seq,
-                                                        cb->nlh->nlmsg_type, pctx);
-                               if (ret < 0) {
+               for (i = bucket; i < gtp->hash_size; i++) {
+                       j = 0;
+                       hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
+                                                hlist_tid) {
+                               if (j >= skip &&
+                                   gtp_genl_fill_info(skb,
+                                           NETLINK_CB(cb->skb).portid,
+                                           cb->nlh->nlmsg_seq,
+                                           cb->nlh->nlmsg_type, pctx)) {
                                        cb->args[0] = i;
-                                       cb->args[1] = pctx->u.tid;
+                                       cb->args[1] = j;
                                        cb->args[2] = (unsigned long)gtp;
                                        goto out;
                                }
+                               j++;
                        }
+                       skip = 0;
                }
+               bucket = 0;
        }
        cb->args[4] = 1;
 out:
+       rcu_read_unlock();
        return skb->len;
 }
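
The rewritten dump walks by (bucket, position) instead of by TID, so deletion or TID reuse between dump chunks can no longer skip or repeat entries; cb->args[] persists across dump calls and carries the cursor. Skeleton of the pattern (e, tbl, hash_size, and fill_one are illustrative):

        int i, j, bucket = cb->args[0], skip = cb->args[1];

        for (i = bucket; i < hash_size; i++) {
                j = 0;
                hlist_for_each_entry_rcu(e, &tbl[i], node) {
                        if (j >= skip && fill_one(skb, e)) {
                                cb->args[0] = i;        /* resume here */
                                cb->args[1] = j;
                                goto out;
                        }
                        j++;
                }
                skip = 0;       /* only skip within the first bucket */
        }
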
 
index 23281aeeb2226ee4dc9d7655aebc247c94c94def..71d6629e65c970e7e133cfa615aa01341a68d43f 100644 (file)
@@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty)
 {
        struct sixpack *sp;
 
-       write_lock_bh(&disc_data_lock);
+       write_lock_irq(&disc_data_lock);
        sp = tty->disc_data;
        tty->disc_data = NULL;
-       write_unlock_bh(&disc_data_lock);
+       write_unlock_irq(&disc_data_lock);
        if (!sp)
                return;
 
index c5bfa19ddb932f2369b1a933f34c61a17b00b1b9..deef14215110494783720b94908b432537ee664a 100644 (file)
@@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty)
 {
        struct mkiss *ax;
 
-       write_lock_bh(&disc_data_lock);
+       write_lock_irq(&disc_data_lock);
        ax = tty->disc_data;
        tty->disc_data = NULL;
-       write_unlock_bh(&disc_data_lock);
+       write_unlock_irq(&disc_data_lock);
 
        if (!ax)
                return;
index 9caa876ce6e8313d752cf7592ba54a8a3ba74df0..dc44819946e69ac8730eb95551af65ab088df79b 100644 (file)
@@ -169,7 +169,6 @@ struct rndis_device {
 
        u8 hw_mac_adr[ETH_ALEN];
        u8 rss_key[NETVSC_HASH_KEYLEN];
-       u16 rx_table[ITAB_NUM];
 };
 
 
@@ -940,6 +939,8 @@ struct net_device_context {
 
        u32 tx_table[VRSS_SEND_TAB_SIZE];
 
+       u16 rx_table[ITAB_NUM];
+
        /* Ethtool settings */
        u8 duplex;
        u32 speed;
index eff8fef4f775f185dc459e0fb87c51f2f0e4d7f8..f3f9eb8a402a2e8615184ad6e47dc9b3ae8d4be7 100644 (file)
@@ -571,7 +571,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
        /* Use the skb control buffer for building up the packet */
        BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
-                       FIELD_SIZEOF(struct sk_buff, cb));
+                       sizeof_field(struct sk_buff, cb));
        packet = (struct hv_netvsc_packet *)skb->cb;
 
        packet->q_idx = skb_get_queue_mapping(skb);
@@ -1662,7 +1662,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
        rndis_dev = ndev->extension;
        if (indir) {
                for (i = 0; i < ITAB_NUM; i++)
-                       indir[i] = rndis_dev->rx_table[i];
+                       indir[i] = ndc->rx_table[i];
        }
 
        if (key)
@@ -1692,7 +1692,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
                                return -EINVAL;
 
                for (i = 0; i < ITAB_NUM; i++)
-                       rndis_dev->rx_table[i] = indir[i];
+                       ndc->rx_table[i] = indir[i];
        }
 
        if (!key) {
index 206b4e77eaf0394d6c677b094a60258e64576ede..e66d77dc28c8cc4ab4417b355bfbafe1344db489 100644 (file)
@@ -773,6 +773,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
                                   const u8 *rss_key, u16 flag)
 {
        struct net_device *ndev = rdev->ndev;
+       struct net_device_context *ndc = netdev_priv(ndev);
        struct rndis_request *request;
        struct rndis_set_request *set;
        struct rndis_set_complete *set_complete;
@@ -812,7 +813,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
        /* Set indirection table entries */
        itab = (u32 *)(rssp + 1);
        for (i = 0; i < ITAB_NUM; i++)
-               itab[i] = rdev->rx_table[i];
+               itab[i] = ndc->rx_table[i];
 
        /* Set hash key values */
        keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
@@ -1171,6 +1172,9 @@ int rndis_set_subchannel(struct net_device *ndev,
        wait_event(nvdev->subchan_open,
                   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
 
+       for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+               ndev_ctx->tx_table[i] = i % nvdev->num_chn;
+
        /* ignore failures from setting rss parameters, still have channels */
        if (dev_info)
                rndis_filter_set_rss_param(rdev, dev_info->rss_key);
@@ -1180,9 +1184,6 @@ int rndis_set_subchannel(struct net_device *ndev,
        netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
        netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
 
-       for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
-               ndev_ctx->tx_table[i] = i % nvdev->num_chn;
-
        return 0;
 }
 
@@ -1312,6 +1313,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                                      struct netvsc_device_info *device_info)
 {
        struct net_device *net = hv_get_drvdata(dev);
+       struct net_device_context *ndc = netdev_priv(net);
        struct netvsc_device *net_device;
        struct rndis_device *rndis_device;
        struct ndis_recv_scale_cap rsscap;
@@ -1398,9 +1400,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
        /* We will use the given number of channels if available. */
        net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
 
-       for (i = 0; i < ITAB_NUM; i++)
-               rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
+       if (!netif_is_rxfh_configured(net)) {
+               for (i = 0; i < ITAB_NUM; i++)
+                       ndc->rx_table[i] = ethtool_rxfh_indir_default(
                                                i, net_device->num_chn);
+       }
 
        atomic_set(&net_device->open_chn, 1);
        vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
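
Moving rx_table from struct rndis_device into net_device_context keeps the RSS indirection table in state that survives RNDIS device re-creation, and the netif_is_rxfh_configured() guard seeds defaults only when no table has been installed via ethtool. The guard, as a sketch (nchan is illustrative):

        if (!netif_is_rxfh_configured(net))     /* user table takes precedence */
                for (i = 0; i < ITAB_NUM; i++)
                        ndc->rx_table[i] = ethtool_rxfh_indir_default(i, nchan);
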
@@ -1439,8 +1443,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
        /* Halt and release the rndis device */
        rndis_filter_halt_device(net_dev, rndis_dev);
 
-       net_dev->extension = NULL;
-
        netvsc_device_remove(dev);
 }
 
index 05631d97eeb4fbfe3ca599dfdccb1c355b1feb45..c5bf61565726b15aa1ea63590d7d8627b0c20d4a 100644 (file)
@@ -513,10 +513,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
        const struct macvlan_dev *dest;
 
        if (vlan->mode == MACVLAN_MODE_BRIDGE) {
-               const struct ethhdr *eth = (void *)skb->data;
+               const struct ethhdr *eth = skb_eth_hdr(skb);
 
                /* send to other bridge ports directly */
                if (is_multicast_ether_addr(eth->h_dest)) {
+                       skb_reset_mac_header(skb);
                        macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
                        goto xmit_world;
                }
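
On the transmit path the Ethernet header sits at skb->data, but skb_mac_header() may not have been set by the sender (a packet socket, for instance); hence the read via skb_eth_hdr() plus an explicit skb_reset_mac_header() before broadcasting, since macvlan_broadcast() locates the header through eth_hdr(). The pairing, as a sketch:

        eth = skb_eth_hdr(skb);         /* header at skb->data on xmit */
        if (is_multicast_ether_addr(eth->h_dest)) {
                skb_reset_mac_header(skb);      /* eth_hdr() users need this */
                macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
        }
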
index 059711edfc61e9d8119d1a35afc7b0dc468f9ed2..4b39aba2e9c47213872129ad2e1596f9334de3a9 100644 (file)
@@ -53,7 +53,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file,
 
        get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
 
-       id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev));
+       id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev));
        err = devlink_region_snapshot_create(nsim_dev->dummy_region,
                                             dummy_data, id, kfree);
        if (err) {
index 5848219005d7918ce8f403f49a5f04904e2db01f..8dc461f7574b79c77b3128752feea30851b01a4b 100644 (file)
@@ -340,14 +340,14 @@ config DAVICOM_PHY
          Currently supports dm9161e and dm9131
 
 config DP83822_PHY
-       tristate "Texas Instruments DP83822 PHY"
+       tristate "Texas Instruments DP83822/825 PHYs"
        ---help---
-         Supports the DP83822 PHY.
+         Supports the DP83822 and DP83825I PHYs.
 
 config DP83TC811_PHY
-       tristate "Texas Instruments DP83TC822 PHY"
+       tristate "Texas Instruments DP83TC811 PHY"
        ---help---
-         Supports the DP83TC822 PHY.
+         Supports the DP83TC811 PHY.
 
 config DP83848_PHY
        tristate "Texas Instruments DP83848 PHY"
index 3b29d381116f8e88dc25dc0d17c485db2b4ef6e2..975789d9349dc5f72184af47e5c132e2a19c265b 100644 (file)
@@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = {
        .config_intr    = aqr_config_intr,
        .ack_interrupt  = aqr_ack_interrupt,
        .read_status    = aqr_read_status,
+       .suspend        = aqr107_suspend,
+       .resume         = aqr107_resume,
 },
 {
        PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
index 9cd9dcee4eb2e8621e6ad03f49ec837c993fdd30..01cf71358359a2bd4fea0d3ba0826d7017a64897 100644 (file)
@@ -97,6 +97,7 @@
 #define DP83867_PHYCR_FIFO_DEPTH_MAX           0x03
 #define DP83867_PHYCR_FIFO_DEPTH_MASK          GENMASK(15, 14)
 #define DP83867_PHYCR_RESERVED_MASK            BIT(11)
+#define DP83867_PHYCR_FORCE_LINK_GOOD          BIT(10)
 
 /* RGMIIDCTL bits */
 #define DP83867_RGMII_TX_CLK_DELAY_MAX         0xf
@@ -599,7 +600,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 
        usleep_range(10, 20);
 
-       return 0;
+       /* After reset, the FORCE_LINK_GOOD bit is set even though its
+        * default value should be unset. Clear FORCE_LINK_GOOD so the
+        * PHY works properly.
+        */
+       return phy_modify(phydev, MII_DP83867_PHYCTRL,
+                        DP83867_PHYCR_FORCE_LINK_GOOD, 0);
 }
 
 static struct phy_driver dp83867_driver[] = {
index 0887ed2bb0500ca702f20a6d9fd9feef7a0df61e..b13c52873ef5d75cbf8ed473db077a4fceb8d71d 100644 (file)
@@ -553,7 +553,7 @@ static const struct device_type mdio_bus_phy_type = {
        .pm = MDIO_BUS_PHY_PM_OPS,
 };
 
-static int phy_request_driver_module(struct phy_device *dev, int phy_id)
+static int phy_request_driver_module(struct phy_device *dev, u32 phy_id)
 {
        int ret;
 
@@ -565,15 +565,15 @@ static int phy_request_driver_module(struct phy_device *dev, int phy_id)
         * then modprobe isn't available.
         */
        if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) {
-               phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n",
-                          ret, phy_id);
+               phydev_err(dev, "error %d loading PHY driver module for ID 0x%08lx\n",
+                          ret, (unsigned long)phy_id);
                return ret;
        }
 
        return 0;
 }
 
-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
                                     bool is_c45,
                                     struct phy_c45_device_ids *c45_ids)
 {
index 9a616d6bc4eb22dfd484a69f48b12ba077eca393..ee7a718662c6b4d56da00ad947f85e50d86db537 100644 (file)
@@ -442,8 +442,7 @@ static void phylink_mac_link_up(struct phylink *pl,
 
        pl->cur_interface = link_state.interface;
        pl->ops->mac_link_up(pl->config, pl->link_an_mode,
-                            pl->phy_state.interface,
-                            pl->phydev);
+                            pl->cur_interface, pl->phydev);
 
        if (ndev)
                netif_carrier_on(ndev);
@@ -567,6 +566,9 @@ static int phylink_register_sfp(struct phylink *pl,
        struct sfp_bus *bus;
        int ret;
 
+       if (!fwnode)
+               return 0;
+
        bus = sfp_bus_find_fwnode(fwnode);
        if (IS_ERR(bus)) {
                ret = PTR_ERR(bus);
index 2a91c192659ffb2762e9d1f821976ce362ac1272..61d7e0d1d77db8deaa8c7260ab0dde4945dae950 100644 (file)
@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
  */
 static void slip_write_wakeup(struct tty_struct *tty)
 {
-       struct slip *sl = tty->disc_data;
+       struct slip *sl;
+
+       rcu_read_lock();
+       sl = rcu_dereference(tty->disc_data);
+       if (!sl)
+               goto out;
 
        schedule_work(&sl->tx_work);
+out:
+       rcu_read_unlock();
 }
 
 static void sl_tx_timeout(struct net_device *dev)
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
                return;
 
        spin_lock_bh(&sl->lock);
-       tty->disc_data = NULL;
+       rcu_assign_pointer(tty->disc_data, NULL);
        sl->tty = NULL;
        spin_unlock_bh(&sl->lock);
 
+       synchronize_rcu();
        flush_work(&sl->tx_work);
 
        /* VSV = very important to remove timers */
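
slip_write_wakeup() can run from interrupt context while slip_close() tears the channel down, so disc_data becomes an RCU-managed pointer. The reader/teardown pairing, as a sketch:

        /* reader side (atomic context): */
        rcu_read_lock();
        sl = rcu_dereference(tty->disc_data);
        if (sl)
                schedule_work(&sl->tx_work);
        rcu_read_unlock();

        /* teardown side: */
        rcu_assign_pointer(tty->disc_data, NULL);
        synchronize_rcu();              /* no reader can still hold sl */
        flush_work(&sl->tx_work);
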
index 683d371e6e82063bec7ade102a3b40aa38b1b6af..35e884a8242d95a4e866ad740a234face2fa0ac0 100644 (file)
@@ -1936,6 +1936,10 @@ drop:
                        if (ret != XDP_PASS) {
                                rcu_read_unlock();
                                local_bh_enable();
+                               if (frags) {
+                                       tfile->napi.skb = NULL;
+                                       mutex_unlock(&tfile->napi_mutex);
+                               }
                                return total_len;
                        }
                }
index cf1f3f0a4b9bee6ad47792824e3be4c6d5634f85..c2a58f05b9a1ccd5940b4321c7ef62b895145c71 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mdio.h>
 #include <linux/phy.h>
 #include <net/ip6_checksum.h>
+#include <net/vxlan.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
@@ -511,7 +512,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
                }
        } else {
                netdev_warn(dev->net,
-                           "Failed to read stat ret = 0x%x", ret);
+                           "Failed to read stat ret = %d", ret);
        }
 
        kfree(stats);
@@ -1808,6 +1809,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
        dev->mdiobus->read = lan78xx_mdiobus_read;
        dev->mdiobus->write = lan78xx_mdiobus_write;
        dev->mdiobus->name = "lan78xx-mdiobus";
+       dev->mdiobus->parent = &dev->udev->dev;
 
        snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
                 dev->udev->bus->busnum, dev->udev->devnum);
@@ -2723,11 +2725,6 @@ static int lan78xx_stop(struct net_device *net)
        return 0;
 }
 
-static int lan78xx_linearize(struct sk_buff *skb)
-{
-       return skb_linearize(skb);
-}
-
 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
                                       struct sk_buff *skb, gfp_t flags)
 {
@@ -2739,8 +2736,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
                return NULL;
        }
 
-       if (lan78xx_linearize(skb) < 0)
+       if (skb_linearize(skb)) {
+               dev_kfree_skb_any(skb);
                return NULL;
+       }
 
        tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
 
@@ -3670,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
        tasklet_schedule(&dev->bh);
 }
 
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+                                               struct net_device *netdev,
+                                               netdev_features_t features)
+{
+       if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+               features &= ~NETIF_F_GSO_MASK;
+
+       features = vlan_features_check(skb, features);
+       features = vxlan_features_check(skb, features);
+
+       return features;
+}
+
 static const struct net_device_ops lan78xx_netdev_ops = {
        .ndo_open               = lan78xx_open,
        .ndo_stop               = lan78xx_stop,
@@ -3683,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
        .ndo_set_features       = lan78xx_set_features,
        .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
+       .ndo_features_check     = lan78xx_features_check,
 };
 
 static void lan78xx_stat_monitor(struct timer_list *t)
@@ -3752,6 +3765,7 @@ static int lan78xx_probe(struct usb_interface *intf,
 
        /* MTU range: 68 - 9000 */
        netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+       netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
 
        dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
        dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
index 4196c0e3274036f3fb81c66b2f8921473c6d0ad3..9485c8d1de8a37c78b210dd2a9b41aea5c1c2eb7 100644 (file)
@@ -1062,6 +1062,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)},     /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)},     /* Quectel EP06/EG06/EM06 */
        {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)},     /* Quectel EG12/EM12 */
+       {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)},     /* Quectel RM500Q-GL */
 
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
index c5ebf35d2488423357ca170249f8caeac2b0acd6..3f425f974d03cfaf311d5bc2c21bac0599b488a8 100644 (file)
@@ -31,7 +31,7 @@
 #define NETNEXT_VERSION                "11"
 
 /* Information for net */
-#define NET_VERSION            "10"
+#define NET_VERSION            "11"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -68,6 +68,7 @@
 #define PLA_LED_FEATURE                0xdd92
 #define PLA_PHYAR              0xde00
 #define PLA_BOOT_CTRL          0xe004
+#define PLA_LWAKE_CTRL_REG     0xe007
 #define PLA_GPHY_INTR_IMR      0xe022
 #define PLA_EEE_CR             0xe040
 #define PLA_EEEP_CR            0xe080
@@ -95,6 +96,7 @@
 #define PLA_TALLYCNT           0xe890
 #define PLA_SFF_STS_7          0xe8de
 #define PLA_PHYSTATUS          0xe908
+#define PLA_CONFIG6            0xe90a /* CONFIG6 */
 #define PLA_BP_BA              0xfc26
 #define PLA_BP_0               0xfc28
 #define PLA_BP_1               0xfc2a
 #define PLA_BP_EN              0xfc38
 
 #define USB_USB2PHY            0xb41e
+#define USB_SSPHYLINK1         0xb426
 #define USB_SSPHYLINK2         0xb428
 #define USB_U2P3_CTRL          0xb460
 #define USB_CSR_DUMMY1         0xb464
 #define LINK_ON_WAKE_EN                0x0010
 #define LINK_OFF_WAKE_EN       0x0008
 
+/* PLA_CONFIG6 */
+#define LANWAKE_CLR_EN         BIT(0)
+
 /* PLA_CONFIG5 */
 #define BWF_EN                 0x0040
 #define MWF_EN                 0x0020
 /* PLA_PHY_PWR */
 #define TX_10M_IDLE_EN         0x0080
 #define PFM_PWM_SWITCH         0x0040
+#define TEST_IO_OFF            BIT(4)
 
 /* PLA_MAC_PWR_CTRL */
 #define D3_CLK_GATED_EN                0x00004000
 #define MAC_CLK_SPDWN_EN       BIT(15)
 
 /* PLA_MAC_PWR_CTRL3 */
+#define PLA_MCU_SPDWN_EN       BIT(14)
 #define PKT_AVAIL_SPDWN_EN     0x0100
 #define SUSPEND_SPDWN_EN       0x0004
 #define U1U2_SPDWN_EN          0x0002
 /* PLA_BOOT_CTRL */
 #define AUTOLOAD_DONE          0x0002
 
+/* PLA_LWAKE_CTRL_REG */
+#define LANWAKE_PIN            BIT(7)
+
 /* PLA_SUSPEND_FLAG */
 #define LINK_CHG_EVENT         BIT(0)
 
 #define DEBUG_LTSSM            0x0082
 
 /* PLA_EXTRA_STATUS */
+#define CUR_LINK_OK            BIT(15)
 #define U3P3_CHECK_EN          BIT(7)  /* RTL_VER_05 only */
 #define LINK_CHANGE_FLAG       BIT(8)
+#define POLL_LINK_CHG          BIT(0)
 
 /* USB_USB2PHY */
 #define USB2PHY_SUSPEND                0x0001
 #define USB2PHY_L1             0x0002
 
+/* USB_SSPHYLINK1 */
+#define DELAY_PHY_PWR_CHG      BIT(1)
+
 /* USB_SSPHYLINK2 */
 #define pwd_dn_scale_mask      0x3ffe
 #define pwd_dn_scale(x)                ((x) << 1)
@@ -2863,6 +2879,17 @@ static int rtl8153_enable(struct r8152 *tp)
        r8153_set_rx_early_timeout(tp);
        r8153_set_rx_early_size(tp);
 
+       if (tp->version == RTL_VER_09) {
+               u32 ocp_data;
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+               ocp_data &= ~FC_PATCH_TASK;
+               ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+               usleep_range(1000, 2000);
+               ocp_data |= FC_PATCH_TASK;
+               ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+       }
+
        return rtl_enable(tp);
 }
 
@@ -3376,8 +3403,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
                r8153b_ups_en(tp, false);
                r8153_queue_wake(tp, false);
                rtl_runtime_suspend_enable(tp, false);
-               r8153_u2p3en(tp, true);
-               r8153b_u1u2en(tp, true);
+               if (tp->udev->speed != USB_SPEED_HIGH)
+                       r8153b_u1u2en(tp, true);
        }
 }
 
@@ -4675,7 +4702,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
 
        r8153_aldps_en(tp, true);
        r8152b_enable_fc(tp);
-       r8153_u2p3en(tp, true);
 
        set_bit(PHY_RESET, &tp->flags);
 }
@@ -4954,6 +4980,8 @@ static void rtl8152_down(struct r8152 *tp)
 
 static void rtl8153_up(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
@@ -4961,6 +4989,19 @@ static void rtl8153_up(struct r8152 *tp)
        r8153_u2p3en(tp, false);
        r8153_aldps_en(tp, false);
        r8153_first_init(tp);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data |= LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+       ocp_data &= ~LANWAKE_PIN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
+       ocp_data &= ~DELAY_PHY_PWR_CHG;
+       ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
+
        r8153_aldps_en(tp, true);
 
        switch (tp->version) {
@@ -4979,11 +5020,17 @@ static void rtl8153_up(struct r8152 *tp)
 
 static void rtl8153_down(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
 
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data &= ~LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
        r8153_u1u2en(tp, false);
        r8153_u2p3en(tp, false);
        r8153_power_cut_en(tp, false);
@@ -4994,6 +5041,8 @@ static void rtl8153_down(struct r8152 *tp)
 
 static void rtl8153b_up(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
@@ -5004,18 +5053,29 @@ static void rtl8153b_up(struct r8152 *tp)
        r8153_first_init(tp);
        ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data &= ~PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
        r8153_aldps_en(tp, true);
-       r8153_u2p3en(tp, true);
-       r8153b_u1u2en(tp, true);
+
+       if (tp->udev->speed != USB_SPEED_HIGH)
+               r8153b_u1u2en(tp, true);
 }
 
 static void rtl8153b_down(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data |= PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
        r8153b_u1u2en(tp, false);
        r8153_u2p3en(tp, false);
        r8153b_power_cut_en(tp, false);
@@ -5387,6 +5447,16 @@ static void r8153_init(struct r8152 *tp)
                else
                        ocp_data |= DYNAMIC_BURST;
                ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+
+               r8153_queue_wake(tp, false);
+
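+               /* Mirror the current link state into PLA_EXTRA_STATUS and
+                * enable polling for link changes.
+                */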
+               ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+               if (rtl8152_get_speed(tp) & LINK_STATUS)
+                       ocp_data |= CUR_LINK_OK;
+               else
+                       ocp_data &= ~CUR_LINK_OK;
+               ocp_data |= POLL_LINK_CHG;
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
        }
 
        ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -5416,10 +5486,19 @@ static void r8153_init(struct r8152 *tp)
        ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
 
        r8153_power_cut_en(tp, false);
+       rtl_runtime_suspend_enable(tp, false);
        r8153_u1u2en(tp, true);
        r8153_mac_clk_spd(tp, false);
        usb_enable_lpm(tp->udev);
 
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data |= LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+       ocp_data &= ~LANWAKE_PIN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
        /* rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
        ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
@@ -5484,7 +5563,17 @@ static void r8153b_init(struct r8152 *tp)
        r8153b_ups_en(tp, false);
        r8153_queue_wake(tp, false);
        rtl_runtime_suspend_enable(tp, false);
-       r8153b_u1u2en(tp, true);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+       if (rtl8152_get_speed(tp) & LINK_STATUS)
+               ocp_data |= CUR_LINK_OK;
+       else
+               ocp_data &= ~CUR_LINK_OK;
+       ocp_data |= POLL_LINK_CHG;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+       if (tp->udev->speed != USB_SPEED_HIGH)
+               r8153b_u1u2en(tp, true);
        usb_enable_lpm(tp->udev);
 
        /* MAC clock speed down */
@@ -5492,6 +5581,19 @@ static void r8153b_init(struct r8152 *tp)
        ocp_data |= MAC_CLK_SPDWN_EN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data &= ~PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+       if (tp->version == RTL_VER_09) {
+               /* Disable Test IO for 32QFN */
+               if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
+                       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+                       ocp_data |= TEST_IO_OFF;
+                       ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+               }
+       }
+
        set_bit(GREEN_ETHERNET, &tp->flags);
 
        /* rx aggregation */
@@ -6597,6 +6699,9 @@ static int rtl8152_probe(struct usb_interface *intf,
                return -ENODEV;
        }
 
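+       /* The driver expects bulk-in, bulk-out and an interrupt endpoint;
+        * reject interfaces that do not provide all three.
+        */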
+       if (intf->cur_altsetting->desc.bNumEndpoints < 3)
+               return -ENODEV;
+
        usb_reset_device(udev);
        netdev = alloc_etherdev(sizeof(struct r8152));
        if (!netdev) {
@@ -6704,6 +6809,11 @@ static int rtl8152_probe(struct usb_interface *intf,
 
        intf->needs_remote_wakeup = 1;
 
+       if (!rtl_can_wakeup(tp))
+               __rtl_set_wol(tp, 0);
+       else
+               tp->saved_wolopts = __rtl_get_wol(tp);
+
        tp->rtl_ops.init(tp);
 #if IS_BUILTIN(CONFIG_USB_RTL8152)
        /* Retry in case request_firmware() is not ready yet. */
@@ -6721,10 +6831,6 @@ static int rtl8152_probe(struct usb_interface *intf,
                goto out1;
        }
 
-       if (!rtl_can_wakeup(tp))
-               __rtl_set_wol(tp, 0);
-
-       tp->saved_wolopts = __rtl_get_wol(tp);
        if (tp->saved_wolopts)
                device_set_wakeup_enable(&udev->dev, true);
        else
index 34c1eaba536c05d2746c3fa3bd278d4805e94672..389d19dd7909ccfc136232eb4d51ee7e3a033560 100644 (file)
@@ -865,7 +865,7 @@ static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev,
        u16 len;
        bool need_tail;
 
-       BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+       BUILD_BUG_ON(sizeof_field(struct usbnet, data)
                                < sizeof(struct cdc_state));
 
        dev_dbg(&dev->udev->dev, "%s", __func__);
index 30e511c2c8d017f7f168dd3ef839314627e7ee53..9ce6d30576dde8579afb4ad750be6e762e9dfbd3 100644 (file)
@@ -2184,7 +2184,7 @@ static int __init usbnet_init(void)
 {
        /* Compiler should optimize this out. */
        BUILD_BUG_ON(
-               FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
+               sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
 
        eth_random_addr(node_id);
        return 0;
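Both hunks above are part of the treewide FIELD_SIZEOF() -> sizeof_field() rename; the two macros are equivalent and yield the size of a struct member without needing an instance. A minimal sketch of the idiom (the struct and the bound are illustrative, not from the patch):

    #include <linux/build_bug.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    struct example_hdr {
            u8  flags;
            u16 len;
            u8  scratch[16];
    };

    static void example_check(void)
    {
            /* Build fails if the scratch area ever shrinks below 16 bytes. */
            BUILD_BUG_ON(sizeof_field(struct example_hdr, scratch) < 16);
    }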
index 4c34375c2e22096e8fd6ca47a5de1b678c06bcf2..1c5159dcc72024864000b8788a012b9907cf4204 100644 (file)
@@ -2541,7 +2541,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                ndst = &rt->dst;
                skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
 
-               tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+               tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
                err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
                                      vni, md, flags, udp_sum);
@@ -2581,7 +2581,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
 
-               tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+               tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
                skb_scrub_packet(skb, xnet);
                err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
@@ -3069,10 +3069,10 @@ static void vxlan_raw_setup(struct net_device *dev)
 
 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_ID]         = { .type = NLA_U32 },
-       [IFLA_VXLAN_GROUP]      = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+       [IFLA_VXLAN_GROUP]      = { .len = sizeof_field(struct iphdr, daddr) },
        [IFLA_VXLAN_GROUP6]     = { .len = sizeof(struct in6_addr) },
        [IFLA_VXLAN_LINK]       = { .type = NLA_U32 },
-       [IFLA_VXLAN_LOCAL]      = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+       [IFLA_VXLAN_LOCAL]      = { .len = sizeof_field(struct iphdr, saddr) },
        [IFLA_VXLAN_LOCAL6]     = { .len = sizeof(struct in6_addr) },
        [IFLA_VXLAN_TOS]        = { .type = NLA_U8 },
        [IFLA_VXLAN_TTL]        = { .type = NLA_U8 },
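The two tos assignments earlier in this file now pass the inherited value through RT_TOS(), which masks it down to the legacy TOS bits before ip_tunnel_ecn_encap() sees it. A hedged sketch of the effect (the value is illustrative):

    #include <linux/types.h>
    #include <net/route.h>  /* RT_TOS() masks with IPTOS_TOS_MASK (0x1e) */

    /* Illustrative only: RT_TOS() drops the precedence/DSCP high bits,
     * e.g. a raw field of 0xfc comes out as 0x1c.
     */
    static u8 example_masked_tos(u8 tos)
    {
            return RT_TOS(tos);
    }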
index ca0f3be2b6bf8c4c3c4ed330651dabd8bb756ff8..aef7de225783f6bd19c0ee2d9e52f5a11a7fcf81 100644 (file)
@@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = {
        },
 };
 
-static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
 
 static int uhdlc_init(struct ucc_hdlc_private *priv)
 {
index 0f1217b506ad2cac104fd6d2d1d3262e5cc4ada9..e30d91a38cfb637b376eab0e963ab0ade09fbbb5 100644 (file)
@@ -64,7 +64,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
 {
        struct lapbethdev *lapbeth;
 
-       list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
+       list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
                if (lapbeth->ethdev == dev) 
                        return lapbeth;
        }
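list_for_each_entry_rcu() recently gained an optional fourth argument: a lockdep expression naming an alternative protection context, here lockdep_rtnl_is_held() since lapbeth walks this list under RTNL as well as RCU. A self-contained sketch of the idiom (the list, mutex, and lookup are illustrative):

    #include <linux/mutex.h>
    #include <linux/rculist.h>

    struct item {
            int val;
            struct list_head node;
    };

    static LIST_HEAD(item_list);
    static DEFINE_MUTEX(item_lock);

    /* The fourth argument tells RCU-lockdep that the walk is also safe
     * while item_lock is held, not only inside rcu_read_lock().
     */
    static struct item *find_item(int val)
    {
            struct item *it;

            list_for_each_entry_rcu(it, &item_list, node,
                                    lockdep_is_held(&item_lock))
                    if (it->val == val)
                            return it;
            return NULL;
    }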
index e2e679a01b65a2570f8bcd88870929622f3b7264..77ccf3672ede7db77071d0cba13ad49ba8f3e4b3 100644 (file)
@@ -708,7 +708,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb,
 
                                        spin_lock_irqsave(&sdla_lock, flags);
                                        SDLA_WINDOW(dev, addr);
-                                       pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
+                                       pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK));
                                        __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
                                        SDLA_WINDOW(dev, addr);
                                        pbuf->opp_flag = 1;
index 34e94ee806d65c1310a0b197b85c5709dbf46611..23f93f1c815d0914d3936cfebf2d7c542fc095bd 100644 (file)
@@ -635,7 +635,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
        /* set up PLX mapping */
        plx_phy = pci_resource_start(pdev, 0);
 
-       card->plx = ioremap_nocache(plx_phy, 0x70);
+       card->plx = ioremap(plx_phy, 0x70);
        if (!card->plx) {
                pr_err("ioremap() failed\n");
                wanxl_pci_remove_one(pdev);
@@ -704,7 +704,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
                                               PCI_DMA_FROMDEVICE);
        }
 
-       mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
+       mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
        if (!mem) {
                pr_err("ioremap() failed\n");
                wanxl_pci_remove_one(pdev);
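This and the following ioremap_nocache() conversions are mechanical: ioremap_nocache() had been a plain alias for ioremap(), which always returns an uncached mapping, so the _nocache spelling could be removed treewide. A minimal sketch (the names are illustrative):

    #include <linux/io.h>
    #include <linux/types.h>

    /* ioremap() is the canonical spelling; the mapping is uncached,
     * exactly as ioremap_nocache() was.
     */
    static void __iomem *map_regs(phys_addr_t base, size_t size)
    {
            return ioremap(base, size);
    }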
index f80854180e2158b6e03dc1fcd79552be8c237415..ed87bc00f2aa81dd5a8c94aad4de238ef3224c6b 100644 (file)
@@ -458,7 +458,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
 
        ar_ahb->mem_len = resource_size(res);
 
-       ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
+       ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE,
                                          ATH10K_GCC_REG_SIZE);
        if (!ar_ahb->gcc_mem) {
                ath10k_err(ar, "gcc mem ioremap error\n");
@@ -466,7 +466,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
                goto err_mem_unmap;
        }
 
-       ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
+       ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE,
                                           ATH10K_TCSR_REG_SIZE);
        if (!ar_ahb->tcsr_mem) {
                ath10k_err(ar, "tcsr mem ioremap error\n");
index 83cc8778ca1e31aabf5a5f56c6a0520d10216706..978f0037ed522e6047543cfee078110986c6880e 100644 (file)
@@ -8958,6 +8958,7 @@ int ath10k_mac_register(struct ath10k *ar)
        wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
        wiphy_ext_feature_set(ar->hw->wiphy,
                              NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+       wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL);
 
        if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
            test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
index ab916459d2375fb2f6d093b4f230e2892ffd477d..842e42ec814f4b40794ec5231cd4fbef87246cc4 100644 (file)
@@ -239,7 +239,7 @@ TRACE_EVENT(ath10k_wmi_dbglog,
        TP_STRUCT__entry(
                __string(device, dev_name(ar->dev))
                __string(driver, dev_driver_string(ar->dev))
-               __field(u8, hw_type);
+               __field(u8, hw_type)
                __field(size_t, buf_len)
                __dynamic_array(u8, buf, buf_len)
        ),
@@ -269,7 +269,7 @@ TRACE_EVENT(ath10k_htt_pktlog,
        TP_STRUCT__entry(
                __string(device, dev_name(ar->dev))
                __string(driver, dev_driver_string(ar->dev))
-               __field(u8, hw_type);
+               __field(u8, hw_type)
                __field(u16, buf_len)
                __dynamic_array(u8, pktlog, buf_len)
        ),
@@ -435,7 +435,7 @@ TRACE_EVENT(ath10k_htt_rx_desc,
        TP_STRUCT__entry(
                __string(device, dev_name(ar->dev))
                __string(driver, dev_driver_string(ar->dev))
-               __field(u8, hw_type);
+               __field(u8, hw_type)
                __field(u16, len)
                __dynamic_array(u8, rxdesc, len)
        ),
index c0794f5988b348a1877e3dda52777c0a05f5cb85..2c9cec8b53d9e22a2e7683b92ac0feb23593fc2d 100644 (file)
@@ -106,7 +106,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
                goto err_out;
        }
 
-       mem = ioremap_nocache(res->start, resource_size(res));
+       mem = ioremap(res->start, resource_size(res));
        if (mem == NULL) {
                dev_err(&pdev->dev, "ioremap failed\n");
                ret = -ENOMEM;
index 63019c3de034d4771974b8d5aa01d73bae2dc74a..cdefb8e2daf1437e98309afdd01ca218a7de554e 100644 (file)
@@ -92,7 +92,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+       mem = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (mem == NULL) {
                dev_err(&pdev->dev, "ioremap failed\n");
                return -ENOMEM;
index 956fa7828d0c8c668531928ac86924c12859d5d7..56d1a7764b9f8746819898a2247a9bf83c3da3c2 100644 (file)
@@ -83,7 +83,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
                        val = swahb32(val);
                }
 
-               __raw_writel(val, mem + reg);
+               iowrite32(val, mem + reg);
                usleep_range(100, 120);
        }
 
index f64ce5074a55a497cc01f3803cb0879635d762a0..c85840cabebe6c5d53bde4f3875e5e65644f937d 100644 (file)
@@ -1643,8 +1643,8 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
                return -EINVAL;
        }
 
-       devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
-       devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
+       devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
+       devinfo->tcm = ioremap(bar1_addr, bar1_size);
 
        if (!devinfo->regs || !devinfo->tcm) {
                brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
index f43c06569ea1c133ac4c921b8a8a639e079ea50f..c4c8f1b62e1ec4a040b7079f97df4cb9cb39775a 100644 (file)
@@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
        case AIROGVLIST:    ridcode = RID_APLIST;       break;
        case AIROGDRVNAM:   ridcode = RID_DRVNAME;      break;
        case AIROGEHTENC:   ridcode = RID_ETHERENCAP;   break;
-       case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;
-               /* Only super-user can read WEP keys */
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               break;
-       case AIROGWEPKNV:   ridcode = RID_WEP_PERM;
-               /* Only super-user can read WEP keys */
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               break;
+       case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;     break;
+       case AIROGWEPKNV:   ridcode = RID_WEP_PERM;     break;
        case AIROGSTAT:     ridcode = RID_STATUS;       break;
        case AIROGSTATSD32: ridcode = RID_STATSDELTA;   break;
        case AIROGSTATSC32: ridcode = RID_STATS;        break;
@@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
                return -EINVAL;
        }
 
-       if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+       if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+               /* Only super-user can read WEP keys */
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+       }
+
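+       /* kzalloc(): zero the buffer so a short RID read cannot leak
+        * stale heap contents back to user space.
+        */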
+       if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
                return -ENOMEM;
 
        PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
index c4c83ab60cbc46f184d140e940a8da44c84513e7..e85858eec8ffb0253c0c588d49830cc30b8ce5fd 100644 (file)
@@ -6167,7 +6167,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        ioaddr = pci_iomap(pci_dev, 0, 0);
        if (!ioaddr) {
                printk(KERN_WARNING DRV_NAME
-                      "Error calling ioremap_nocache.\n");
+                      "Error calling ioremap.\n");
                err = -EIO;
                goto fail;
        }
index cd73fc5cfcbb21c537cde3e05e7025f6387edcc4..fd454836adbed32e228325c0a3f6cf4f65e4c8b0 100644 (file)
@@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_station_priv *sta_priv = NULL;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        __le16 fc;
        u8 hdr_len;
@@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        if (unlikely(!dev_cmd))
                goto drop_unlock_priv;
 
-       memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = REPLY_TX;
        tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
 
index 40fe2d66762250faf5b0967dfd76313d859440f4..48d375a86d8627cd2a0927f4c396926de6a614bf 100644 (file)
@@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
 {
        union acpi_object *wifi_pkg, *data;
        bool enabled;
-       int i, n_profiles, tbl_rev;
-       int  ret = 0;
+       int i, n_profiles, tbl_rev, pos;
+       int ret = 0;
 
        data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
        if (IS_ERR(data))
@@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
                goto out_free;
        }
 
-       for (i = 0; i < n_profiles; i++) {
-               /* the tables start at element 3 */
-               int pos = 3;
+       /* the tables start at element 3 */
+       pos = 3;
 
+       for (i = 0; i < n_profiles; i++) {
                /* The EWRD profiles officially go from 2 to 4, but we
                 * save them in sar_profiles[1-3] (because we don't
                 * have profile 0).  So in the array we start from 1.
index ed90dd104366ab372cba7b3bcf3ca3b33bc05e26..4c60f9959f7bfcebab964ab9280a0c25a1032235 100644 (file)
@@ -2669,12 +2669,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
 {
        int ret = 0;
 
-       /* if the FW crashed or not debug monitor cfg was given, there is
-        * no point in changing the recording state
-        */
-       if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
-           (!fwrt->trans->dbg.dest_tlv &&
-            fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+       if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
                return 0;
 
        if (fw_has_capa(&fwrt->fw->ucode_capa,
index 92d9898ab7c24df5a52799b538d36cf16ab00a3e..c2f7252ae4e712878addb01ab84983ab1df36cdf 100644 (file)
@@ -379,7 +379,7 @@ enum {
 
 
 /* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED    (0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED   (0x00000002)
 
 /*
  * UCODE-DRIVER GP (general purpose) mailbox register 1
index f266647dc08c83ecbf543edb62ee74854ef9d98d..ce8f248c33ea97a1333811dc7d49145786cf9425 100644 (file)
@@ -480,7 +480,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
        if (!frag || frag->size || !pages)
                return -EIO;
 
-       while (pages) {
+       /*
+        * Try to allocate as many pages as possible, starting with the
+        * requested amount and scaling down until an allocation succeeds.
+        * Because of DIV_ROUND_UP(), pages can never reach 0 and end the
+        * loop on its own, so stop once pages reaches 1, which is too
+        * small to be useful anyway.
+        */
+       while (pages > 1) {
                block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
                                           &physical,
                                           GFP_KERNEL | __GFP_NOWARN);
index 4096ccf58b070d74559f40300d8e52371c2b9812..bc8c959588ca8cdb1a506b7b8377a476ee29a83e 100644 (file)
@@ -1817,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
 
-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
 MODULE_PARM_DESC(uapsd_disable,
                 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
index ebea3f308b5d27a8b8fd40c2923c519d60be40ad..82e5cac23d8d08e4476cdff4812273e0ec37bbf7 100644 (file)
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
  * @nvm_file: specifies an external NVM file
  * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
  *     IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @lar_disable: disable LAR (regulatory), default = 0
  * @fw_monitor: allow to use firmware monitor
  * @disable_11ac: disable VHT capabilities, default = false.
  * @remove_when_gone: remove an inaccessible device from the PCIe bus.
@@ -136,7 +135,6 @@ struct iwl_mod_params {
        int antenna_coupling;
        char *nvm_file;
        u32 uapsd_disable;
-       bool lar_disable;
        bool fw_monitor;
        bool disable_11ac;
        /**
index 1e240a2a83290ef4c4477a25482a337c4394c0f0..d4f834b52f50ca5f89626a870df63d9d5fda24d5 100644 (file)
@@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags {
        NVM_CHANNEL_DC_HIGH             = BIT(12),
 };
 
+/**
+ * enum iwl_reg_capa_flags - global flags applied to the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *     2.4 GHz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *     5 GHz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channels with a width of 160 MHz are allowed
+ *     for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channels with a width of 80 MHz are allowed
+ *     for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channels with a width of 40 MHz are forbidden
+ *     for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+       REG_CAPA_BF_CCD_LOW_BAND        = BIT(0),
+       REG_CAPA_BF_CCD_HIGH_BAND       = BIT(1),
+       REG_CAPA_160MHZ_ALLOWED         = BIT(2),
+       REG_CAPA_80MHZ_ALLOWED          = BIT(3),
+       REG_CAPA_MCS_8_ALLOWED          = BIT(4),
+       REG_CAPA_MCS_9_ALLOWED          = BIT(5),
+       REG_CAPA_40MHZ_FORBIDDEN        = BIT(7),
+       REG_CAPA_DC_HIGH_ENABLED        = BIT(9),
+};
+
 static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
                                               int chan, u32 flags)
 {
@@ -939,10 +967,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                  const struct iwl_fw *fw,
                   const __be16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
-                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+                  u8 tx_chains, u8 rx_chains)
 {
        struct iwl_nvm_data *data;
        bool lar_enabled;
@@ -1022,7 +1051,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                return NULL;
        }
 
-       if (lar_fw_supported && lar_enabled)
+       if (lar_enabled &&
+           fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
                sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
 
        if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
@@ -1038,6 +1068,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
 
 static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
                                       int ch_idx, u16 nvm_flags,
+                                      u16 cap_flags,
                                       const struct iwl_cfg *cfg)
 {
        u32 flags = NL80211_RRF_NO_HT40;
@@ -1076,13 +1107,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
            (flags & NL80211_RRF_NO_IR))
                flags |= NL80211_RRF_GO_CONCURRENT;
 
+       /*
+        * cap_flags is per regulatory domain, so apply it to every channel.
+        */
+       if (ch_idx >= NUM_2GHZ_CHANNELS) {
+               if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+                       flags |= NL80211_RRF_NO_HT40;
+
+               if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+                       flags |= NL80211_RRF_NO_80MHZ;
+
+               if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+                       flags |= NL80211_RRF_NO_160MHZ;
+       }
+
        return flags;
 }
 
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                       int num_of_ch, __le32 *channels, u16 fw_mcc,
-                      u16 geo_info)
+                      u16 geo_info, u16 cap)
 {
        int ch_idx;
        u16 ch_flags;
@@ -1140,7 +1185,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                }
 
                reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-                                                            ch_flags, cfg);
+                                                            ch_flags, cap,
+                                                            cfg);
 
                /* we can't continue the same rule */
                if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -1405,9 +1451,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
                .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
        };
        int  ret;
-       bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
-                               fw_has_capa(&fw->ucode_capa,
-                                           IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
        bool empty_otp;
        u32 mac_flags;
        u32 sbands_flags = 0;
@@ -1485,7 +1528,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
        nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
        nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
 
-       if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+       if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+           fw_has_capa(&fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
                nvm->lar_enabled = true;
                sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
        }
index b7e1ddf8f177b30ca888094b026917e8fdec5129..fb0b385d10fd5bee1b3061105ed59ff0a82baeb7 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018        Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018        Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags {
  */
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                  const struct iwl_fw *fw,
                   const __be16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
-                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+                  u8 tx_chains, u8 rx_chains);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
@@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                       int num_of_ch, __le32 *channels, u16 fw_mcc,
-                      u16 geo_info);
+                      u16 geo_info, u16 cap);
 
 /**
  * struct iwl_nvm_section - describes an NVM section in memory.
index 28bdc9a9617eb5eb0f812b3d160e8caf109d71ae..f91197e4ae4020d8984cb888f06a61d5a256e0a9 100644 (file)
@@ -66,7 +66,9 @@
 
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
-                                 const struct iwl_trans_ops *ops)
+                                 const struct iwl_trans_ops *ops,
+                                 unsigned int cmd_pool_size,
+                                 unsigned int cmd_pool_align)
 {
        struct iwl_trans *trans;
 #ifdef CONFIG_LOCKDEP
@@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                 "iwl_cmd_pool:%s", dev_name(trans->dev));
        trans->dev_cmd_pool =
                kmem_cache_create(trans->dev_cmd_pool_name,
-                                 sizeof(struct iwl_device_cmd),
-                                 sizeof(void *),
-                                 SLAB_HWCACHE_ALIGN,
-                                 NULL);
+                                 cmd_pool_size, cmd_pool_align,
+                                 SLAB_HWCACHE_ALIGN, NULL);
        if (!trans->dev_cmd_pool)
                return NULL;
 
index 8cadad7364acac132289df6d35f723e0f7a5e46c..e33df5ad00e0e74bd30f8a209fcc1ef2418e2f26 100644 (file)
@@ -193,6 +193,18 @@ struct iwl_device_cmd {
        };
 } __packed;
 
+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+       struct iwl_cmd_header hdr;
+       u8 payload[];
+} __packed;
+
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
 
 /*
@@ -544,7 +556,7 @@ struct iwl_trans_ops {
        int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
        int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
-                 struct iwl_device_cmd *dev_cmd, int queue);
+                 struct iwl_device_tx_cmd *dev_cmd, int queue);
        void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
                        struct sk_buff_head *skbs);
 
@@ -948,22 +960,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
        return trans->ops->dump_data(trans, dump_mask);
 }
 
-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
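+       /* kmem_cache_zalloc() returns zeroed memory, which is why the
+        * explicit memset()s of the TX command were dropped from the
+        * callers.
+        */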
-       return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+       return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
 }
 
 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
-                                        struct iwl_device_cmd *dev_cmd)
+                                        struct iwl_device_tx_cmd *dev_cmd)
 {
        kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
 }
 
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                              struct iwl_device_cmd *dev_cmd, int queue)
+                              struct iwl_device_tx_cmd *dev_cmd, int queue)
 {
        if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
                return -EIO;
@@ -1271,7 +1283,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
  *****************************************************/
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
-                                 const struct iwl_trans_ops *ops);
+                                 const struct iwl_trans_ops *ops,
+                                 unsigned int cmd_pool_size,
+                                 unsigned int cmd_pool_align);
 void iwl_trans_free(struct iwl_trans *trans);
 
 /*****************************************************
index 60aff2ecec121ff11e16f58e503923296b482237..58df25e2fb3236d69fa475593a4e2a5bd4a86173 100644 (file)
 #define IWL_MVM_D3_DEBUG                       false
 #define IWL_MVM_USE_TWT                                false
 #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA       10
+#define IWL_MVM_USE_NSSN_SYNC                  0
 
 #endif /* __MVM_CONSTANTS_H */
index dd685f7eb41044b05817386fa0f0b568d830352f..c09624d8d7ee8455fc4f4b54bbfb4f26e9e39656 100644 (file)
@@ -841,9 +841,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
                return 0;
        }
 
+       if (!mvm->fwrt.ppag_table.enabled) {
+               IWL_DEBUG_RADIO(mvm,
+                               "PPAG not enabled, command not sent.\n");
+               return 0;
+       }
+
        IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
-       IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
-                       mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
 
        for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
                for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
index 32dc9d6f0fb62d2d2218b22b0a6dacf2c29e8fb4..6717f25c46b1c12b894dabdb0f2e363e815218b5 100644 (file)
@@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
                                      __le32_to_cpu(resp->n_channels),
                                      resp->channels,
                                      __le16_to_cpu(resp->mcc),
-                                     __le16_to_cpu(resp->geo_info));
+                                     __le16_to_cpu(resp->geo_info),
+                                     __le16_to_cpu(resp->cap));
        /* Store the return source id */
        src_id = resp->source_id;
        kfree(resp);
@@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        return ret;
 }
 
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+                          struct ieee80211_sta *sta)
+{
+       if (likely(sta)) {
+               if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+                       return;
+       } else {
+               if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+                       return;
+       }
+
+       ieee80211_free_txskb(mvm->hw, skb);
+}
+
 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
                           struct ieee80211_tx_control *control,
                           struct sk_buff *skb)
@@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
                }
        }
 
-       if (sta) {
-               if (iwl_mvm_tx_skb(mvm, skb, sta))
-                       goto drop;
-               return;
-       }
-
-       if (iwl_mvm_tx_skb_non_sta(mvm, skb))
-               goto drop;
+       iwl_mvm_tx_skb(mvm, skb, sta);
        return;
  drop:
        ieee80211_free_txskb(hw, skb);
@@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
                                break;
                        }
 
-                       if (!txq->sta)
-                               iwl_mvm_tx_skb_non_sta(mvm, skb);
-                       else
-                               iwl_mvm_tx_skb(mvm, skb, txq->sta);
+                       iwl_mvm_tx_skb(mvm, skb, txq->sta);
                }
        } while (atomic_dec_return(&mvmtxq->tx_request));
        rcu_read_unlock();
@@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
        return ret;
 }
 
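+/* Translate the firmware's packed rate_n_flags word into the cfg80211
+ * rate_info reported to user space: bandwidth, HT/VHT/HE MCS and NSS,
+ * guard interval, or a legacy rate in units of 100 kbit/s.
+ */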
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               rinfo->bw = RATE_INFO_BW_20;
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               rinfo->bw = RATE_INFO_BW_40;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               rinfo->bw = RATE_INFO_BW_80;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               rinfo->bw = RATE_INFO_BW_160;
+               break;
+       }
+
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               rinfo->flags |= RATE_INFO_FLAGS_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_HT_MCS_NSS_MSK) + 1;
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+       } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+               rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_RATE_CODE_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_NSS_MSK) + 1;
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+       } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+               u32 gi_ltf = u32_get_bits(rate_n_flags,
+                                         RATE_MCS_HE_GI_LTF_MSK);
+
+               rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_RATE_CODE_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_NSS_MSK) + 1;
+
+               if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+                       rinfo->bw = RATE_INFO_BW_HE_RU;
+                       rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+               }
+
+               switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+               case RATE_MCS_HE_TYPE_SU:
+               case RATE_MCS_HE_TYPE_EXT_SU:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else if (gi_ltf == 2)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else if (rate_n_flags & RATE_MCS_SGI_MSK)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               case RATE_MCS_HE_TYPE_MU:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else if (gi_ltf == 2)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               case RATE_MCS_HE_TYPE_TRIG:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               }
+
+               if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+                       rinfo->he_dcm = 1;
+       } else {
+               switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+               case IWL_RATE_1M_PLCP:
+                       rinfo->legacy = 10;
+                       break;
+               case IWL_RATE_2M_PLCP:
+                       rinfo->legacy = 20;
+                       break;
+               case IWL_RATE_5M_PLCP:
+                       rinfo->legacy = 55;
+                       break;
+               case IWL_RATE_11M_PLCP:
+                       rinfo->legacy = 110;
+                       break;
+               case IWL_RATE_6M_PLCP:
+                       rinfo->legacy = 60;
+                       break;
+               case IWL_RATE_9M_PLCP:
+                       rinfo->legacy = 90;
+                       break;
+               case IWL_RATE_12M_PLCP:
+                       rinfo->legacy = 120;
+                       break;
+               case IWL_RATE_18M_PLCP:
+                       rinfo->legacy = 180;
+                       break;
+               case IWL_RATE_24M_PLCP:
+                       rinfo->legacy = 240;
+                       break;
+               case IWL_RATE_36M_PLCP:
+                       rinfo->legacy = 360;
+                       break;
+               case IWL_RATE_48M_PLCP:
+                       rinfo->legacy = 480;
+                       break;
+               case IWL_RATE_54M_PLCP:
+                       rinfo->legacy = 540;
+                       break;
+               }
+       }
+}
+
 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_sta *sta,
@@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
                sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
        }
 
+       if (iwl_mvm_has_tlc_offload(mvm)) {
+               struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+               iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+       }
+
        /* if beacon filtering isn't on mac80211 does it anyway */
        if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
                return;
index 3ec8de00f3aa7ded6086e3c8b07978fc579f1478..67ab7e7e9c9dd2f998fbffe7b2df71f38a5fe835 100644 (file)
@@ -1298,9 +1298,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
        bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
                                   IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
-       if (iwlwifi_mod_params.lar_disable)
-               return false;
-
        /*
         * Enable LAR only if it is supported by the FW (TLV) &&
         * enabled in the NVM
@@ -1508,8 +1505,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
 int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
                                             u16 len, const void *data,
                                             u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-                  struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+                      struct ieee80211_sta *sta);
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
index 945c1ea5cda8642285fec3b95349d0c8cbefc4b2..46128a2a9c6e157e9e9923f813876b4f64feeee8 100644 (file)
@@ -277,11 +277,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        struct iwl_nvm_section *sections = mvm->nvm_sections;
        const __be16 *hw;
        const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
-       bool lar_enabled;
        int regulatory_type;
 
        /* Checking for required sections */
-       if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+       if (mvm->trans->cfg->nvm_type == IWL_NVM) {
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
                    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
                        IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
                (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
 
-       lar_enabled = !iwlwifi_mod_params.lar_disable &&
-                     fw_has_capa(&mvm->fw->ucode_capa,
-                                 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
-       return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+       return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
                                  regulatory, mac_override, phy_sku,
-                                 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-                                 lar_enabled);
+                                 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
 }
 
 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
index ef99c49247b7d2949f018328afd817e76caeb3cc..c15f7dbc9516b38f96032124ea6aa011ea648877 100644 (file)
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
 
 static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
 {
-       struct iwl_mvm_rss_sync_notif notif = {
-               .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
-               .metadata.sync = 0,
-               .nssn_sync.baid = baid,
-               .nssn_sync.nssn = nssn,
-       };
-
-       iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
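+       /*
+        * NSSN sync across RX queues is compile-time disabled by default
+        * (IWL_MVM_USE_NSSN_SYNC is 0), so this branch is normally
+        * compiled out.
+        */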
+       if (IWL_MVM_USE_NSSN_SYNC) {
+               struct iwl_mvm_rss_sync_notif notif = {
+                       .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+                       .metadata.sync = 0,
+                       .nssn_sync.baid = baid,
+                       .nssn_sync.nssn = nssn,
+               };
+
+               iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
+                                               sizeof(notif));
+       }
 }
 
 #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
index a046ac9fa85244b702e368458b32a70d19f2f854..a5af8f4128b1cdeb70a1f4f34e5bb24d822858f8 100644 (file)
@@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
                cmd_size = sizeof(struct iwl_scan_config_v2);
        else
                cmd_size = sizeof(struct iwl_scan_config_v1);
-       cmd_size += num_channels;
+       cmd_size += mvm->fw->ucode_capa.n_scan_channels;
 
        cfg = kzalloc(cmd_size, GFP_KERNEL);
        if (!cfg)
index dc5c02fbc65a4dd7ea3391860e89f6e9815f83cd..ddfc9a668036265d99a55df5873e3dd10b2a0189 100644 (file)
@@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 /*
  * Allocates and sets the Tx cmd the driver data pointers in the skb
  */
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
                      struct ieee80211_tx_info *info, int hdrlen,
                      struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
 
        dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -504,11 +504,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (unlikely(!dev_cmd))
                return NULL;
 
-       /* Make sure we zero enough of dev_cmd */
-       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
-       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
-       memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
        dev_cmd->hdr.cmd = TX_CMD;
 
        if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -597,7 +592,7 @@ out:
 }
 
 static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
-                                      struct iwl_device_cmd *cmd)
+                                      struct iwl_device_tx_cmd *cmd)
 {
        struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 
@@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info info;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        u8 sta_id;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        __le16 fc = hdr->frame_control;
@@ -1078,7 +1073,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_mvm_sta *mvmsta;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        __le16 fc;
        u16 seq_number = 0;
        u8 tid = IWL_MAX_TID_COUNT;
@@ -1154,7 +1149,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                spin_unlock(&mvmsta->lock);
-               return 0;
+               return -1;
        }
 
        if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1206,8 +1201,8 @@ drop:
        return -1;
 }
 
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-                  struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+                      struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct ieee80211_tx_info info;
index d38cefbb779e8e16dcbecb208c2111441b9920e4..e249e3fd14c609c7c135b059dceedaa78f44d532 100644 (file)
 #include "internal.h"
 #include "iwl-prph.h"
 
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+                                                   size_t size,
+                                                   dma_addr_t *phys,
+                                                   int depth)
+{
+       void *result;
+
+       if (WARN(depth > 2,
+                "failed to allocate DMA memory not crossing 2^32 boundary"))
+               return NULL;
+
+       result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+       if (!result)
+               return NULL;
+
+       if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+               void *old = result;
+               dma_addr_t oldphys = *phys;
+
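+               /*
+                * Free the crossing buffer only after the retry allocation,
+                * so the allocator cannot hand back the same region; the
+                * depth check above bounds the number of retries.
+                */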
+               result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+                                                               phys,
+                                                               depth + 1);
+               dma_free_coherent(trans->dev, size, old, oldphys);
+       }
+
+       return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+                                                  size_t size,
+                                                  dma_addr_t *phys)
+{
+       return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 {
        struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
        struct iwl_context_info *ctxt_info;
        struct iwl_context_info_rbd_cfg *rx_cfg;
        u32 control_flags = 0, rb_size;
+       dma_addr_t phys;
        int ret;
 
-       ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
-                                      &trans_pcie->ctxt_info_dma_addr,
-                                      GFP_KERNEL);
+       ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+                                                         sizeof(*ctxt_info),
+                                                         &phys);
        if (!ctxt_info)
                return -ENOMEM;
 
+       trans_pcie->ctxt_info_dma_addr = phys;
+
        ctxt_info->version.version = 0;
        ctxt_info->version.mac_id =
                cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
index 040cec17d3ad68c4e2d38ee1af19f686c1bbc90e..b0b7eca1754ed4255375d2e453d01a8689bba4ce 100644 (file)
@@ -1111,18 +1111,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* same thing for QuZ... */
        if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
-               if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
-                       iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
-               else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
-                       iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
-               else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
-                       iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
-               else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
-                       iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
-               else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
-                       iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
-               else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
-                       iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+               if (cfg == &iwl_ax101_cfg_qu_hr)
+                       cfg = &iwl_ax101_cfg_quz_hr;
+               else if (cfg == &iwl_ax201_cfg_qu_hr)
+                       cfg = &iwl_ax201_cfg_quz_hr;
+               else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+                       cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+               else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+                       cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+               else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+                       cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+               else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+                       cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
        }
 
 #endif
index a091690f6c799fad751b7759c775740d46692888..f14bcef3495e21a4c9ca436cae8ad2c58ccc3455 100644 (file)
@@ -305,7 +305,7 @@ struct iwl_cmd_meta {
 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
 
 struct iwl_pcie_txq_entry {
-       struct iwl_device_cmd *cmd;
+       void *cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
@@ -672,6 +672,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 /*****************************************************
 * TX / HCMD
 ******************************************************/
+/*
+ * We need this inline even if dma_addr_t is only 32 bits: the
+ * hardware always uses 64-bit addresses, so the issue can still
+ * occur in that case, and taking 'phys' as a u64 forces the
+ * addition to be done in 64 bits.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+       return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
 int iwl_pcie_tx_init(struct iwl_trans *trans);
 int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
                          int queue_size);
@@ -688,7 +698,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
                                  struct iwl_txq *txq);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                     struct iwl_device_cmd *dev_cmd, int txq_id);
+                     struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -1082,7 +1092,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
                            struct sk_buff *skb);
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+                                     struct sk_buff *skb);
 #endif
 
 /* common functions that are used by gen3 transport */
@@ -1106,7 +1117,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                                 unsigned int timeout);
 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                          struct iwl_device_cmd *dev_cmd, int txq_id);
+                          struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
                                  struct iwl_host_cmd *cmd);
 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
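
A standalone illustration of why iwl_pcie_crosses_4g_boundary() takes a u64 even when dma_addr_t is 32 bits: done in 32-bit arithmetic, phys + len could wrap and the crossing would go undetected. Only the upper_32_bits() shape is taken from the kernel; the rest is a hypothetical test harness.

#include <stdint.h>
#include <stdio.h>

/* same shape as the kernel's upper_32_bits() */
#define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))

/* true if [phys, phys + len] reaches past a 2^32 address boundary */
static int crosses_4g_boundary(uint64_t phys, uint16_t len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int main(void)
{
	/* ends below the 4 GiB line: fine */
	printf("%d\n", crosses_4g_boundary(0xfffffff0ULL, 8));		/* 0 */
	/* same start, longer buffer: ends past the line */
	printf("%d\n", crosses_4g_boundary(0xfffffff0ULL, 32));	/* 1 */
	/* a 32-bit addition would have wrapped the second sum to 0x10 */
	return 0;
}
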
index 452da44a21e052f995bd23bcf1d02e8d587d9b17..f0b8ff67a1bc952901165a485477fe5d278a9372 100644 (file)
@@ -1529,13 +1529,13 @@ out:
 
        napi = &rxq->napi;
        if (napi->poll) {
+               napi_gro_flush(napi, false);
+
                if (napi->rx_count) {
                        netif_receive_skb_list(&napi->rx_list);
                        INIT_LIST_HEAD(&napi->rx_list);
                        napi->rx_count = 0;
                }
-
-               napi_gro_flush(napi, false);
        }
 
        iwl_pcie_rxq_restock(trans, rxq);
index 0252716c0b247faa58b0db76a5cba67874efa594..0d8b2a8ffa5d82d7b01f3277c19c172b87079e7b 100644 (file)
 #include "internal.h"
 #include "fw/dbg.h"
 
-static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
-{
-       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
-                         HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
-       udelay(20);
-       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
-                         HPM_HIPM_GEN_CFG_CR_PG_EN |
-                         HPM_HIPM_GEN_CFG_CR_SLP_EN);
-       udelay(20);
-       iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
-                           HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
-
-       iwl_trans_sw_reset(trans);
-       iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-       return 0;
-}
-
 /*
  * Start up NIC's basic functionality after it has been reset
  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -110,13 +92,6 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
 
        iwl_pcie_apm_config(trans);
 
-       if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
-           trans->cfg->integrated) {
-               ret = iwl_pcie_gen2_force_power_gating(trans);
-               if (ret)
-                       return ret;
-       }
-
        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
        if (ret)
                return ret;
index af9bc6b645427ddef9034b0721f421102962155e..f60d66f1e55b0b58d9cbae3025affa169cb165c7 100644 (file)
@@ -79,6 +79,7 @@
 #include "iwl-agn-hw.h"
 #include "fw/error-dump.h"
 #include "fw/dbg.h"
+#include "fw/api/tx.h"
 #include "internal.h"
 #include "iwl-fh.h"
 
@@ -301,18 +302,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
        u16 cap;
 
        /*
-        * HW bug W/A for instability in PCIe bus L0S->L1 transition.
-        * Check if BIOS (or OS) enabled L1-ASPM on this device.
-        * If so (likely), disable L0S, so device moves directly L0->L1;
-        *    costs negligible amount of power savings.
-        * If not (unlikely), enable L0S, so there is at least some
-        *    power savings, even without L1.
+        * L0S states have been found to be unstable with our devices
+        * and in newer hardware they are not officially supported at
+        * all, so we must always set the L0S_DISABLED bit.
         */
+       iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-       if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
-               iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-       else
-               iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
 
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -1783,6 +1779,29 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
        return 0;
 }
 
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+       int ret;
+
+       ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+       if (ret < 0)
+               return ret;
+
+       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                         HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+       udelay(20);
+       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                         HPM_HIPM_GEN_CFG_CR_PG_EN |
+                         HPM_HIPM_GEN_CFG_CR_SLP_EN);
+       udelay(20);
+       iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                           HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+       iwl_trans_pcie_sw_reset(trans);
+
+       return 0;
+}
+
 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1802,6 +1821,13 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 
        iwl_trans_pcie_sw_reset(trans);
 
+       if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+           trans->cfg->integrated) {
+               err = iwl_pcie_gen2_force_power_gating(trans);
+               if (err)
+                       return err;
+       }
+
        err = iwl_pcie_apm_init(trans);
        if (err)
                return err;
@@ -3430,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 {
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
-       int ret, addr_size;
+       int ret, addr_size, txcmd_size, txcmd_align;
+       const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+       if (!cfg_trans->gen2) {
+               ops = &trans_ops_pcie;
+               txcmd_size = sizeof(struct iwl_tx_cmd);
+               txcmd_align = sizeof(void *);
+       } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+               txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+               txcmd_align = 64;
+       } else {
+               txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+               txcmd_align = 128;
+       }
+
+       txcmd_size += sizeof(struct iwl_cmd_header);
+       txcmd_size += 36; /* biggest possible 802.11 header */
+
+       /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+       if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+               return ERR_PTR(-EINVAL);
 
        ret = pcim_enable_device(pdev);
        if (ret)
                return ERR_PTR(ret);
 
-       if (cfg_trans->gen2)
-               trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-                                       &pdev->dev, &trans_ops_pcie_gen2);
-       else
-               trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-                                       &pdev->dev, &trans_ops_pcie);
-
+       trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+                               txcmd_size, txcmd_align);
        if (!trans)
                return ERR_PTR(-ENOMEM);
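
The txcmd_size/txcmd_align pairing above relies on a simple invariant, which the WARN_ON enforces for gen2: if objects are carved from align-aligned slots and each object's size stays below the alignment, no object can straddle an alignment (or page) boundary. A small sketch of that invariant, with sizes chosen for illustration rather than taken from the driver:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* does [start, start + size) straddle an 'align' boundary? */
static int straddles(uintptr_t start, size_t size, size_t align)
{
	return (start / align) != ((start + size - 1) / align);
}

int main(void)
{
	size_t align = 128, size = 100;	/* size < align, as the check demands */
	uintptr_t slot;

	for (slot = 0; slot < 4096; slot += align) {
		if (straddles(slot, size, align)) {
			puts("boundary crossed");
			return 1;
		}
	}
	puts("no aligned slot ever crosses a boundary");
	return 0;
}
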
 
index 8ca0250de99ec06882b27ff8e176998db97c72c9..bfb984b2e00c6079b36334eb5a611a0206fa843a 100644 (file)
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
        int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
        struct iwl_tfh_tb *tb;
 
+       /*
+        * Only WARN here so we know about the issue; returning an
+        * error would mess up our unmap path, because not every
+        * caller currently checks the value returned from this
+        * function. It can only fail when there is no more TB space,
+        * so callers that know there is enough don't always check ...
+        */
+       WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+            "possible DMA problem with iova:0x%llx, len:%d\n",
+            (unsigned long long)addr, len);
+
        if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
                return -EINVAL;
        tb = &tfd->tbs[idx];
@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
        return idx;
 }
 
+static struct page *get_workaround_page(struct iwl_trans *trans,
+                                       struct sk_buff *skb)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct page **page_ptr;
+       struct page *ret;
+
+       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+       ret = alloc_page(GFP_ATOMIC);
+       if (!ret)
+               return NULL;
+
+       /* set the chaining pointer to the previous page if there */
+       *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+       *page_ptr = ret;
+
+       return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+                                       struct sk_buff *skb,
+                                       struct iwl_tfh_tfd *tfd,
+                                       dma_addr_t phys, void *virt,
+                                       u16 len, struct iwl_cmd_meta *meta)
+{
+       dma_addr_t oldphys = phys;
+       struct page *page;
+       int ret;
+
+       if (unlikely(dma_mapping_error(trans->dev, phys)))
+               return -ENOMEM;
+
+       if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+               ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+               if (ret < 0)
+                       goto unmap;
+
+               if (meta)
+                       meta->tbs |= BIT(ret);
+
+               ret = 0;
+               goto trace;
+       }
+
+       /*
+        * Work around a hardware bug: if (as checked by the condition
+        * above) the TB crosses, or ends exactly on, a 2^32 address
+        * boundary, then the next TB may be accessed with the wrong
+        * address.
+        * To work around it, copy the data elsewhere and make a new
+        * mapping for it so the device will not fail.
+        */
+
+       if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+               ret = -ENOBUFS;
+               goto unmap;
+       }
+
+       page = get_workaround_page(trans, skb);
+       if (!page) {
+               ret = -ENOMEM;
+               goto unmap;
+       }
+
+       memcpy(page_address(page), virt, len);
+
+       phys = dma_map_single(trans->dev, page_address(page), len,
+                             DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(trans->dev, phys)))
+               return -ENOMEM;
+       ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+       if (ret < 0) {
+               /* unmap the new allocation as single */
+               oldphys = phys;
+               meta = NULL;
+               goto unmap;
+       }
+       IWL_WARN(trans,
+                "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+                len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+       ret = 0;
+unmap:
+       if (meta)
+               dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+       else
+               dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+       trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+       return ret;
+}
+
 static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                                     struct sk_buff *skb,
                                     struct iwl_tfh_tfd *tfd, int start_len,
-                                    u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+                                    u8 hdr_len,
+                                    struct iwl_device_tx_cmd *dev_cmd)
 {
 #ifdef CONFIG_INET
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
        u16 length, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
-       struct page **page_ptr;
        struct tso_t tso;
 
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
 
        /* Our device supports 9 segments at most, it will fit in 1 page */
-       hdr_page = get_page_hdr(trans, hdr_room);
+       hdr_page = get_page_hdr(trans, hdr_room, skb);
        if (!hdr_page)
                return -ENOMEM;
 
-       get_page(hdr_page->page);
        start_hdr = hdr_page->pos;
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-       *page_ptr = hdr_page->page;
 
        /*
         * Pull the ieee80211 header to be able to use TSO core,
@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                        dev_kfree_skb(csum_skb);
                        goto out_err;
                }
+               /*
+                * No need for _with_wa, this is from the TSO page and
+                * we leave some space at the end of it so can't hit
+                * the buggy scenario.
+                */
                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
                trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
                                        tb_phys, tb_len);
@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 
                /* put the payload */
                while (data_left) {
+                       int ret;
+
                        tb_len = min_t(unsigned int, tso.size, data_left);
                        tb_phys = dma_map_single(trans->dev, tso.data,
                                                 tb_len, DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+                       ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+                                                          tb_phys, tso.data,
+                                                          tb_len, NULL);
+                       if (ret) {
                                dev_kfree_skb(csum_skb);
                                goto out_err;
                        }
-                       iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
-                       trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
-                                               tb_phys, tb_len);
 
                        data_left -= tb_len;
                        tso_build_data(skb, &tso, tb_len);
@@ -372,7 +487,7 @@ out_err:
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
                                          struct iwl_txq *txq,
-                                         struct iwl_device_cmd *dev_cmd,
+                                         struct iwl_device_tx_cmd *dev_cmd,
                                          struct sk_buff *skb,
                                          struct iwl_cmd_meta *out_meta,
                                          int hdr_len,
@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
 
        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 
+       /*
+        * No need for _with_wa, the first TB allocation is aligned up
+        * to a 64-byte boundary and thus can't be at the end or cross
+        * a page boundary (much less a 2^32 boundary).
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
        /*
@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
        tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
+       /*
+        * No need for _with_wa(), we ensure (via alignment) that the data
+        * here can never cross or end at a page boundary.
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
 
        if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                dma_addr_t tb_phys;
-               int tb_idx;
+               unsigned int fragsz = skb_frag_size(frag);
+               int ret;
 
-               if (!skb_frag_size(frag))
+               if (!fragsz)
                        continue;
 
                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
-                                          skb_frag_size(frag), DMA_TO_DEVICE);
-
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-                       return -ENOMEM;
-               tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
-                                             skb_frag_size(frag));
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
-                                       tb_phys, skb_frag_size(frag));
-               if (tb_idx < 0)
-                       return tb_idx;
-
-               out_meta->tbs |= BIT(tb_idx);
+                                          fragsz, DMA_TO_DEVICE);
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  skb_frag_address(frag),
+                                                  fragsz, out_meta);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
                                    struct iwl_txq *txq,
-                                   struct iwl_device_cmd *dev_cmd,
+                                   struct iwl_device_tx_cmd *dev_cmd,
                                    struct sk_buff *skb,
                                    struct iwl_cmd_meta *out_meta,
                                    int hdr_len,
@@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        /* The first TB points to bi-directional DMA data */
        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
 
+       /*
+        * No need for _with_wa, the first TB allocation is aligned up
+        * to a 64-byte boundary and thus can't be at the end or cross
+        * a page boundary (much less a 2^32 boundary).
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
        /*
@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
+       /*
+        * No need for _with_wa(), we ensure (via alignment) that the data
+        * here can never cross or end at a page boundary.
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        tb2_len = skb_headlen(skb) - hdr_len;
 
        if (tb2_len > 0) {
+               int ret;
+
                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
                                         tb2_len, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  skb->data + hdr_len, tb2_len,
+                                                  NULL);
+               if (ret)
                        goto out_err;
-               iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
-                                       tb_phys, tb2_len);
        }
 
        if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
                goto out_err;
 
        skb_walk_frags(skb, frag) {
+               int ret;
+
                tb_phys = dma_map_single(trans->dev, frag->data,
                                         skb_headlen(frag), DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  frag->data,
+                                                  skb_headlen(frag), NULL);
+               if (ret)
                        goto out_err;
-               iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
-                                       tb_phys, skb_headlen(frag));
                if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
                        goto out_err;
        }
@@ -538,7 +670,7 @@ out_err:
 static
 struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                                            struct iwl_txq *txq,
-                                           struct iwl_device_cmd *dev_cmd,
+                                           struct iwl_device_tx_cmd *dev_cmd,
                                            struct sk_buff *skb,
                                            struct iwl_cmd_meta *out_meta)
 {
@@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 }
 
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                          struct iwl_device_cmd *dev_cmd, int txq_id)
+                          struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_cmd_meta *out_meta;
@@ -603,7 +735,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
                /* don't put the packet on the ring, if there is no room */
                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-                       struct iwl_device_cmd **dev_cmd_ptr;
+                       struct iwl_device_tx_cmd **dev_cmd_ptr;
 
                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                               trans_pcie->dev_cmd_offs);
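
The gen2 TX changes above all follow one recipe: map the buffer, and if the resulting bus address would cross a 2^32 boundary, copy the payload into a freshly allocated page and map that instead. A hedged userspace sketch of just the decision and the copy; a fixed fake bus address stands in for the DMA mapping, which cannot be reproduced outside the kernel.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_PAGE_SIZE 4096

static int crosses_4g(uint64_t addr, uint16_t len)
{
	return (addr >> 32) != ((addr + len) >> 32);
}

/*
 * If the (pretend) bus address for 'data' would straddle a 2^32
 * boundary, copy the payload into a fresh page-sized buffer and let
 * the device read the copy; otherwise use the original buffer.
 */
static void *pick_tx_buffer(void *data, uint16_t len, uint64_t bus_addr)
{
	void *bounce;

	if (!crosses_4g(bus_addr, len))
		return data;

	bounce = aligned_alloc(FAKE_PAGE_SIZE, FAKE_PAGE_SIZE);
	if (!bounce)
		return NULL;
	memcpy(bounce, data, len);	/* the safe copy gets mapped instead */
	return bounce;
}

int main(void)
{
	char payload[64] = "hello";
	void *buf = pick_tx_buffer(payload, sizeof(payload), 0xffffffe0ULL);

	printf("bounced: %s\n", buf == (void *)payload ? "no" : "yes");
	if (buf != (void *)payload)
		free(buf);
	return 0;
}
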
index f21f16ab2a97003d2f3b0ace53e8dd2b3f0fdb50..b0eb52b4951b7b4d22161dccca2f0433b0b32f32 100644 (file)
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
        u8 sec_ctl = 0;
        u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
        __le16 bc_ent;
-       struct iwl_tx_cmd *tx_cmd =
-               (void *)txq->entries[txq->write_ptr].cmd->payload;
+       struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+       struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
        u8 sta_id = tx_cmd->sta_id;
 
        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
        int read_ptr = txq->read_ptr;
        u8 sta_id = 0;
        __le16 bc_ent;
-       struct iwl_tx_cmd *tx_cmd =
-               (void *)txq->entries[read_ptr].cmd->payload;
+       struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+       struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 
        WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
                            struct sk_buff *skb)
 {
        struct page **page_ptr;
+       struct page *next;
 
        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+       next = *page_ptr;
+       *page_ptr = NULL;
 
-       if (*page_ptr) {
-               __free_page(*page_ptr);
-               *page_ptr = NULL;
+       while (next) {
+               struct page *tmp = next;
+
+               next = *(void **)(page_address(next) + PAGE_SIZE -
+                                 sizeof(void *));
+               __free_page(tmp);
        }
 }
 
@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
                while (!skb_queue_empty(&overflow_skbs)) {
                        struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
-                       struct iwl_device_cmd *dev_cmd_ptr;
+                       struct iwl_device_tx_cmd *dev_cmd_ptr;
 
                        dev_cmd_ptr = *(void **)((u8 *)skb->cb +
                                                 trans_pcie->dev_cmd_offs);
@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 }
 
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+                                     struct sk_buff *skb)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+       struct page **page_ptr;
+
+       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+       if (WARN_ON(*page_ptr))
+               return NULL;
 
        if (!p->page)
                goto alloc;
 
-       /* enough room on this page */
-       if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
-               return p;
+       /*
+        * Check if there's enough room on this page
+        *
+        * Note that we put a page chaining pointer *last* in the
+        * page - we need it somewhere, and if it's there then we
+        * avoid DMA mapping the last bits of the page which may
+        * trigger the 32-bit boundary hardware bug.
+        *
+        * (see also get_workaround_page() in tx-gen2.c)
+        */
+       if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+                          sizeof(void *))
+               goto out;
 
        /* We don't have enough room on this page, get a new one. */
        __free_page(p->page);
@@ -2072,6 +2095,11 @@ alloc:
        if (!p->page)
                return NULL;
        p->pos = page_address(p->page);
+       /* set the chaining pointer to NULL */
+       *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+       *page_ptr = p->page;
+       get_page(p->page);
        return p;
 }
 
@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                   struct iwl_txq *txq, u8 hdr_len,
                                   struct iwl_cmd_meta *out_meta,
-                                  struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+                                  struct iwl_device_tx_cmd *dev_cmd,
+                                  u16 tb1_len)
 {
        struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
        struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
        u16 length, iv_len, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
-       struct page **page_ptr;
        struct tso_t tso;
 
        /* if the packet is protected, then it must be CCMP or GCMP */
@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 
        /* Our device supports 9 segments at most, it will fit in 1 page */
-       hdr_page = get_page_hdr(trans, hdr_room);
+       hdr_page = get_page_hdr(trans, hdr_room, skb);
        if (!hdr_page)
                return -ENOMEM;
 
-       get_page(hdr_page->page);
        start_hdr = hdr_page->pos;
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-       *page_ptr = hdr_page->page;
        memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
        hdr_page->pos += iv_len;
 
@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                   struct iwl_txq *txq, u8 hdr_len,
                                   struct iwl_cmd_meta *out_meta,
-                                  struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+                                  struct iwl_device_tx_cmd *dev_cmd,
+                                  u16 tb1_len)
 {
        /* No A-MSDU without CONFIG_INET */
        WARN_ON(1);
@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 #endif /* CONFIG_INET */
 
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                     struct iwl_device_cmd *dev_cmd, int txq_id)
+                     struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct ieee80211_hdr *hdr;
@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
                /* don't put the packet on the ring, if there is no room */
                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-                       struct iwl_device_cmd **dev_cmd_ptr;
+                       struct iwl_device_tx_cmd **dev_cmd_ptr;
 
                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                               trans_pcie->dev_cmd_offs);
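
get_workaround_page() and the reworked iwl_pcie_free_tso_page() loop above implement a tiny intrusive list: each page's last sizeof(void *) bytes hold a pointer to the previously chained page, so teardown is a walk that saves the next pointer before freeing. A runnable userspace analogue, with malloc'd blocks standing in for pages:

#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096
/* the chaining pointer lives in the last sizeof(void *) bytes of a page */
#define NEXT_PTR(page) \
	(*(void **)((char *)(page) + FAKE_PAGE_SIZE - sizeof(void *)))

/* push a new page onto the chain headed by *head */
static void *chain_new_page(void **head)
{
	void *page = malloc(FAKE_PAGE_SIZE);

	if (!page)
		return NULL;
	NEXT_PTR(page) = *head;
	*head = page;
	return page;
}

/* walk the chain, grabbing each next pointer before freeing the page */
static void free_page_chain(void **head)
{
	void *next = *head;

	*head = NULL;
	while (next) {
		void *tmp = next;

		next = NEXT_PTR(next);
		free(tmp);
	}
}

int main(void)
{
	void *head = NULL;
	int i;

	for (i = 0; i < 3; i++)
		chain_new_page(&head);
	free_page_chain(&head);
	puts("chain freed");
	return 0;
}

Stashing the pointer at the page tail also keeps those bytes out of the DMA-mapped region, which is exactly what the comment in get_page_hdr() relies on.
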
index 57edfada0665fda7ec68b7f60128eef3af9a5219..c9401c121a14e6dfd11d41aca19fb8d4f25b706b 100644 (file)
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
        int hw, ap, ap_max = ie[1];
        u8 hw_rate;
 
+       if (ap_max > MAX_RATES) {
+               lbs_deb_assoc("invalid rates\n");
+               return tlv;
+       }
        /* Advance past IE header */
        ie += 2;
 
@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
        struct cmd_ds_802_11_ad_hoc_join cmd;
        u8 preamble = RADIO_PREAMBLE_SHORT;
        int ret = 0;
+       int hw, i;
+       u8 rates_max;
+       u8 *rates;
 
        /* TODO: set preamble based on scan result */
        ret = lbs_set_radio(priv, preamble, 1);
@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
        if (!rates_eid) {
                lbs_add_rates(cmd.bss.rates);
        } else {
-               int hw, i;
-               u8 rates_max = rates_eid[1];
-               u8 *rates = cmd.bss.rates;
+               rates_max = rates_eid[1];
+               if (rates_max > MAX_RATES) {
+                       lbs_deb_join("invalid rates\n");
+                       goto out;
+               }
+               rates = cmd.bss.rates;
                for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
                        u8 hw_rate = lbs_rates[hw].bitrate / 5;
                        for (i = 0; i < rates_max; i++) {
index fe14814af3007c2ee6be5b162042d977d5e7c241..c604613ab506d3c920cf52d54011403751a9b971 100644 (file)
@@ -774,7 +774,7 @@ void lbs_debugfs_remove_one(struct lbs_private *priv)
 
 #ifdef PROC_DEBUG
 
-#define item_size(n)   (FIELD_SIZEOF(struct lbs_private, n))
+#define item_size(n)   (sizeof_field(struct lbs_private, n))
 #define item_addr(n)   (offsetof(struct lbs_private, n))
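
FIELD_SIZEOF() was renamed to sizeof_field() in this cycle; both give the size of a struct member without needing an instance of the struct. A quick standalone demonstration, with the macro body mirroring the kernel's definition and a throwaway struct of my own:

#include <stdio.h>

/* mirrors the kernel's definition in <linux/stddef.h> */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct demo {
	char name[16];
	unsigned long count;
};

int main(void)
{
	printf("name:  %zu bytes\n", sizeof_field(struct demo, name));
	printf("count: %zu bytes\n", sizeof_field(struct demo, count));
	return 0;
}
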
 
 
index 74e50566db1f2711d3c35a3bc31c0f9bdb767b2c..6dd835f1efc21ae0d5145056a67244be4e0e6de9 100644 (file)
@@ -229,6 +229,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
                            "11D: skip setting domain info in FW\n");
                return 0;
        }
+
+       if (country_ie_len >
+           (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "11D: country_ie_len overflow!, deauth AP\n");
+               return -EINVAL;
+       }
+
        memcpy(priv->adapter->country_code, &country_ie[2], 2);
 
        domain_info->country_code[0] = country_ie[2];
@@ -272,8 +280,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
        priv->scan_block = false;
 
        if (bss) {
-               if (adapter->region_code == 0x00)
-                       mwifiex_process_country_ie(priv, bss);
+               if (adapter->region_code == 0x00 &&
+                   mwifiex_process_country_ie(priv, bss))
+                       return -EINVAL;
 
                /* Allocate and fill new bss descriptor */
                bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
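
The mwifiex fix above rejects a country IE longer than the destination buffer before copying, and makes bss_start fail instead of silently proceeding. A simplified sketch of that validate-then-copy shape; the cap here is a hypothetical constant standing in for IEEE80211_COUNTRY_STRING_LEN plus MWIFIEX_MAX_TRIPLET_802_11D, whose values the diff does not show:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_COUNTRY_IE_LEN 83	/* assumed limit, for illustration only */

static int process_country_ie(const unsigned char *ie, size_t ie_len,
			      char country[3])
{
	if (ie_len < 2 || ie_len > MAX_COUNTRY_IE_LEN)
		return -EINVAL;	/* oversized or truncated IE: refuse it */
	memcpy(country, ie, 2);	/* first two bytes carry the country code */
	country[2] = '\0';
	return 0;
}

int main(void)
{
	char cc[3];
	const unsigned char ie[] = { 'D', 'E', ' ' };

	if (!process_country_ie(ie, sizeof(ie), cc))
		printf("country: %s\n", cc);
	return 0;
}
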
index 09313047beeddd70e984c5faa3bae3dd2f49d907..7caf1d26124a2294e0e70379f9f3983bc071f70f 100644 (file)
@@ -953,59 +953,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
 
                switch (*pos) {
                case WLAN_EID_SUPP_RATES:
+                       if (pos[1] > 32)
+                               return;
                        sta_ptr->tdls_cap.rates_len = pos[1];
                        for (i = 0; i < pos[1]; i++)
                                sta_ptr->tdls_cap.rates[i] = pos[i + 2];
                        break;
 
                case WLAN_EID_EXT_SUPP_RATES:
+                       if (pos[1] > 32)
+                               return;
                        basic = sta_ptr->tdls_cap.rates_len;
+                       if (pos[1] > 32 - basic)
+                               return;
                        for (i = 0; i < pos[1]; i++)
                                sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
                        sta_ptr->tdls_cap.rates_len += pos[1];
                        break;
                case WLAN_EID_HT_CAPABILITY:
-                       memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+                       if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
+                               return;
+                       if (pos[1] != sizeof(struct ieee80211_ht_cap))
+                               return;
+                       /* copy the IE's value into ht_capb */
+                       memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
                               sizeof(struct ieee80211_ht_cap));
                        sta_ptr->is_11n_enabled = 1;
                        break;
                case WLAN_EID_HT_OPERATION:
-                       memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+                       if (pos > end -
+                           sizeof(struct ieee80211_ht_operation) - 2)
+                               return;
+                       if (pos[1] != sizeof(struct ieee80211_ht_operation))
+                               return;
+                       /* copy the IE's value into ht_oper */
+                       memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
                               sizeof(struct ieee80211_ht_operation));
                        break;
                case WLAN_EID_BSS_COEX_2040:
+                       if (pos > end - 3)
+                               return;
+                       if (pos[1] != 1)
+                               return;
                        sta_ptr->tdls_cap.coex_2040 = pos[2];
                        break;
                case WLAN_EID_EXT_CAPABILITY:
+                       if (pos > end - sizeof(struct ieee_types_header))
+                               return;
+                       if (pos[1] < sizeof(struct ieee_types_header))
+                               return;
+                       if (pos[1] > 8)
+                               return;
                        memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
                               sizeof(struct ieee_types_header) +
                               min_t(u8, pos[1], 8));
                        break;
                case WLAN_EID_RSN:
+                       if (pos > end - sizeof(struct ieee_types_header))
+                               return;
+                       if (pos[1] < sizeof(struct ieee_types_header))
+                               return;
+                       if (pos[1] > IEEE_MAX_IE_SIZE -
+                           sizeof(struct ieee_types_header))
+                               return;
                        memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
                               sizeof(struct ieee_types_header) +
                               min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
                                     sizeof(struct ieee_types_header)));
                        break;
                case WLAN_EID_QOS_CAPA:
+                       if (pos > end - 3)
+                               return;
+                       if (pos[1] != 1)
+                               return;
                        sta_ptr->tdls_cap.qos_info = pos[2];
                        break;
                case WLAN_EID_VHT_OPERATION:
-                       if (priv->adapter->is_hw_11ac_capable)
-                               memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+                       if (priv->adapter->is_hw_11ac_capable) {
+                               if (pos > end -
+                                   sizeof(struct ieee80211_vht_operation) - 2)
+                                       return;
+                               if (pos[1] !=
+                                   sizeof(struct ieee80211_vht_operation))
+                                       return;
+                               /* copy the IE's value into vhtoper */
+                               memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
                                       sizeof(struct ieee80211_vht_operation));
+                       }
                        break;
                case WLAN_EID_VHT_CAPABILITY:
                        if (priv->adapter->is_hw_11ac_capable) {
-                               memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+                               if (pos > end -
+                                   sizeof(struct ieee80211_vht_cap) - 2)
+                                       return;
+                               if (pos[1] != sizeof(struct ieee80211_vht_cap))
+                                       return;
+                               /* copy the IE's value into vhtcap */
+                               memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
                                       sizeof(struct ieee80211_vht_cap));
                                sta_ptr->is_11ac_enabled = 1;
                        }
                        break;
                case WLAN_EID_AID:
-                       if (priv->adapter->is_hw_11ac_capable)
+                       if (priv->adapter->is_hw_11ac_capable) {
+                               if (pos > end - 4)
+                                       return;
+                               if (pos[1] != 2)
+                                       return;
                                sta_ptr->tdls_cap.aid =
                                        get_unaligned_le16((pos + 2));
+                       }
+                       break;
                default:
                        break;
                }
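
Every case added to the TDLS parser above applies the same two checks before touching pos[2..]: the element must fit within the remaining buffer, and its declared length must match (or bound) what the destination expects. A compact, runnable model of that discipline; the element ID and payload size are illustrative rather than the exact 802.11 values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ht_cap {			/* stand-in for a fixed-size IE payload */
	uint8_t data[26];
};

/*
 * Walk elements of the form [id][len][value...], refusing any whose
 * declared length overruns the buffer or mismatches the destination.
 */
static int parse_ies(const uint8_t *pos, const uint8_t *end)
{
	struct ht_cap cap;

	while (end - pos >= 2) {
		uint8_t id = pos[0], len = pos[1];

		if (len > end - pos - 2)
			return -1;	/* truncated element */

		switch (id) {
		case 45:		/* HT capabilities, illustratively */
			if (len != sizeof(cap))
				return -1;	/* malformed element */
			memcpy(&cap, pos + 2, sizeof(cap));
			break;
		default:
			break;		/* skip unknown elements */
		}
		pos += 2 + len;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[2 + 26] = { 45, 26 };	/* one well-formed element */

	puts(parse_ies(buf, buf + sizeof(buf)) ? "reject" : "ok");
	return 0;
}
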
index c386992abcdb870eb3fef637f627f4abab33de44..7cafcecd7b8565fbba414f9f567e98734940edc4 100644 (file)
@@ -36,11 +36,11 @@ struct mwifiex_cb {
 };
 
 /* size/addr for mwifiex_debug_info */
-#define item_size(n)           (FIELD_SIZEOF(struct mwifiex_debug_info, n))
+#define item_size(n)           (sizeof_field(struct mwifiex_debug_info, n))
 #define item_addr(n)           (offsetof(struct mwifiex_debug_info, n))
 
 /* size/addr for struct mwifiex_adapter */
-#define adapter_item_size(n)   (FIELD_SIZEOF(struct mwifiex_adapter, n))
+#define adapter_item_size(n)   (sizeof_field(struct mwifiex_adapter, n))
 #define adapter_item_addr(n)   (offsetof(struct mwifiex_adapter, n))
 
 struct mwifiex_debug_data {
index 53b5a4b2dcc5049c6cb4d42e3571b532cd5a7071..59c187898132abcf6deae4580767fbbd5cbcc2ce 100644 (file)
@@ -281,8 +281,8 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
 {
        struct mt76_rx_tid *tid = NULL;
 
-       rcu_swap_protected(wcid->aggr[tidno], tid,
-                          lockdep_is_held(&dev->mutex));
+       tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
+                                 lockdep_is_held(&dev->mutex));
        if (tid) {
                mt76_rx_aggr_shutdown(dev, tid);
                kfree_rcu(tid, rcu_head);
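
rcu_swap_protected(), which wrote the old value through an output argument, was replaced by rcu_replace_pointer(), which returns it. Outside the kernel the closest runnable analogue is an atomic exchange; the sketch below shows only the store-new-return-old shape, none of RCU's grace-period semantics (the kfree_rcu() in the real code defers the free past existing readers):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct rx_tid {
	int tidno;
};

static _Atomic(struct rx_tid *) aggr_slot;

/* detach whatever is installed, returning it for teardown */
static struct rx_tid *stop_aggr(void)
{
	/* like rcu_replace_pointer(slot, NULL, ...): store new, return old */
	return atomic_exchange(&aggr_slot, NULL);
}

int main(void)
{
	struct rx_tid *old, *tid = malloc(sizeof(*tid));

	if (!tid)
		return 1;
	tid->tidno = 3;
	atomic_store(&aggr_slot, tid);

	old = stop_aggr();
	if (old) {
		printf("shutting down tid %d\n", old->tidno);
		free(old);	/* the kernel defers this via kfree_rcu() */
	}
	return 0;
}
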
index 55116f395f9acee8ab59712a1ff7434a8827512e..a4a785467748647d37657389edd382ab8ff2181b 100644 (file)
@@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
                        return 0;
 
                sband = dev->hw->wiphy->bands[status->band];
-               if (!sband || status->rate_idx > sband->n_bitrates)
+               if (!sband || status->rate_idx >= sband->n_bitrates)
                        return 0;
 
                rate = &sband->bitrates[status->rate_idx];
index b9f2a401041a45a6fc9b2d7bcd101e1cc49cb1b9..96018fd657791980cdf376ebddb577dbd92cf8c2 100644 (file)
@@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
        struct ieee80211_hw *hw = dev->hw;
 
-       mt76_led_cleanup(dev);
+       if (IS_ENABLED(CONFIG_MT76_LEDS))
+               mt76_led_cleanup(dev);
        mt76_tx_status_check(dev, NULL, true);
        ieee80211_unregister_hw(hw);
 }
index a03e2d01fba7cb9dd356c2ad371dfc904929fec4..d1405528b50425c52131a3774b7112280427e370 100644 (file)
@@ -342,8 +342,11 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
        dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
                 version, fae);
 
-       mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+       memcpy(dev->mt76.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+              ETH_ALEN);
        mt76_eeprom_override(&dev->mt76);
+       mt76x02_mac_setaddr(dev, dev->mt76.macaddr);
+
        mt76x0_set_chip_cap(dev);
        mt76x0_set_freq_offset(dev);
        mt76x0_set_temp_offset(dev);
index 68dd7bb07ca66c634dfcc7aceb4f95d29487f647..f15ba3de6195a61d7762bf6cad530b79c6befc93 100644 (file)
@@ -628,18 +628,6 @@ err:
 
 static void xenvif_disconnect_queue(struct xenvif_queue *queue)
 {
-       if (queue->tx_irq) {
-               unbind_from_irqhandler(queue->tx_irq, queue);
-               if (queue->tx_irq == queue->rx_irq)
-                       queue->rx_irq = 0;
-               queue->tx_irq = 0;
-       }
-
-       if (queue->rx_irq) {
-               unbind_from_irqhandler(queue->rx_irq, queue);
-               queue->rx_irq = 0;
-       }
-
        if (queue->task) {
                kthread_stop(queue->task);
                queue->task = NULL;
@@ -655,6 +643,18 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
                queue->napi.poll = NULL;
        }
 
+       if (queue->tx_irq) {
+               unbind_from_irqhandler(queue->tx_irq, queue);
+               if (queue->tx_irq == queue->rx_irq)
+                       queue->rx_irq = 0;
+               queue->tx_irq = 0;
+       }
+
+       if (queue->rx_irq) {
+               unbind_from_irqhandler(queue->rx_irq, queue);
+               queue->rx_irq = 0;
+       }
+
        xenvif_unmap_frontend_data_rings(queue);
 }
 
index 4d1909aecd6c41b8eceb9856f175a260697260bb..9f60e4dc5a908dd77a1c9aaea22468244e7f77a4 100644 (file)
@@ -278,7 +278,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
 
        r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios);
        if (r)
-               return r;
+               dev_dbg(dev, "Unable to add GPIO mapping table\n");
 
        phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(phy->gpiod_en)) {
index 4590fbf82dc2a486709d8904f4288fd06e395712..f5bb7ace2ff57857d88ea28dc89a4923fc033a18 100644 (file)
@@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
                       cmd, sizeof(cmd), false);
 
        rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
-                         &transferred, 0);
+                         &transferred, 5000);
        kfree(buffer);
        if (rc || (transferred != sizeof(cmd))) {
                nfc_err(&phy->udev->dev,
index be110d9cef0221d5dc89c5e75f800db0c1bd22ad..de613c623a2cf032010badf77f2ce58e5b751188 100644 (file)
@@ -507,7 +507,10 @@ int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
        struct s3fwrn5_info *info = nci_get_drvdata(ndev);
        struct s3fwrn5_fw_info *fw_info = &info->fw_info;
 
-       BUG_ON(fw_info->rsp);
+       if (WARN_ON(fw_info->rsp)) {
+               kfree_skb(skb);
+               return -EINVAL;
+       }
 
        fw_info->rsp = skb;
 
index c6439638a41944a0c53d7ca189aafc30a00ccbb0..b9358db83e960a06a8429cb30163c87e7b4e5aa2 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config NVME_CORE
        tristate
+       select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
 
 config BLK_DEV_NVME
        tristate "NVM Express block device"
index dfe37a525f3aff78433229932afa28854332eb90..5dc32b72e7faab7875640106514ab88d4976ebf9 100644 (file)
@@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status)
        case NVME_SC_CAP_EXCEEDED:
                return BLK_STS_NOSPC;
        case NVME_SC_LBA_RANGE:
+       case NVME_SC_CMD_INTERRUPTED:
+       case NVME_SC_NS_NOT_READY:
                return BLK_STS_TARGET;
        case NVME_SC_BAD_ATTRIBUTES:
        case NVME_SC_ONCS_NOT_SUPPORTED:
@@ -1735,6 +1737,8 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
                if (ret)
                        dev_warn(ctrl->device,
                                 "Identify Descriptors failed (%d)\n", ret);
+               if (ret > 0)
+                       ret = 0;
        }
        return ret;
 }
@@ -2852,6 +2856,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                 * admin connect
                 */
                if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+                       dev_err(ctrl->device,
+                               "Mismatching cntlid: Connect %u vs Identify "
+                               "%u, rejecting\n",
+                               ctrl->cntlid, le16_to_cpu(id->cntlid));
                        ret = -EINVAL;
                        goto out_free;
                }
index 679a721ae229aaaf8432dc7206f4adbd3f305ec6..5a70ac395d53a0f724f3f29431c4c32afa235619 100644 (file)
@@ -95,7 +95,7 @@ struct nvme_fc_fcp_op {
 
 struct nvme_fcp_op_w_sgl {
        struct nvme_fc_fcp_op   op;
-       struct scatterlist      sgl[SG_CHUNK_SIZE];
+       struct scatterlist      sgl[NVME_INLINE_SG_CNT];
        uint8_t                 priv[0];
 };
 
@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
            !template->ls_req || !template->fcp_io ||
            !template->ls_abort || !template->fcp_abort ||
            !template->max_hw_queues || !template->max_sgl_segments ||
-           !template->max_dif_sgl_segments || !template->dma_boundary) {
+           !template->max_dif_sgl_segments || !template->dma_boundary ||
+           !template->module) {
                ret = -EINVAL;
                goto out_reghost_failed;
        }
@@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 {
        struct nvme_fc_ctrl *ctrl =
                container_of(ref, struct nvme_fc_ctrl, ref);
+       struct nvme_fc_lport *lport = ctrl->lport;
        unsigned long flags;
 
        if (ctrl->ctrl.tagset) {
@@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref)
        if (ctrl->ctrl.opts)
                nvmf_free_options(ctrl->ctrl.opts);
        kfree(ctrl);
+       module_put(lport->ops->module);
 }
 
 static void
@@ -2141,7 +2144,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        freq->sg_table.sgl = freq->first_sgl;
        ret = sg_alloc_table_chained(&freq->sg_table,
                        blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
-                       SG_CHUNK_SIZE);
+                       NVME_INLINE_SG_CNT);
        if (ret)
                return -ENOMEM;
 
@@ -2150,7 +2153,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, rq_dma_dir(rq));
        if (unlikely(freq->sg_cnt <= 0)) {
-               sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+               sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
                freq->sg_cnt = 0;
                return -EFAULT;
        }
@@ -2173,7 +2176,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
                        rq_dma_dir(rq));
 
-       sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
 
        freq->sg_cnt = 0;
 }
@@ -2910,10 +2913,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 static void
 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
 {
-       nvme_stop_keep_alive(&ctrl->ctrl);
+       /*
+        * if state is connecting - the error occurred as part of a
+        * reconnect attempt. The create_association error paths will
+        * clean up any outstanding io.
+        *
+        * if it's a different state - ensure all pending io is
+        * terminated. Given this can delay while waiting for the
+        * aborted io to return, we recheck adapter state below
+        * before changing state.
+        */
+       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
+               nvme_stop_keep_alive(&ctrl->ctrl);
 
-       /* will block will waiting for io to terminate */
-       nvme_fc_delete_association(ctrl);
+               /* will block while waiting for io to terminate */
+               nvme_fc_delete_association(ctrl);
+       }
 
        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
            !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
@@ -3059,10 +3074,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                goto out_fail;
        }
 
+       if (!try_module_get(lport->ops->module)) {
+               ret = -EUNATCH;
+               goto out_free_ctrl;
+       }
+
        idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
-               goto out_free_ctrl;
+               goto out_mod_put;
        }
 
        ctrl->ctrl.opts = opts;
@@ -3215,6 +3235,8 @@ out_free_queues:
 out_free_ida:
        put_device(ctrl->dev);
        ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_mod_put:
+       module_put(lport->ops->module);
 out_free_ctrl:
        kfree(ctrl);
 out_fail:
index 3b9cbe0668fa488523f59aa0a12978c0c24fce18..1024fec7914c41b50d8e7087e39a5090131dd5e8 100644 (file)
@@ -28,6 +28,12 @@ extern unsigned int admin_timeout;
 #define NVME_DEFAULT_KATO      5
 #define NVME_KATO_GRACE                10
 
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define  NVME_INLINE_SG_CNT  0
+#else
+#define  NVME_INLINE_SG_CNT  2
+#endif
+
 extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
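
NVME_INLINE_SG_CNT shrinks the scatterlist space embedded in every request from SG_CHUNK_SIZE entries to two, with sg_alloc_table_chained() allocating the rest on demand (and to zero where chaining is unavailable, so everything is allocated). Back-of-envelope arithmetic with assumed sizes, since sizeof(struct scatterlist) is configuration-dependent:

#include <stdio.h>

#define SG_ENTRY_SIZE		32	/* assumed sizeof(struct scatterlist) */
#define SG_CHUNK_SIZE		128	/* old inline entry count */
#define NVME_INLINE_SG_CNT	2	/* new inline count; rest is chained */

int main(void)
{
	printf("old inline sgl: %d bytes per request\n",
	       SG_ENTRY_SIZE * SG_CHUNK_SIZE);		/* 4096 */
	printf("new inline sgl: %d bytes per request\n",
	       SG_ENTRY_SIZE * NVME_INLINE_SG_CNT);	/* 64 */
	return 0;
}
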
index dcaad5831cee7aaef48758a332ae049d06ef9991..365a2ddbeaa762f84a51106163cc915e2c2919ef 100644 (file)
@@ -68,14 +68,14 @@ static int io_queue_depth = 1024;
 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
 
-static int write_queues;
-module_param(write_queues, int, 0644);
+static unsigned int write_queues;
+module_param(write_queues, uint, 0644);
 MODULE_PARM_DESC(write_queues,
        "Number of queues to use for writes. If not set, reads and writes "
        "will share a queue set.");
 
-static int poll_queues;
-module_param(poll_queues, int, 0644);
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
 
 struct nvme_dev;
@@ -176,7 +176,6 @@ struct nvme_queue {
        u16 sq_tail;
        u16 last_sq_tail;
        u16 cq_head;
-       u16 last_cq_head;
        u16 qid;
        u8 cq_phase;
        u8 sqes;
@@ -1026,10 +1025,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
         * the irq handler, even if that was on another CPU.
         */
        rmb();
-       if (nvmeq->cq_head != nvmeq->last_cq_head)
-               ret = IRQ_HANDLED;
        nvme_process_cq(nvmeq, &start, &end, -1);
-       nvmeq->last_cq_head = nvmeq->cq_head;
        wmb();
 
        if (start != end) {
@@ -1549,7 +1545,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
        result = adapter_alloc_sq(dev, qid, nvmeq);
        if (result < 0)
                return result;
-       else if (result)
+       if (result)
                goto release_cq;
 
        nvmeq->cq_vector = vector;
@@ -2058,7 +2054,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
                .priv           = dev,
        };
        unsigned int irq_queues, this_p_queues;
-       unsigned int nr_cpus = num_possible_cpus();
 
        /*
         * Poll queues don't need interrupts, but we need at least one IO
@@ -2069,10 +2064,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
                this_p_queues = nr_io_queues - 1;
                irq_queues = 1;
        } else {
-               if (nr_cpus < nr_io_queues - this_p_queues)
-                       irq_queues = nr_cpus + 1;
-               else
-                       irq_queues = nr_io_queues - this_p_queues + 1;
+               irq_queues = nr_io_queues - this_p_queues + 1;
        }
        dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
 
@@ -3142,6 +3134,9 @@ static int __init nvme_init(void)
        BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
        BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+
+       write_queues = min(write_queues, num_possible_cpus());
+       poll_queues = min(poll_queues, num_possible_cpus());
        return pci_register_driver(&nvme_driver);
 }
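Making write_queues and poll_queues unsigned rules out negative values, and the clamp above caps both at the possible-CPU count before the driver registers. A standalone sketch of the clamp (num_possible_cpus() replaced by a stand-in value):

#include <stdio.h>

/* mirrors min(write_queues, num_possible_cpus()) from nvme_init() */
static unsigned int clamp_to_cpus(unsigned int requested, unsigned int ncpus)
{
    return requested < ncpus ? requested : ncpus;
}

int main(void)
{
    unsigned int write_queues = 99;   /* pretend module parameter */
    unsigned int ncpus = 8;           /* stand-in for num_possible_cpus() */

    printf("effective write_queues = %u\n", clamp_to_cpus(write_queues, ncpus));
    return 0;
}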
 
index dce59459ed41b9867b3645ae2688d62c57c1d52c..2a47c6c5007e1280a320f9776afe10005e23b98a 100644 (file)
@@ -731,7 +731,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                set->reserved_tags = 2; /* connect + keep-alive */
                set->numa_node = nctrl->numa_node;
                set->cmd_size = sizeof(struct nvme_rdma_request) +
-                       SG_CHUNK_SIZE * sizeof(struct scatterlist);
+                       NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
                set->driver_data = ctrl;
                set->nr_hw_queues = 1;
                set->timeout = ADMIN_TIMEOUT;
@@ -745,7 +745,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_SHOULD_MERGE;
                set->cmd_size = sizeof(struct nvme_rdma_request) +
-                       SG_CHUNK_SIZE * sizeof(struct scatterlist);
+                       NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
                set->driver_data = ctrl;
                set->nr_hw_queues = nctrl->queue_count - 1;
                set->timeout = NVME_IO_TIMEOUT;
@@ -1160,7 +1160,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
        }
 
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-       sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1276,7 +1276,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
        req->sg_table.sgl = req->first_sgl;
        ret = sg_alloc_table_chained(&req->sg_table,
                        blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
-                       SG_CHUNK_SIZE);
+                       NVME_INLINE_SG_CNT);
        if (ret)
                return -ENOMEM;
 
@@ -1314,7 +1314,7 @@ out:
 out_unmap_sg:
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
 out_free_table:
-       sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
        return ret;
 }
 
index 56c21b5011852b15eaf899e625d5a15b251b14b3..72a7e41f3018acdbbb2d1203e37cfec1c3b48583 100644 (file)
@@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
        return len;
 }
 
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+       switch (cdw10 & 0xff) {
+       case NVME_FEAT_HOST_ID:
+               return sizeof(req->sq->ctrl->hostid);
+       default:
+               return 0;
+       }
+}
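Get Features normally carries no payload, but the Host Identifier feature returns data, so the expected transfer length now depends on the selected feature. A userspace sketch of the dispatch (0x81 is the Host Identifier code from the NVMe spec; the 16-byte length mirrors the hostid field):

#include <stdint.h>
#include <stdio.h>

#define NVME_FEAT_HOST_ID 0x81

/* mirrors nvmet_feat_data_len(): only Host Identifier carries a payload */
static uint32_t feat_data_len(uint32_t cdw10)
{
    switch (cdw10 & 0xff) {
    case NVME_FEAT_HOST_ID:
        return 16;   /* hostid is a 16-byte UUID */
    default:
        return 0;
    }
}

int main(void)
{
    printf("len(HOST_ID) = %u, len(other) = %u\n",
           (unsigned)feat_data_len(NVME_FEAT_HOST_ID),
           (unsigned)feat_data_len(0x01));
    return 0;
}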
+
 u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
 {
        return le64_to_cpu(cmd->get_log_page.lpo);
@@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;
 
-       if (!nvmet_check_data_len(req, 0))
+       if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
                return;
 
        switch (cdw10 & 0xff) {
index b50b53db37462499cafc46c40a0fe77f7a52095b..1c50af6219f321360b05b729a9f382842b6fb46b 100644 (file)
@@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 #define FCLOOP_DMABOUND_4G             0xFFFFFFFF
 
 static struct nvme_fc_port_template fctemplate = {
+       .module                 = THIS_MODULE,
        .localport_delete       = fcloop_localport_delete,
        .remoteport_delete      = fcloop_remoteport_delete,
        .create_queue           = fcloop_create_queue,
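Setting .module lets the FC transport pin the LLDD for as long as a controller uses it, matching the module_put() added to the nvme-fc error path earlier in this diff. A standalone analogue of that get/put pairing (an illustrative refcount, not the kernel module API):

#include <stdbool.h>
#include <stdio.h>

struct provider { int refcount; };

/* analogue of try_module_get(): pin the provider before using it */
static bool provider_get(struct provider *p)
{
    if (p->refcount < 0)   /* provider going away */
        return false;
    p->refcount++;
    return true;
}

/* analogue of module_put(): every exit path must drop the pin */
static void provider_put(struct provider *p) { p->refcount--; }

int main(void)
{
    struct provider fcloop = { .refcount = 0 };

    if (!provider_get(&fcloop))
        return 1;
    /* controller setup would go here; on failure, still drop the pin */
    provider_put(&fcloop);
    printf("refcount back to %d\n", fcloop.refcount);
    return 0;
}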
index a758bb3d5dd49fbfaee6e79001726a904b644261..4df4ebde208a0465dac1e975304fe4a6fd358c2f 100644 (file)
@@ -76,7 +76,7 @@ static void nvme_loop_complete_rq(struct request *req)
 {
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
-       sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
        nvme_complete_rq(req);
 }
 
@@ -156,7 +156,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
-                               iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+                               iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
                        nvme_cleanup_cmd(req);
                        return BLK_STS_RESOURCE;
                }
@@ -342,7 +342,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-               SG_CHUNK_SIZE * sizeof(struct scatterlist);
+               NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
@@ -516,7 +516,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-               SG_CHUNK_SIZE * sizeof(struct scatterlist);
+               NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
index c6b87ce2b0cc4170dae2837346c4f3e5a1c7f4cd..fc757ef6eadc527b96efb8001dbe856364aac3a0 100644 (file)
@@ -162,7 +162,7 @@ static const struct of_device_id whitelist_phys[] = {
  * A device which is not a phy is expected to have a compatible string
  * indicating what sort of device it is.
  */
-static bool of_mdiobus_child_is_phy(struct device_node *child)
+bool of_mdiobus_child_is_phy(struct device_node *child)
 {
        u32 phy_id;
 
@@ -187,6 +187,7 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
 
        return false;
 }
+EXPORT_SYMBOL(of_mdiobus_child_is_phy);
 
 /**
  * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
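With the symbol exported, code outside the OF MDIO core can reuse the PHY-vs-generic-device classification. A hedged kernel-style fragment of an assumed caller (not part of this diff):

/* assumed caller: classify children while walking a bus's DT node */
struct device_node *child;

for_each_available_child_of_node(bus->dev.of_node, child) {
        if (of_mdiobus_child_is_phy(child))
                dev_dbg(&bus->dev, "%pOF is a PHY\n", child);
}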
index d93891a05f6033638916d7eda0a560110dcaf4ef..3371e4a0624838516a7ae6bbb62862fd17eed711 100644 (file)
@@ -518,10 +518,11 @@ static int __init of_platform_default_populate_init(void)
 {
        struct device_node *node;
 
+       device_links_supplier_sync_state_pause();
+
        if (!of_have_populated_dt())
                return -ENODEV;
 
-       device_links_supplier_sync_state_pause();
        /*
         * Handle certain compatibles explicitly, since we don't want to create
         * platform_devices for every node in /reserved-memory with a
@@ -545,8 +546,7 @@ arch_initcall_sync(of_platform_default_populate_init);
 
 static int __init of_platform_sync_state_init(void)
 {
-       if (of_have_populated_dt())
-               device_links_supplier_sync_state_resume();
+       device_links_supplier_sync_state_resume();
        return 0;
 }
 late_initcall_sync(of_platform_sync_state_init);
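Moving the pause before the populated-DT check keeps it paired with the resume in the late initcall, which is now likewise unconditional; skipping either half would unbalance the supplier sync-state count. A standalone sketch of the pairing:

#include <stdio.h>

static int paused;

static void sync_state_pause(void)  { paused++; }
static void sync_state_resume(void) { paused--; }

int main(void)
{
    int populated_dt = 0;

    sync_state_pause();            /* arch initcall: always runs */
    if (!populated_dt)
        printf("no DT: populate skipped, pairing intact\n");
    sync_state_resume();           /* late initcall: always runs */
    printf("balance = %d\n", paused);
    return 0;
}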
index be7a7d332332d4c29bbbb6353e37a25ed65c3b66..ba43e6a3dc0aeeddab9409fd0adc6339815c841e 100644 (file)
@@ -988,7 +988,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
        BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
        INIT_LIST_HEAD(&opp_table->opp_list);
        kref_init(&opp_table->kref);
-       kref_init(&opp_table->list_kref);
 
        /* Secure the device table modification */
        list_add(&opp_table->node, &opp_tables);
@@ -1072,33 +1071,6 @@ static void _opp_table_kref_release(struct kref *kref)
        mutex_unlock(&opp_table_lock);
 }
 
-void _opp_remove_all_static(struct opp_table *opp_table)
-{
-       struct dev_pm_opp *opp, *tmp;
-
-       list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
-               if (!opp->dynamic)
-                       dev_pm_opp_put(opp);
-       }
-
-       opp_table->parsed_static_opps = false;
-}
-
-static void _opp_table_list_kref_release(struct kref *kref)
-{
-       struct opp_table *opp_table = container_of(kref, struct opp_table,
-                                                  list_kref);
-
-       _opp_remove_all_static(opp_table);
-       mutex_unlock(&opp_table_lock);
-}
-
-void _put_opp_list_kref(struct opp_table *opp_table)
-{
-       kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
-                      &opp_table_lock);
-}
-
 void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
 {
        kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
@@ -1202,6 +1174,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
+void _opp_remove_all_static(struct opp_table *opp_table)
+{
+       struct dev_pm_opp *opp, *tmp;
+
+       mutex_lock(&opp_table->lock);
+
+       if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
+               goto unlock;
+
+       list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
+               if (!opp->dynamic)
+                       dev_pm_opp_put_unlocked(opp);
+       }
+
+unlock:
+       mutex_unlock(&opp_table->lock);
+}
+
 /**
  * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
  * @dev:       device for which we do this operation
@@ -2276,7 +2266,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev)
                return;
        }
 
-       _put_opp_list_kref(opp_table);
+       _opp_remove_all_static(opp_table);
 
        /* Drop reference taken by _find_opp_table() */
        dev_pm_opp_put_opp_table(opp_table);
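The separate list_kref is gone: parsed_static_opps becomes a plain count protected by opp_table->lock, and the static OPPs are freed only when the last user drops it. A standalone sketch of that counting scheme (locking elided):

#include <stdio.h>

static unsigned int parsed_static_opps;

static void add_static_opps(void)
{
    if (parsed_static_opps++)
        return;                    /* already parsed: just another user */
    printf("parsing static OPPs from DT\n");
}

static void remove_static_opps(void)
{
    if (!parsed_static_opps || --parsed_static_opps)
        return;                    /* never parsed, or users remain */
    printf("freeing static OPPs\n");
}

int main(void)
{
    add_static_opps();     /* parses */
    add_static_opps();     /* second user */
    remove_static_opps();  /* one user left: nothing freed */
    remove_static_opps();  /* last user: frees */
    return 0;
}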
index 1cbb58240b8016383d9f1e9137b7190cff345935..9cd8f0adacae48cffdf58110608b4f328cb7bfb0 100644 (file)
@@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
        struct dev_pm_opp *opp;
 
        /* OPP table is already initialized for the device */
+       mutex_lock(&opp_table->lock);
        if (opp_table->parsed_static_opps) {
-               kref_get(&opp_table->list_kref);
+               opp_table->parsed_static_opps++;
+               mutex_unlock(&opp_table->lock);
                return 0;
        }
 
-       /*
-        * Re-initialize list_kref every time we add static OPPs to the OPP
-        * table as the reference count may be 0 after the last tie static OPPs
-        * were removed.
-        */
-       kref_init(&opp_table->list_kref);
+       opp_table->parsed_static_opps = 1;
+       mutex_unlock(&opp_table->lock);
 
        /* We have opp-table node now, iterate over it and add OPPs */
        for_each_available_child_of_node(opp_table->np, np) {
@@ -678,15 +676,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
                        dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
                                ret);
                        of_node_put(np);
-                       return ret;
+                       goto remove_static_opp;
                } else if (opp) {
                        count++;
                }
        }
 
        /* There should be one or more OPPs defined */
-       if (WARN_ON(!count))
-               return -ENOENT;
+       if (WARN_ON(!count)) {
+               ret = -ENOENT;
+               goto remove_static_opp;
+       }
 
        list_for_each_entry(opp, &opp_table->opp_list, node)
                pstate_count += !!opp->pstate;
@@ -695,15 +695,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
        if (pstate_count && pstate_count != count) {
                dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
                        count, pstate_count);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto remove_static_opp;
        }
 
        if (pstate_count)
                opp_table->genpd_performance_state = true;
 
-       opp_table->parsed_static_opps = true;
-
        return 0;
+
+remove_static_opp:
+       _opp_remove_all_static(opp_table);
+
+       return ret;
 }
 
 /* Initializes OPP tables based on old-deprecated bindings */
@@ -738,6 +742,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
                if (ret) {
                        dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
                                __func__, freq, ret);
+                       _opp_remove_all_static(opp_table);
                        return ret;
                }
                nr -= 2;
index 01a500e2c40a1437902c9c099f5a1a1df7210528..d14e27102730ce24995e0d8a3a9390b05686b7e0 100644 (file)
@@ -127,11 +127,10 @@ enum opp_table_access {
  * @dev_list:  list of devices that share these OPPs
  * @opp_list:  table of opps
  * @kref:      for reference count of the table.
- * @list_kref: for reference count of the OPP list.
  * @lock:      mutex protecting the opp_list and dev_list.
  * @np:                struct device_node pointer for opp's DT node.
  * @clock_latency_ns_max: Max clock latency in nanoseconds.
- * @parsed_static_opps: True if OPPs are initialized from DT.
+ * @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
  * @shared_opp: OPP is shared between multiple devices.
  * @suspend_opp: Pointer to OPP to be used during device suspend.
  * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
@@ -167,7 +166,6 @@ struct opp_table {
        struct list_head dev_list;
        struct list_head opp_list;
        struct kref kref;
-       struct kref list_kref;
        struct mutex lock;
 
        struct device_node *np;
@@ -176,7 +174,7 @@ struct opp_table {
        /* For backward compatibility with v1 bindings */
        unsigned int voltage_tolerance_v1;
 
-       bool parsed_static_opps;
+       unsigned int parsed_static_opps;
        enum opp_table_access shared_opp;
        struct dev_pm_opp *suspend_opp;
 
index 1c69c404df1149d96ef1abe24767a96077ab199b..e3357e91decb252957d7ae535a5745fb89c91fc7 100644 (file)
@@ -90,7 +90,7 @@ static int _store_optimized_voltages(struct device *dev,
                goto out_map;
        }
 
-       base = ioremap_nocache(res->start, resource_size(res));
+       base = ioremap(res->start, resource_size(res));
        if (!base) {
                dev_err(dev, "Unable to map Efuse registers\n");
                ret = -ENOMEM;
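This hunk is part of a tree-wide cleanup: on the architectures touched here, plain ioremap() already returns an uncached mapping, so the _nocache spelling was a redundant alias. A hedged kernel-style fragment of the modern idiom (not from this diff):

/* ioremap() is uncached by default; the _nocache alias was identical */
static void __iomem *map_regs(struct resource *res)
{
        return ioremap(res->start, resource_size(res));
}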
index ad290f79983b940fa4a49dfb479eb71eed681f87..a5507f75b524c47f7b2ee8238a48b730e8b5bbce 100644 (file)
@@ -1534,7 +1534,7 @@ static int __init ccio_probe(struct parisc_device *dev)
        *ioc_p = ioc;
 
        ioc->hw_path = dev->hw_path;
-       ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
+       ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
        if (!ioc->ioc_regs) {
                kfree(ioc);
                return -ENOMEM;
index 2f1cac89ddf5ce036126b059b3103b08e21db891..889d7ce282ebb01e059597e1b2d91c6396f2c10f 100644 (file)
@@ -974,7 +974,7 @@ static int __init dino_probe(struct parisc_device *dev)
        }
 
        dino_dev->hba.dev = dev;
-       dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+       dino_dev->hba.base_addr = ioremap(hpa, 4096);
        dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
        spin_lock_init(&dino_dev->dinosaur_pen);
        dino_dev->hba.iommu = ccio_get_iommu(dev);
index 37a2c5db761d2ffd5b77e6c3ef5e498e3f5bf41d..9d00a24277aa7afd694ae0b7f71a45a31f0e3508 100644 (file)
@@ -354,10 +354,10 @@ static int __init eisa_probe(struct parisc_device *dev)
                        eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR;
                }
        }
-       eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
+       eisa_eeprom_addr = ioremap(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
        if (!eisa_eeprom_addr) {
                result = -ENOMEM;
-               printk(KERN_ERR "EISA: ioremap_nocache failed!\n");
+               printk(KERN_ERR "EISA: ioremap failed!\n");
                goto error_free_irq;
        }
        result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space,
index 32f506f00c8977639a33efb285a10318e4e66397..8a3b0c3a1e92bdcfc50c664a7caea0f7818cd8d6 100644 (file)
@@ -927,7 +927,7 @@ void *iosapic_register(unsigned long hpa)
                return NULL;
        }
 
-       isi->addr = ioremap_nocache(hpa, 4096);
+       isi->addr = ioremap(hpa, 4096);
        isi->isi_hpa = hpa;
        isi->isi_version = iosapic_rd_version(isi);
        isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
index a99e385c68bd6ca716d4ab713fbc05583e4f585b..732b516c7bf849d3b78109065af75b40e05ac95b 100644 (file)
@@ -1134,7 +1134,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
                        ** Postable I/O port space is per PCI host adapter.
                        ** base of 64MB PIOP region
                        */
-                       lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);
+                       lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024);
 
                        sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
                                        (int)lba_dev->hba.bus_num.start);
@@ -1476,7 +1476,7 @@ lba_driver_probe(struct parisc_device *dev)
        u32 func_class;
        void *tmp_obj;
        char *version;
-       void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
+       void __iomem *addr = ioremap(dev->hpa.start, 4096);
        int max;
 
        /* Read HW Rev First */
@@ -1575,7 +1575,7 @@ lba_driver_probe(struct parisc_device *dev)
        } else {
                if (!astro_iop_base) {
                        /* Sprockets PDC uses NPIOP region */
-                       astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
+                       astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024);
                        pci_port = &lba_astro_port_ops;
                }
 
@@ -1693,7 +1693,7 @@ void __init lba_init(void)
 */
 void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
 {
-       void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);
+       void __iomem * base_addr = ioremap(lba->hpa.start, 4096);
 
        imask <<= 2;    /* adjust for hints - 2 more bits */
 
index de8e4e347249179a7cd3085f6a3c6f49e8ffd369..7e112829d2503aebab87cb6a29fd6aa3ee845f17 100644 (file)
@@ -1513,7 +1513,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 
 static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
 {
-       return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
+       return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
 }
 
 static void sba_hw_init(struct sba_device *sba_dev)
@@ -1883,7 +1883,7 @@ static int __init sba_driver_callback(struct parisc_device *dev)
        u32 func_class;
        int i;
        char *version;
-       void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
+       void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry *root;
 #endif
index b20651cea09f84095fdbd4c1a304a3d8cd5ce3b5..9bf7fa99b103e1f5f84ff2ed5b9d65c80f88865b 100644 (file)
@@ -719,7 +719,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
-       base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       base = devm_ioremap(dev, res->start, resource_size(res));
        if (!base)
                return -ENOMEM;
 
index 3dd2e26972948dbf5db2086f1dd6eedb3c6df7c3..cfeccd7e9fff29bd5a116ad0d8b4e5ef28c07a34 100644 (file)
@@ -434,7 +434,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
        tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
        tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
 
-       msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+       msix_tbl = ioremap(ep->phys_base + tbl_addr,
                                   PCI_MSIX_ENTRY_SIZE);
        if (!msix_tbl)
                return -EINVAL;
index d9b63bfa5dd786d74c3a14eb219f441f589050e0..94af6f5828a3dd2885b5f9ba6372d7637b8490bc 100644 (file)
@@ -834,10 +834,12 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
        if (!entry)
                return -ENODEV;
 
+       /* store the register number offset to program RC io outbound ATU */
+       offset = size >> 20;
+
        size = resource_size(entry->res);
        pci_addr = entry->res->start - entry->offset;
 
-       offset = size >> 20;
        for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
                err = rockchip_pcie_prog_ob_atu(rockchip,
                                                reg_no + 1 + offset,
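The fix is purely about ordering: the ATU register offset must be derived from the I/O window size still held in `size` before that variable is reused for the memory window. A standalone sketch of the hazard (window sizes made up):

#include <stdio.h>

int main(void)
{
    unsigned long size = 2UL << 20;       /* first: I/O window size */
    unsigned long offset = size >> 20;    /* must be computed here ... */

    size = 64UL << 20;                    /* ... because size is reused */
    printf("mem ATU entries start at %lu, count %lu\n", offset, size >> 20);
    return 0;
}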
index c7709e49f0e497065d528b2fbf753e30f919e3ba..6b43a5455c7af88c0df2f8af5073eac2e5a8d2d7 100644 (file)
@@ -688,7 +688,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
        table_offset &= PCI_MSIX_TABLE_OFFSET;
        phys_addr = pci_resource_start(dev, bir) + table_offset;
 
-       return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+       return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
 }
 
 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
index e87196cc1a7fba33d37661ad0f73ca1dd1418583..df21e3227b57e711fbb0e33c4624fc1401ce9496 100644 (file)
@@ -184,7 +184,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
                pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
                return NULL;
        }
-       return ioremap_nocache(res->start, resource_size(res));
+       return ioremap(res->start, resource_size(res));
 }
 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 
index 4937a088d7d8460da6ed8fcf2c79494db9b7941b..a3a1a0ea64f45311303544827510ed77ce17d39b 100644 (file)
@@ -1571,7 +1571,7 @@ static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
 
        pci_read_config_dword(dev, 0xF0, &rcba);
        /* use bits 31:14, 16 kB aligned */
-       asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
+       asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
        if (asus_rcba_base == NULL)
                return;
 }
@@ -4784,7 +4784,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
        if (!(rcba & INTEL_LPC_RCBA_ENABLE))
                return -EINVAL;
 
-       rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
+       rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
                                   PAGE_ALIGN(INTEL_UPDCR_REG));
        if (!rcba_mem)
                return -ENOMEM;
@@ -5074,18 +5074,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
 
 #ifdef CONFIG_PCI_ATS
 /*
- * Some devices have a broken ATS implementation causing IOMMU stalls.
- * Don't use ATS for those devices.
+ * Some devices require additional driver setup to enable ATS.  Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+ * chance to load and configure the device.
  */
-static void quirk_no_ats(struct pci_dev *pdev)
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
 {
-       pci_info(pdev, "disabling ATS (broken on this device)\n");
+       if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+               return;
+
+       pci_info(pdev, "disabling ATS\n");
        pdev->ats_cap = 0;
 }
 
 /* AMD Stoney platform GPU */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+/* AMD Iceland dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi14 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
 #endif /* CONFIG_PCI_ATS */
 
 /* Freescale PCIe doesn't support MSI in RC mode */
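The quirk is renamed and gains a revision gate: for the Navi14 dGPU (0x7340) only revision 0xc5 keeps ATS disabled, while other revisions bail out early. A standalone sketch of that gate (device and revision values taken from the hunk):

#include <stdint.h>
#include <stdio.h>

/* returns 1 when the fixup should clear ats_cap */
static int should_disable_ats(uint16_t device, uint8_t revision)
{
    if (device == 0x7340 && revision != 0xc5)
        return 0;   /* only this Navi14 revision needs the quirk */
    return 1;
}

int main(void)
{
    printf("0x7340 rev 0xc1: %d\n", should_disable_ats(0x7340, 0xc1));
    printf("0x7340 rev 0xc5: %d\n", should_disable_ats(0x7340, 0xc5));
    printf("0x98e4 any rev:  %d\n", should_disable_ats(0x98e4, 0x00));
    return 0;
}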
index 773128f411f1e93e57059280221fbb4cad7d86f7..d704eccc548f62d2a1317ecb0e972893eb49c1e0 100644 (file)
@@ -814,7 +814,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
        if (err) {
                dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
                        err, &res_0->start);
-               goto out_cpuhp_err;
+               return err;
        }
 
        err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
@@ -833,8 +833,6 @@ static int smmu_pmu_probe(struct platform_device *pdev)
 
 out_unregister:
        cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
-out_cpuhp_err:
-       put_cpu();
        return err;
 }
 
index 55083c67b2bb0775e55bec195ed6dc617d9819ff..95dca2cb526500325dc56e9dd6a4f55e620a6eb2 100644 (file)
@@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev)
 
        if (ret < 0) {
                dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
-               goto ddr_perf_err;
+               goto cpuhp_state_err;
        }
 
        pmu->cpuhp_state = ret;
 
        /* Register the pmu instance for cpu hotplug */
-       cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+       ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+       if (ret) {
+               dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+               goto cpuhp_instance_err;
+       }
 
        /* Request irq */
        irq = of_irq_get(np, 0);
@@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
        return 0;
 
 ddr_perf_err:
-       if (pmu->cpuhp_state)
-               cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
-
+       cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+cpuhp_instance_err:
+       cpuhp_remove_multi_state(pmu->cpuhp_state);
+cpuhp_state_err:
        ida_simple_remove(&ddr_ida, pmu->id);
        dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
        return ret;
@@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
        struct ddr_pmu *pmu = platform_get_drvdata(pdev);
 
        cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+       cpuhp_remove_multi_state(pmu->cpuhp_state);
        irq_set_affinity_hint(pmu->irq, NULL);
 
        perf_pmu_unregister(&pmu->pmu);
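Each failure now unwinds exactly what had succeeded, in reverse order, through stacked labels, and the remove path gains the matching cpuhp_remove_multi_state(). A standalone sketch of the stacked-label pattern:

#include <stdio.h>

static int probe(int fail_step)
{
    if (fail_step == 0)
        goto cpuhp_state_err;
    printf("multi state set up\n");

    if (fail_step == 1)
        goto cpuhp_instance_err;
    printf("instance registered\n");
    return 0;

cpuhp_instance_err:
    printf("removing multi state\n");
cpuhp_state_err:
    printf("releasing id\n");
    return -1;
}

int main(void)
{
    probe(1);   /* instance registration fails: both labels run */
    return 0;
}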
index 96183e31b96abc9e85d7c8d56a839fe0d06461f3..584de8f807cc404d7743e23342ee1073cd781e5e 100644 (file)
@@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
        hisi_pmu->ops->stop_counters(hisi_pmu);
 }
 
+
 /*
- * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu
- * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID
- * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID
- * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID
- * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
+ * determined from the MPIDR_EL1, but the encoding varies by CPU:
+ *
+ * - For MT variants of TSV110:
+ *   SCCL is Aff2[7:3], CCL is Aff2[2:0]
+ *
+ * - For other MT parts:
+ *   SCCL is Aff3[7:0], CCL is Aff2[7:0]
+ *
+ * - For non-MT parts:
+ *   SCCL is Aff2[7:0], CCL is Aff1[7:0]
  */
-static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
 {
        u64 mpidr = read_cpuid_mpidr();
-
-       if (mpidr & MPIDR_MT_BITMASK) {
-               if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
-                       int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
-                       if (sccl_id)
-                               *sccl_id = aff2 >> 3;
-                       if (ccl_id)
-                               *ccl_id = aff2 & 0x7;
-               } else {
-                       if (sccl_id)
-                               *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
-                       if (ccl_id)
-                               *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-               }
+       int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+       int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+       int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       bool mt = mpidr & MPIDR_MT_BITMASK;
+       int sccl, ccl;
+
+       if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+               sccl = aff2 >> 3;
+               ccl = aff2 & 0x7;
+       } else if (mt) {
+               sccl = aff3;
+               ccl = aff2;
        } else {
-               if (sccl_id)
-                       *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-               if (ccl_id)
-                       *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+               sccl = aff2;
+               ccl = aff1;
        }
+
+       if (scclp)
+               *scclp = sccl;
+       if (cclp)
+               *cclp = ccl;
 }
 
 /*
index ead06c6c26019a91424a7e6be990f9ed7a8dbb0f..12e71a315a2cba315cb4f1c9b28422e3cbc8b93f 100644 (file)
@@ -115,7 +115,7 @@ struct cpcap_usb_ints_state {
 enum cpcap_gpio_mode {
        CPCAP_DM_DP,
        CPCAP_MDM_RX_TX,
-       CPCAP_UNKNOWN,
+       CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */
        CPCAP_OTG_DM_DP,
 };
 
@@ -134,6 +134,8 @@ struct cpcap_phy_ddata {
        struct iio_channel *id;
        struct regulator *vusb;
        atomic_t active;
+       unsigned int vbus_provider:1;
+       unsigned int docked:1;
 };
 
 static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata)
@@ -207,6 +209,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata,
 static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata);
 static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata);
 
+static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata,
+                                      enum musb_vbus_id_status status)
+{
+       int error;
+
+       error = musb_mailbox(status);
+       if (!error)
+               return;
+
+       dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n",
+               __func__, error);
+}
+
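musb_mailbox() can fail for benign reasons, for example when the musb glue has not loaded yet, so the helper demotes the failure to a debug message instead of aborting detection. A standalone sketch of the best-effort wrapper:

#include <stdio.h>

/* stand-in for musb_mailbox(): fails while the consumer isn't ready */
static int notify(int status) { return status < 0 ? -19 /* -ENODEV */ : 0; }

static void try_notify(int status)
{
    int error = notify(status);

    if (error)
        fprintf(stderr, "notify failed: %d (logged, not fatal)\n", error);
}

int main(void)
{
    try_notify(-1);   /* consumer missing: detection continues anyway */
    try_notify(1);
    return 0;
}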
 static void cpcap_usb_detect(struct work_struct *work)
 {
        struct cpcap_phy_ddata *ddata;
@@ -220,16 +235,66 @@ static void cpcap_usb_detect(struct work_struct *work)
        if (error)
                return;
 
-       if (s.id_ground) {
-               dev_dbg(ddata->dev, "id ground, USB host mode\n");
+       vbus = cpcap_usb_vbus_valid(ddata);
+
+       /* We need to kick the VBUS as USB A-host */
+       if (s.id_ground && ddata->vbus_provider) {
+               dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n");
+
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+               error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
+                                          CPCAP_BIT_VBUSSTBY_EN |
+                                          CPCAP_BIT_VBUSEN_SPI,
+                                          CPCAP_BIT_VBUSEN_SPI);
+               if (error)
+                       goto out_err;
+
+               return;
+       }
+
+       if (vbus && s.id_ground && ddata->docked) {
+               dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n");
+
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+               return;
+       }
+
+       /* No VBUS needed with docks */
+       if (vbus && s.id_ground && !ddata->vbus_provider) {
+               dev_dbg(ddata->dev, "connected to a dock\n");
+
+               ddata->docked = true;
+
                error = cpcap_usb_set_usb_mode(ddata);
                if (error)
                        goto out_err;
 
-               error = musb_mailbox(MUSB_ID_GROUND);
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+               /*
+                * Force check state again after musb has reoriented,
+                * otherwise devices won't enumerate after loading PHY
+                * driver.
+                */
+               schedule_delayed_work(&ddata->detect_work,
+                                     msecs_to_jiffies(1000));
+
+               return;
+       }
+
+       if (s.id_ground && !ddata->docked) {
+               dev_dbg(ddata->dev, "id ground, USB host mode\n");
+
+               ddata->vbus_provider = true;
+
+               error = cpcap_usb_set_usb_mode(ddata);
                if (error)
                        goto out_err;
 
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
                error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
                                           CPCAP_BIT_VBUSSTBY_EN |
                                           CPCAP_BIT_VBUSEN_SPI,
@@ -248,43 +313,26 @@ static void cpcap_usb_detect(struct work_struct *work)
 
        vbus = cpcap_usb_vbus_valid(ddata);
 
+       /* Otherwise assume we're connected to a USB host */
        if (vbus) {
-               /* Are we connected to a docking station with vbus? */
-               if (s.id_ground) {
-                       dev_dbg(ddata->dev, "connected to a dock\n");
-
-                       /* No VBUS needed with docks */
-                       error = cpcap_usb_set_usb_mode(ddata);
-                       if (error)
-                               goto out_err;
-                       error = musb_mailbox(MUSB_ID_GROUND);
-                       if (error)
-                               goto out_err;
-
-                       return;
-               }
-
-               /* Otherwise assume we're connected to a USB host */
                dev_dbg(ddata->dev, "connected to USB host\n");
                error = cpcap_usb_set_usb_mode(ddata);
                if (error)
                        goto out_err;
-               error = musb_mailbox(MUSB_VBUS_VALID);
-               if (error)
-                       goto out_err;
+               cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID);
 
                return;
        }
 
+       ddata->vbus_provider = false;
+       ddata->docked = false;
+       cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
+
        /* Default to debug UART mode */
        error = cpcap_usb_set_uart_mode(ddata);
        if (error)
                goto out_err;
 
-       error = musb_mailbox(MUSB_VBUS_OFF);
-       if (error)
-               goto out_err;
-
        dev_dbg(ddata->dev, "set UART mode\n");
 
        return;
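The detection ladder now tracks whether this end provides VBUS or sits in a dock, and the check order matters: the keep-alive cases for an established A-host run before any new-state transition. A standalone condensation of the ladder (register writes and the delayed re-check elided):

#include <stdbool.h>
#include <stdio.h>

static const char *detect(bool id_ground, bool vbus,
                          bool *vbus_provider, bool *docked)
{
    if (id_ground && *vbus_provider)
        return "still A-host: keep driving VBUS";
    if (vbus && id_ground && *docked)
        return "still docked: signal ID down";
    if (vbus && id_ground && !*vbus_provider)
        return (*docked = true), "dock: no VBUS needed";
    if (id_ground && !*docked)
        return (*vbus_provider = true), "host mode: enable VBUS";
    if (vbus)
        return "peripheral: connected to a USB host";
    *vbus_provider = *docked = false;
    return "idle: UART mode";
}

int main(void)
{
    bool provider = false, docked = false;

    printf("%s\n", detect(true, false, &provider, &docked));
    printf("%s\n", detect(true, false, &provider, &docked));
    printf("%s\n", detect(false, false, &provider, &docked));
    return 0;
}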
@@ -376,7 +424,8 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
 {
        int error;
 
-       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+       /* Disable lines to prevent glitches from waking up mdm6600 */
+       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
        if (error)
                goto out_err;
 
@@ -403,6 +452,11 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
        if (error)
                goto out_err;
 
+       /* Enable UART mode */
+       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+       if (error)
+               goto out_err;
+
        return 0;
 
 out_err:
@@ -415,7 +469,8 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
 {
        int error;
 
-       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+       /* Disable lines to prevent glitches from waking up mdm6600 */
+       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
        if (error)
                return error;
 
@@ -434,12 +489,6 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
        if (error)
                goto out_err;
 
-       error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
-                                  CPCAP_BIT_USBXCVREN,
-                                  CPCAP_BIT_USBXCVREN);
-       if (error)
-               goto out_err;
-
        error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
                                   CPCAP_BIT_PU_SPI |
                                   CPCAP_BIT_DMPD_SPI |
@@ -455,6 +504,11 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
        if (error)
                goto out_err;
 
+       /* Enable USB mode */
+       error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+       if (error)
+               goto out_err;
+
        return 0;
 
 out_err:
@@ -649,9 +703,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev)
        if (error)
                dev_err(ddata->dev, "could not set UART mode\n");
 
-       error = musb_mailbox(MUSB_VBUS_OFF);
-       if (error)
-               dev_err(ddata->dev, "could not set mailbox\n");
+       cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
 
        usb_remove_phy(&ddata->phy);
        cancel_delayed_work_sync(&ddata->detect_work);
index ee184d5607bdb39240b2729b82688ef67501df06..f20524f0c21d9aea86013601ec47c381bf32db26 100644 (file)
@@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work)
        struct phy_mdm6600 *ddata;
        struct device *dev;
        DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES);
-       int error, i, val = 0;
+       int error;
 
        ddata = container_of(work, struct phy_mdm6600, status_work.work);
        dev = ddata->dev;
@@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work)
        if (error)
                return;
 
-       for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
-               val |= test_bit(i, values) << i;
-               dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
-                       __func__, i, test_bit(i, values), val);
-       }
-       ddata->status = values[0];
+       ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1);
 
        dev_info(dev, "modem status: %i %s\n",
                 ddata->status,
-                phy_mdm6600_status_name[ddata->status & 7]);
+                phy_mdm6600_status_name[ddata->status]);
        complete(&ddata->ack);
 }
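The bit-by-bit reassembly loop collapses into one mask: gpiod_get_array_value_cansleep() already packs the status lines into the first bitmap word. A standalone sketch of the masking:

#include <stdio.h>

#define NR_STATUS_LINES 3

int main(void)
{
    unsigned long values0 = 0xf5;   /* raw first word of the bitmap */
    unsigned int status = values0 & ((1 << NR_STATUS_LINES) - 1);

    printf("status = %u (low %d bits of 0x%lx)\n",
           status, NR_STATUS_LINES, values0);
    return 0;
}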
 
index 091e20303a14d6b3569eb806eb3bf72beb56ae4e..66f91726b8b24b4ded24806e2e701ff4bc066904 100644 (file)
@@ -66,7 +66,7 @@
 /* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
 #define CLAMP_EN                               BIT(0) /* enables i/o clamp_n */
 
-#define PHY_INIT_COMPLETE_TIMEOUT              1000
+#define PHY_INIT_COMPLETE_TIMEOUT              10000
 #define POWER_DOWN_DELAY_US_MIN                        10
 #define POWER_DOWN_DELAY_US_MAX                        11
 
index 2b97fb1185a00e68b41858c15a8a99b0ae7303b6..9ca20c947283de2f5d03866aa404c0411b2eb72c 100644 (file)
@@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
 {
        const struct pre_pll_config *cfg = pre_pll_cfg_table;
 
+       rate = (rate / 1000) * 1000;
+
        for (; cfg->pixclock != 0; cfg++)
                if (cfg->pixclock == rate && !cfg->fracdiv)
                        break;
@@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
 {
        const struct pre_pll_config *cfg = pre_pll_cfg_table;
 
+       rate = (rate / 1000) * 1000;
+
        for (; cfg->pixclock != 0; cfg++)
                if (cfg->pixclock == rate)
                        break;
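Truncating to whole kHz lets rates that arrive with sub-kHz remainders still match entries in the pixel-clock table. A standalone sketch of the truncation:

#include <stdio.h>

static unsigned long round_down_to_khz(unsigned long rate)
{
    return (rate / 1000) * 1000;   /* integer division drops the remainder */
}

int main(void)
{
    printf("%lu -> %lu\n", 148500123UL, round_down_to_khz(148500123UL));
    return 0;
}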
index 3bfbf2ff6e2bd58cb242e4891dd23d9192aa9cdc..df0ef69dd4748606391b339e7c99a661fa3501a9 100644 (file)
@@ -422,6 +422,7 @@ config PINCTRL_TB10X
 
 config PINCTRL_EQUILIBRIUM
        tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
+       depends on OF && HAS_IOMEM
        select PINMUX
        select PINCONF
        select GPIOLIB
index c6800d220920ecb3613789ac094d8105ae0cdd94..bb07024d22edcc4b0f5e00bfff648cacfb5de0b9 100644 (file)
@@ -1088,60 +1088,52 @@ SSSF_PIN_DECL(AF15, GPIOV7, LPCSMI, SIG_DESC_SET(SCU434, 15));
 
 #define AB7 176
 SIG_EXPR_LIST_DECL_SESG(AB7, LAD0, LPC, SIG_DESC_SET(SCU434, 16),
-                         SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16),
                          SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16));
 PIN_DECL_2(AB7, GPIOW0, LAD0, ESPID0);
 
 #define AB8 177
 SIG_EXPR_LIST_DECL_SESG(AB8, LAD1, LPC, SIG_DESC_SET(SCU434, 17),
-                         SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17),
                          SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17));
 PIN_DECL_2(AB8, GPIOW1, LAD1, ESPID1);
 
 #define AC8 178
 SIG_EXPR_LIST_DECL_SESG(AC8, LAD2, LPC, SIG_DESC_SET(SCU434, 18),
-                         SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18),
                          SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18));
 PIN_DECL_2(AC8, GPIOW2, LAD2, ESPID2);
 
 #define AC7 179
 SIG_EXPR_LIST_DECL_SESG(AC7, LAD3, LPC, SIG_DESC_SET(SCU434, 19),
-                         SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19),
                          SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19));
 PIN_DECL_2(AC7, GPIOW3, LAD3, ESPID3);
 
 #define AE7 180
 SIG_EXPR_LIST_DECL_SESG(AE7, LCLK, LPC, SIG_DESC_SET(SCU434, 20),
-                         SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20),
                          SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20));
 PIN_DECL_2(AE7, GPIOW4, LCLK, ESPICK);
 
 #define AF7 181
 SIG_EXPR_LIST_DECL_SESG(AF7, LFRAME, LPC, SIG_DESC_SET(SCU434, 21),
-                         SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21),
                          SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21));
 PIN_DECL_2(AF7, GPIOW5, LFRAME, ESPICS);
 
 #define AD7 182
 SIG_EXPR_LIST_DECL_SESG(AD7, LSIRQ, LSIRQ, SIG_DESC_SET(SCU434, 22),
-                         SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22),
                          SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22));
 PIN_DECL_2(AD7, GPIOW6, LSIRQ, ESPIALT);
 FUNC_GROUP_DECL(LSIRQ, AD7);
 FUNC_GROUP_DECL(ESPIALT, AD7);
 
 #define AD8 183
 SIG_EXPR_LIST_DECL_SESG(AD8, LPCRST, LPC, SIG_DESC_SET(SCU434, 23),
-                         SIG_DESC_CLEAR(SCU510, 6));
-SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23),
                          SIG_DESC_SET(SCU510, 6));
+SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23));
 PIN_DECL_2(AD8, GPIOW7, LPCRST, ESPIRST);
 
 FUNC_GROUP_DECL(LPC, AB7, AB8, AC8, AC7, AE7, AF7, AD8);
index 32f268f173d1e854bd9de7c44b98364c95f7a511..57044ab376d3cf72954813bfe303ab7e0c8d644f 100644 (file)
@@ -1049,7 +1049,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res)
                return -EINVAL;
-       pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+       pinctrl->base1 = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));
        if (!pinctrl->base1) {
                dev_err(&pdev->dev, "unable to map I/O space\n");
index 3756fc9d5826b72bc2f338315e4a328b6c2e5a0e..f1d60a708815a870996248e4c3972dd3e1d51d4f 100644 (file)
@@ -578,7 +578,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res)
                return -EINVAL;
-       pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+       pinctrl->base1 = devm_ioremap(&pdev->dev, res->start,
                                              resource_size(res));
        if (!pinctrl->base1) {
                dev_err(&pdev->dev, "unable to map I/O space\n");
index f1806fd781a05c6b952bf685e6a577bd09715986..530426a74f75148881bbe5007e9eaac541012435 100644 (file)
@@ -2,6 +2,7 @@
 config PINCTRL_LOCHNAGAR
        tristate "Cirrus Logic Lochnagar pinctrl driver"
        depends on MFD_LOCHNAGAR
+       select GPIOLIB
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
index 2bbd8ee935075b7a419851bad3767efb4475d05d..46600d9380ea6a43e8b9be9b1810aa697b97a4d4 100644 (file)
@@ -1535,15 +1535,8 @@ int pinctrl_init_done(struct device *dev)
        return ret;
 }
 
-#ifdef CONFIG_PM
-
-/**
- * pinctrl_pm_select_state() - select pinctrl state for PM
- * @dev: device to select default state for
- * @state: state to set
- */
-static int pinctrl_pm_select_state(struct device *dev,
-                                  struct pinctrl_state *state)
+static int pinctrl_select_bound_state(struct device *dev,
+                                     struct pinctrl_state *state)
 {
        struct dev_pin_info *pins = dev->pins;
        int ret;
@@ -1558,15 +1551,27 @@ static int pinctrl_pm_select_state(struct device *dev,
 }
 
 /**
- * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * pinctrl_select_default_state() - select default pinctrl state
  * @dev: device to select default state for
  */
-int pinctrl_pm_select_default_state(struct device *dev)
+int pinctrl_select_default_state(struct device *dev)
 {
        if (!dev->pins)
                return 0;
 
-       return pinctrl_pm_select_state(dev, dev->pins->default_state);
+       return pinctrl_select_bound_state(dev, dev->pins->default_state);
+}
+EXPORT_SYMBOL_GPL(pinctrl_select_default_state);
+
+#ifdef CONFIG_PM
+
+/**
+ * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * @dev: device to select default state for
+ */
+int pinctrl_pm_select_default_state(struct device *dev)
+{
+       return pinctrl_select_default_state(dev);
 }
 EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
 
@@ -1579,7 +1584,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev)
        if (!dev->pins)
                return 0;
 
-       return pinctrl_pm_select_state(dev, dev->pins->sleep_state);
+       return pinctrl_select_bound_state(dev, dev->pins->sleep_state);
 }
 EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state);
 
@@ -1592,7 +1597,7 @@ int pinctrl_pm_select_idle_state(struct device *dev)
        if (!dev->pins)
                return 0;
 
-       return pinctrl_pm_select_state(dev, dev->pins->idle_state);
+       return pinctrl_select_bound_state(dev, dev->pins->idle_state);
 }
 EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state);
 #endif
index 7e29e3fecdb246568a78a205f978325c5f203cdf..c00d0022d311bdf01adc5f30409a60abdd60c6eb 100644 (file)
@@ -611,7 +611,7 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
        if (!res)
                return -ENOENT;
 
-       ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start,
+       ipctl->base = devm_ioremap(&pdev->dev, res->start,
                        resource_size(res));
        if (!ipctl->base)
                return -ENOMEM;
index 9ffb22211d2b007ff70cc99d4caf5403ef0609c3..55141d5de29e60254165ae1b269c31d9bfd36331 100644 (file)
@@ -110,7 +110,6 @@ struct byt_gpio {
        struct platform_device *pdev;
        struct pinctrl_dev *pctl_dev;
        struct pinctrl_desc pctl_desc;
-       raw_spinlock_t lock;
        const struct intel_pinctrl_soc_data *soc_data;
        struct intel_community *communities_copy;
        struct byt_gpio_pin_context *saved_context;
@@ -494,34 +493,34 @@ static const struct intel_pinctrl_soc_data byt_sus_soc_data = {
 };
 
 static const struct pinctrl_pin_desc byt_ncore_pins[] = {
-       PINCTRL_PIN(0, "GPIO_NCORE0"),
-       PINCTRL_PIN(1, "GPIO_NCORE1"),
-       PINCTRL_PIN(2, "GPIO_NCORE2"),
-       PINCTRL_PIN(3, "GPIO_NCORE3"),
-       PINCTRL_PIN(4, "GPIO_NCORE4"),
-       PINCTRL_PIN(5, "GPIO_NCORE5"),
-       PINCTRL_PIN(6, "GPIO_NCORE6"),
-       PINCTRL_PIN(7, "GPIO_NCORE7"),
-       PINCTRL_PIN(8, "GPIO_NCORE8"),
-       PINCTRL_PIN(9, "GPIO_NCORE9"),
-       PINCTRL_PIN(10, "GPIO_NCORE10"),
-       PINCTRL_PIN(11, "GPIO_NCORE11"),
-       PINCTRL_PIN(12, "GPIO_NCORE12"),
-       PINCTRL_PIN(13, "GPIO_NCORE13"),
-       PINCTRL_PIN(14, "GPIO_NCORE14"),
-       PINCTRL_PIN(15, "GPIO_NCORE15"),
-       PINCTRL_PIN(16, "GPIO_NCORE16"),
-       PINCTRL_PIN(17, "GPIO_NCORE17"),
-       PINCTRL_PIN(18, "GPIO_NCORE18"),
-       PINCTRL_PIN(19, "GPIO_NCORE19"),
-       PINCTRL_PIN(20, "GPIO_NCORE20"),
-       PINCTRL_PIN(21, "GPIO_NCORE21"),
-       PINCTRL_PIN(22, "GPIO_NCORE22"),
-       PINCTRL_PIN(23, "GPIO_NCORE23"),
-       PINCTRL_PIN(24, "GPIO_NCORE24"),
-       PINCTRL_PIN(25, "GPIO_NCORE25"),
-       PINCTRL_PIN(26, "GPIO_NCORE26"),
-       PINCTRL_PIN(27, "GPIO_NCORE27"),
+       PINCTRL_PIN(0, "HV_DDI0_HPD"),
+       PINCTRL_PIN(1, "HV_DDI0_DDC_SDA"),
+       PINCTRL_PIN(2, "HV_DDI0_DDC_SCL"),
+       PINCTRL_PIN(3, "PANEL0_VDDEN"),
+       PINCTRL_PIN(4, "PANEL0_BKLTEN"),
+       PINCTRL_PIN(5, "PANEL0_BKLTCTL"),
+       PINCTRL_PIN(6, "HV_DDI1_HPD"),
+       PINCTRL_PIN(7, "HV_DDI1_DDC_SDA"),
+       PINCTRL_PIN(8, "HV_DDI1_DDC_SCL"),
+       PINCTRL_PIN(9, "PANEL1_VDDEN"),
+       PINCTRL_PIN(10, "PANEL1_BKLTEN"),
+       PINCTRL_PIN(11, "PANEL1_BKLTCTL"),
+       PINCTRL_PIN(12, "GP_INTD_DSI_TE1"),
+       PINCTRL_PIN(13, "HV_DDI2_DDC_SDA"),
+       PINCTRL_PIN(14, "HV_DDI2_DDC_SCL"),
+       PINCTRL_PIN(15, "GP_CAMERASB00"),
+       PINCTRL_PIN(16, "GP_CAMERASB01"),
+       PINCTRL_PIN(17, "GP_CAMERASB02"),
+       PINCTRL_PIN(18, "GP_CAMERASB03"),
+       PINCTRL_PIN(19, "GP_CAMERASB04"),
+       PINCTRL_PIN(20, "GP_CAMERASB05"),
+       PINCTRL_PIN(21, "GP_CAMERASB06"),
+       PINCTRL_PIN(22, "GP_CAMERASB07"),
+       PINCTRL_PIN(23, "GP_CAMERASB08"),
+       PINCTRL_PIN(24, "GP_CAMERASB09"),
+       PINCTRL_PIN(25, "GP_CAMERASB10"),
+       PINCTRL_PIN(26, "GP_CAMERASB11"),
+       PINCTRL_PIN(27, "GP_INTD_DSI_TE2"),
 };
 
 static const unsigned int byt_ncore_pins_map[BYT_NGPIO_NCORE] = {
@@ -549,6 +548,8 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = {
        NULL
 };
 
+static DEFINE_RAW_SPINLOCK(byt_lock);
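Every byt_gpio instance now serializes on this one static lock instead of a lock embedded in each device structure. A hedged kernel-style fragment of the resulting access pattern (not from this diff):

static u32 byt_locked_read(void __iomem *reg)
{
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&byt_lock, flags);
        val = readl(reg);
        raw_spin_unlock_irqrestore(&byt_lock, flags);

        return val;
}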
+
 static struct intel_community *byt_get_community(struct byt_gpio *vg,
                                                 unsigned int pin)
 {
@@ -658,7 +659,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
        unsigned long flags;
        int i;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
 
        for (i = 0; i < group.npins; i++) {
                void __iomem *padcfg0;
@@ -678,7 +679,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
                writel(value, padcfg0);
        }
 
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static void byt_set_group_mixed_mux(struct byt_gpio *vg,
@@ -688,7 +689,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
        unsigned long flags;
        int i;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
 
        for (i = 0; i < group.npins; i++) {
                void __iomem *padcfg0;
@@ -708,7 +709,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
                writel(value, padcfg0);
        }
 
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
@@ -749,11 +750,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
        unsigned long flags;
        u32 value;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
        value = readl(reg);
        value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
        writel(value, reg);
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
@@ -765,7 +766,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
        u32 value, gpio_mux;
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
 
        /*
         * In most cases, func pin mux 000 means GPIO function.
@@ -787,7 +788,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
                         "pin %u forcibly re-configured as GPIO\n", offset);
        }
 
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 
        pm_runtime_get(&vg->pdev->dev);
 
@@ -815,7 +816,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
        unsigned long flags;
        u32 value;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
 
        value = readl(val_reg);
        value &= ~BYT_DIR_MASK;
@@ -832,7 +833,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
                     "Potential Error: Setting GPIO with direct_irq_en to output");
        writel(value, val_reg);
 
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 
        return 0;
 }
@@ -901,11 +902,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
        u32 conf, pull, val, debounce;
        u16 arg = 0;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
        conf = readl(conf_reg);
        pull = conf & BYT_PULL_ASSIGN_MASK;
        val = readl(val_reg);
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 
        switch (param) {
        case PIN_CONFIG_BIAS_DISABLE:
@@ -932,9 +933,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
                if (!(conf & BYT_DEBOUNCE_EN))
                        return -EINVAL;
 
-               raw_spin_lock_irqsave(&vg->lock, flags);
+               raw_spin_lock_irqsave(&byt_lock, flags);
                debounce = readl(db_reg);
-               raw_spin_unlock_irqrestore(&vg->lock, flags);
+               raw_spin_unlock_irqrestore(&byt_lock, flags);
 
                switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
                case BYT_DEBOUNCE_PULSE_375US:
@@ -986,7 +987,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
        u32 conf, val, debounce;
        int i, ret = 0;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
 
        conf = readl(conf_reg);
        val = readl(val_reg);
@@ -1094,7 +1095,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
        if (!ret)
                writel(conf, conf_reg);
 
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 
        return ret;
 }
@@ -1119,9 +1120,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
        unsigned long flags;
        u32 val;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
        val = readl(reg);
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 
        return !!(val & BYT_LEVEL);
 }
@@ -1136,13 +1137,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
        if (!reg)
                return;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
        old_val = readl(reg);
        if (value)
                writel(old_val | BYT_LEVEL, reg);
        else
                writel(old_val & ~BYT_LEVEL, reg);
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -1155,9 +1156,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
        if (!reg)
                return -EINVAL;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
        value = readl(reg);
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 
        if (!(value & BYT_OUTPUT_EN))
                return 0;
@@ -1200,14 +1201,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                const char *label;
                unsigned int pin;
 
-               raw_spin_lock_irqsave(&vg->lock, flags);
+               raw_spin_lock_irqsave(&byt_lock, flags);
                pin = vg->soc_data->pins[i].number;
                reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
                if (!reg) {
                        seq_printf(s,
                                   "Could not retrieve pin %i conf0 reg\n",
                                   pin);
-                       raw_spin_unlock_irqrestore(&vg->lock, flags);
+                       raw_spin_unlock_irqrestore(&byt_lock, flags);
                        continue;
                }
                conf0 = readl(reg);
@@ -1216,11 +1217,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                if (!reg) {
                        seq_printf(s,
                                   "Could not retrieve pin %i val reg\n", pin);
-                       raw_spin_unlock_irqrestore(&vg->lock, flags);
+                       raw_spin_unlock_irqrestore(&byt_lock, flags);
                        continue;
                }
                val = readl(reg);
-               raw_spin_unlock_irqrestore(&vg->lock, flags);
+               raw_spin_unlock_irqrestore(&byt_lock, flags);
 
                comm = byt_get_community(vg, pin);
                if (!comm) {
@@ -1304,9 +1305,9 @@ static void byt_irq_ack(struct irq_data *d)
        if (!reg)
                return;
 
-       raw_spin_lock(&vg->lock);
+       raw_spin_lock(&byt_lock);
        writel(BIT(offset % 32), reg);
-       raw_spin_unlock(&vg->lock);
+       raw_spin_unlock(&byt_lock);
 }
 
 static void byt_irq_mask(struct irq_data *d)
@@ -1330,7 +1331,7 @@ static void byt_irq_unmask(struct irq_data *d)
        if (!reg)
                return;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
        value = readl(reg);
 
        switch (irqd_get_trigger_type(d)) {
@@ -1353,7 +1354,7 @@ static void byt_irq_unmask(struct irq_data *d)
 
        writel(value, reg);
 
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static int byt_irq_type(struct irq_data *d, unsigned int type)
@@ -1367,7 +1368,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
        if (!reg || offset >= vg->chip.ngpio)
                return -EINVAL;
 
-       raw_spin_lock_irqsave(&vg->lock, flags);
+       raw_spin_lock_irqsave(&byt_lock, flags);
        value = readl(reg);
 
        WARN(value & BYT_DIRECT_IRQ_EN,
@@ -1389,7 +1390,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
        else if (type & IRQ_TYPE_LEVEL_MASK)
                irq_set_handler_locked(d, handle_level_irq);
 
-       raw_spin_unlock_irqrestore(&vg->lock, flags);
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
 
        return 0;
 }
@@ -1425,9 +1426,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
                        continue;
                }
 
-               raw_spin_lock(&vg->lock);
+               raw_spin_lock(&byt_lock);
                pending = readl(reg);
-               raw_spin_unlock(&vg->lock);
+               raw_spin_unlock(&byt_lock);
                for_each_set_bit(pin, &pending, 32) {
                        virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
                        generic_handle_irq(virq);
@@ -1450,9 +1451,9 @@ static void byt_init_irq_valid_mask(struct gpio_chip *chip,
         */
 }
 
-static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
+static int byt_gpio_irq_init_hw(struct gpio_chip *chip)
 {
-       struct gpio_chip *gc = &vg->chip;
+       struct byt_gpio *vg = gpiochip_get_data(chip);
        struct device *dev = &vg->pdev->dev;
        void __iomem *reg;
        u32 base, value;
@@ -1476,7 +1477,7 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 
                value = readl(reg);
                if (value & BYT_DIRECT_IRQ_EN) {
-                       clear_bit(i, gc->irq.valid_mask);
+                       clear_bit(i, chip->irq.valid_mask);
                        dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
                } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
                        byt_gpio_clear_triggering(vg, i);
@@ -1504,6 +1505,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
                                "GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n",
                                base / 32, value);
        }
+
+       return 0;
+}
+
+static int byt_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+       struct byt_gpio *vg = gpiochip_get_data(chip);
+       struct device *dev = &vg->pdev->dev;
+       int ret;
+
+       ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc_data->npins);
+       if (ret)
+               dev_err(dev, "failed to add GPIO pin range\n");
+
+       return ret;
 }
 
 static int byt_gpio_probe(struct byt_gpio *vg)
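
The hunk above adds byt_gpio_add_pin_ranges() so that the GPIO core, rather than the probe path, attaches the pinctrl backing range while the chip is being registered (the probe hunk below wires it up via gc->add_pin_ranges). A sketch of the callback shape, with demo_ names standing in for the driver specifics:

#include <linux/gpio/driver.h>

/* Invoked by the GPIO core during gpiochip registration. */
static int demo_add_pin_ranges(struct gpio_chip *chip)
{
        /* GPIOs 0..ngpio-1 map 1:1 onto the pin controller's pins. */
        return gpiochip_add_pin_range(chip, dev_name(chip->parent),
                                      0, 0, chip->ngpio);
}

/* In probe, before registering the chip:
 *      chip->add_pin_ranges = demo_add_pin_ranges;
 */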
@@ -1518,6 +1534,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
        gc->label       = dev_name(&vg->pdev->dev);
        gc->base        = -1;
        gc->can_sleep   = false;
+       gc->add_pin_ranges = byt_gpio_add_pin_ranges;
        gc->parent      = &vg->pdev->dev;
        gc->ngpio       = vg->soc_data->npins;
        gc->irq.init_valid_mask = byt_init_irq_valid_mask;
@@ -1528,33 +1545,30 @@ static int byt_gpio_probe(struct byt_gpio *vg)
        if (!vg->saved_context)
                return -ENOMEM;
 #endif
-       ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
-       if (ret) {
-               dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
-               return ret;
-       }
-
-       ret = gpiochip_add_pin_range(&vg->chip, dev_name(&vg->pdev->dev),
-                                    0, 0, vg->soc_data->npins);
-       if (ret) {
-               dev_err(&vg->pdev->dev, "failed to add GPIO pin range\n");
-               return ret;
-       }
 
        /* set up interrupts  */
        irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0);
        if (irq_rc && irq_rc->start) {
-               byt_gpio_irq_init_hw(vg);
-               ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0,
-                                          handle_bad_irq, IRQ_TYPE_NONE);
-               if (ret) {
-                       dev_err(&vg->pdev->dev, "failed to add irqchip\n");
-                       return ret;
-               }
+               struct gpio_irq_chip *girq;
+
+               girq = &gc->irq;
+               girq->chip = &byt_irqchip;
+               girq->init_hw = byt_gpio_irq_init_hw;
+               girq->parent_handler = byt_gpio_irq_handler;
+               girq->num_parents = 1;
+               girq->parents = devm_kcalloc(&vg->pdev->dev, girq->num_parents,
+                                            sizeof(*girq->parents), GFP_KERNEL);
+               if (!girq->parents)
+                       return -ENOMEM;
+               girq->parents[0] = (unsigned int)irq_rc->start;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_bad_irq;
+       }
 
-               gpiochip_set_chained_irqchip(gc, &byt_irqchip,
-                                            (unsigned)irq_rc->start,
-                                            byt_gpio_irq_handler);
+       ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
+       if (ret) {
+               dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
+               return ret;
        }
 
        return ret;
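
This hunk retires the explicit gpiochip_irqchip_add() / gpiochip_set_chained_irqchip() calls: the struct gpio_irq_chip template is filled in first, and devm_gpiochip_add_data() then performs the irqchip setup, calling init_hw before any IRQs are mapped. The Cherryview conversion further down follows the same pattern. A condensed sketch with demo_ placeholders:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int demo_irq_init_hw(struct gpio_chip *gc) { return 0; }
static void demo_irq_handler(struct irq_desc *desc) { }

static int demo_register(struct device *dev, struct gpio_chip *gc,
                         struct irq_chip *ic, unsigned int *parent_irq,
                         void *priv)
{
        struct gpio_irq_chip *girq = &gc->irq;

        girq->chip = ic;
        girq->init_hw = demo_irq_init_hw;        /* runs before IRQs map */
        girq->parent_handler = demo_irq_handler; /* chained handler */
        girq->num_parents = 1;
        /* parent storage must outlive the chip: devm-allocated here,
         * a struct member in the Cherryview driver */
        girq->parents = parent_irq;
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_bad_irq;

        return devm_gpiochip_add_data(dev, gc, priv);
}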
@@ -1638,8 +1652,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
                return PTR_ERR(vg->pctl_dev);
        }
 
-       raw_spin_lock_init(&vg->lock);
-
        ret = byt_gpio_probe(vg);
        if (ret)
                return ret;
@@ -1654,8 +1666,11 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
 static int byt_gpio_suspend(struct device *dev)
 {
        struct byt_gpio *vg = dev_get_drvdata(dev);
+       unsigned long flags;
        int i;
 
+       raw_spin_lock_irqsave(&byt_lock, flags);
+
        for (i = 0; i < vg->soc_data->npins; i++) {
                void __iomem *reg;
                u32 value;
@@ -1676,14 +1691,18 @@ static int byt_gpio_suspend(struct device *dev)
                vg->saved_context[i].val = value;
        }
 
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 
 static int byt_gpio_resume(struct device *dev)
 {
        struct byt_gpio *vg = dev_get_drvdata(dev);
+       unsigned long flags;
        int i;
 
+       raw_spin_lock_irqsave(&byt_lock, flags);
+
        for (i = 0; i < vg->soc_data->npins; i++) {
                void __iomem *reg;
                u32 value;
@@ -1721,6 +1740,7 @@ static int byt_gpio_resume(struct device *dev)
                }
        }
 
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 #endif
index 582fa8a755598d56682a600499a9875825ee47dc..60527b93a711539afc666dbf0e1129a0b7454e36 100644 (file)
@@ -149,6 +149,7 @@ struct chv_pin_context {
  * @chip: GPIO chip in this pin controller
  * @irqchip: IRQ chip in this pin controller
  * @regs: MMIO registers
+ * @irq: Our parent irq
  * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
  *             offset (in GPIO number space)
  * @community: Community this pinctrl instance represents
@@ -165,6 +166,7 @@ struct chv_pinctrl {
        struct gpio_chip chip;
        struct irq_chip irqchip;
        void __iomem *regs;
+       unsigned int irq;
        unsigned int intr_lines[16];
        const struct chv_community *community;
        u32 saved_intmask;
@@ -1555,39 +1557,9 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
        }
 }
 
-static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
 {
-       const struct chv_gpio_pinrange *range;
-       struct gpio_chip *chip = &pctrl->chip;
-       bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
-       const struct chv_community *community = pctrl->community;
-       int ret, i, irq_base;
-
-       *chip = chv_gpio_chip;
-
-       chip->ngpio = community->pins[community->npins - 1].number + 1;
-       chip->label = dev_name(pctrl->dev);
-       chip->parent = pctrl->dev;
-       chip->base = -1;
-       if (need_valid_mask)
-               chip->irq.init_valid_mask = chv_init_irq_valid_mask;
-
-       ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
-       if (ret) {
-               dev_err(pctrl->dev, "Failed to register gpiochip\n");
-               return ret;
-       }
-
-       for (i = 0; i < community->ngpio_ranges; i++) {
-               range = &community->gpio_ranges[i];
-               ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
-                                            range->base, range->base,
-                                            range->npins);
-               if (ret) {
-                       dev_err(pctrl->dev, "failed to add GPIO pin range\n");
-                       return ret;
-               }
-       }
+       struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
 
        /*
         * The same set of machines in chv_no_valid_mask[] have incorrectly
@@ -1596,7 +1568,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
         *
         * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
         */
-       if (!need_valid_mask) {
+       if (!pctrl->chip.irq.init_valid_mask) {
                /*
                 * Mask all interrupts the community is able to generate
                 * but leave the ones that can only generate GPEs unmasked.
@@ -1608,15 +1580,47 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        /* Clear all interrupts */
        chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
-       if (!need_valid_mask) {
-               irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
-                                               community->npins, NUMA_NO_NODE);
-               if (irq_base < 0) {
-                       dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
-                       return irq_base;
+       return 0;
+}
+
+static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+       struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+       const struct chv_community *community = pctrl->community;
+       const struct chv_gpio_pinrange *range;
+       int ret, i;
+
+       for (i = 0; i < community->ngpio_ranges; i++) {
+               range = &community->gpio_ranges[i];
+               ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
+                                            range->base, range->base,
+                                            range->npins);
+               if (ret) {
+                       dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+                       return ret;
                }
        }
 
+       return 0;
+}
+
+static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+{
+       const struct chv_gpio_pinrange *range;
+       struct gpio_chip *chip = &pctrl->chip;
+       bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
+       const struct chv_community *community = pctrl->community;
+       int ret, i, irq_base;
+
+       *chip = chv_gpio_chip;
+
+       chip->ngpio = community->pins[community->npins - 1].number + 1;
+       chip->label = dev_name(pctrl->dev);
+       chip->add_pin_ranges = chv_gpio_add_pin_ranges;
+       chip->parent = pctrl->dev;
+       chip->base = -1;
+
+       pctrl->irq = irq;
        pctrl->irqchip.name = "chv-gpio";
        pctrl->irqchip.irq_startup = chv_gpio_irq_startup;
        pctrl->irqchip.irq_ack = chv_gpio_irq_ack;
@@ -1625,10 +1629,27 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        pctrl->irqchip.irq_set_type = chv_gpio_irq_type;
        pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
 
-       ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0,
-                                  handle_bad_irq, IRQ_TYPE_NONE);
+       chip->irq.chip = &pctrl->irqchip;
+       chip->irq.init_hw = chv_gpio_irq_init_hw;
+       chip->irq.parent_handler = chv_gpio_irq_handler;
+       chip->irq.num_parents = 1;
+       chip->irq.parents = &pctrl->irq;
+       chip->irq.default_type = IRQ_TYPE_NONE;
+       chip->irq.handler = handle_bad_irq;
+       if (need_valid_mask) {
+               chip->irq.init_valid_mask = chv_init_irq_valid_mask;
+       } else {
+               irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+                                               community->npins, NUMA_NO_NODE);
+               if (irq_base < 0) {
+                       dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+                       return irq_base;
+               }
+       }
+
+       ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
        if (ret) {
-               dev_err(pctrl->dev, "failed to add IRQ chip\n");
+               dev_err(pctrl->dev, "Failed to register gpiochip\n");
                return ret;
        }
 
@@ -1642,8 +1663,6 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                }
        }
 
-       gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq,
-                                    chv_gpio_irq_handler);
        return 0;
 }
 
index 44d7f50bbc82e9d431aef96ea2be296e5d04e673..d936e7aa74c4b258a225ad956dad97731e68137a 100644 (file)
@@ -49,6 +49,7 @@
                .padown_offset = SPT_PAD_OWN,           \
                .padcfglock_offset = SPT_PADCFGLOCK,    \
                .hostown_offset = SPT_HOSTSW_OWN,       \
+               .is_offset = SPT_GPI_IS,                \
                .ie_offset = SPT_GPI_IE,                \
                .pin_base = (s),                        \
                .npins = ((e) - (s) + 1),               \
index 3c80828a5e503bf634f281bd57b7f6031a1df453..bbc919bef2bf5d367498978e2b3955e4b7b30177 100644 (file)
@@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc,
                return ret;
 
        meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit);
+       bit = bit << 1;
 
        ret = regmap_read(pc->reg_ds, reg, &val);
        if (ret)
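
For context on the one-line fix above: the drive-strength registers pack a 2-bit field per pin, while meson_calc_reg_and_bit() yields a per-pin bit index, so the index must be doubled before it addresses the field (the set path already doubles it the same way). A worked sketch:

/* Pin index within a register -> 2-bit drive-strength field offset.
 * E.g. the 4th pin (index 3) lives at bits 7:6. */
static unsigned int demo_ds_bit(unsigned int pin_index_in_reg)
{
        return pin_index_in_reg << 1;   /* 2 bits per pin */
}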
index eab078244a4c3e8c4c6bfea829e41edab62f76c4..73aff6591de21df2455a1bfb43a3c7f59c6ef721 100644 (file)
@@ -866,7 +866,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start,
+       gpio_dev->base = devm_ioremap(&pdev->dev, res->start,
                                                resource_size(res));
        if (!gpio_dev->base)
                return -ENOMEM;
index 24e0e2ef47a4dbad8e6d70771cb973df6ccfb03a..369e04350e3dec7b29309e1b6b4deb64ef7bea9e 100644 (file)
@@ -1809,7 +1809,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
 static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
                                     unsigned int pin, bool high)
 {
-       if (jzpc->version >= ID_JZ4770)
+       if (jzpc->version >= ID_JZ4760)
                ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
        else
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
index e914f6efd39e87b45648512293e2269db0b0638d..9503ddf2edc76a2dda40168b818c3f3e193e983b 100644 (file)
@@ -85,7 +85,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin)
        const struct pinmux_ops *ops = pctldev->desc->pmxops;
 
        /* Can't inspect pin, assume it can be used */
-       if (!desc)
+       if (!desc || !ops)
                return true;
 
        if (ops->strict && desc->mux_usecount)
index bb0edf51dfda4cf96bd5c7f5496ba213163e5468..5731d1b60e28a4d4e9b8a82b98310ff706a7cccf 100644 (file)
@@ -73,13 +73,6 @@ static int send_kbbl_msg(struct wilco_ec_device *ec,
                return ret;
        }
 
-       if (response->status) {
-               dev_err(ec->dev,
-                       "EC reported failure sending keyboard LEDs command: %d",
-                       response->status);
-               return -EIO;
-       }
-
        return 0;
 }
 
@@ -87,6 +80,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
 {
        struct wilco_keyboard_leds_msg request;
        struct wilco_keyboard_leds_msg response;
+       int ret;
 
        memset(&request, 0, sizeof(request));
        request.command = WILCO_EC_COMMAND_KBBL;
@@ -94,7 +88,18 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
        request.mode    = WILCO_KBBL_MODE_FLAG_PWM;
        request.percent = brightness;
 
-       return send_kbbl_msg(ec, &request, &response);
+       ret = send_kbbl_msg(ec, &request, &response);
+       if (ret < 0)
+               return ret;
+
+       if (response.status) {
+               dev_err(ec->dev,
+                       "EC reported failure sending keyboard LEDs command: %d",
+                       response.status);
+               return -EIO;
+       }
+
+       return 0;
 }
 
 static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
@@ -140,6 +145,13 @@ static int kbbl_init(struct wilco_ec_device *ec)
        if (ret < 0)
                return ret;
 
+       if (response.status) {
+               dev_err(ec->dev,
+                       "EC reported failure sending keyboard LEDs command: %d",
+                       response.status);
+               return -EIO;
+       }
+
        if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
                return response.percent;
 
index 61753b648506b6f59b643647a2cb82aaa60eaf53..5d21c6adf1ab6de9647a25d3fb17452d5509b2c8 100644 (file)
@@ -309,7 +309,7 @@ static struct platform_driver mlxbf_bootctl_driver = {
        .probe = mlxbf_bootctl_probe,
        .driver = {
                .name = "mlxbf-bootctl",
-               .groups = mlxbf_bootctl_groups,
+               .dev_groups = mlxbf_bootctl_groups,
                .acpi_match_table = mlxbf_bootctl_acpi_ids,
        }
 };
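
The one-word fix above rests on a driver-core distinction: in struct device_driver, .groups attaches sysfs attribute groups to the driver object itself, whereas .dev_groups attaches them to every device bound to the driver, which is where the documented bootctl attributes are expected. A minimal sketch of the intended wiring (demo names hypothetical):

#include <linux/platform_device.h>

static struct attribute *demo_attrs[] = { NULL };
ATTRIBUTE_GROUPS(demo);

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo",
                /* created under each bound device, not under the driver */
                .dev_groups = demo_groups,
        },
};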
index 9a5c9fd2dbc60cf2ab441ef48ba4d6db3cb4e119..5739a9669b29139319738c8775731f89074929df 100644 (file)
@@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info {
  * @work: work struct for deferred process
  * @timer: background timer
  * @vring: Tx/Rx ring
- * @spin_lock: spin lock
+ * @spin_lock: Tx/Rx spin lock
  * @is_ready: ready flag
  */
 struct mlxbf_tmfifo {
@@ -164,7 +164,7 @@ struct mlxbf_tmfifo {
        struct work_struct work;
        struct timer_list timer;
        struct mlxbf_tmfifo_vring *vring[2];
-       spinlock_t spin_lock;           /* spin lock */
+       spinlock_t spin_lock[2];        /* Tx/Rx spin locks */
        bool is_ready;
 };
 
@@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
        writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
 
        /* Use spin-lock to protect the 'cons->tx_buf'. */
-       spin_lock_irqsave(&fifo->spin_lock, flags);
+       spin_lock_irqsave(&fifo->spin_lock[0], flags);
 
        while (size > 0) {
                addr = cons->tx_buf.buf + cons->tx_buf.tail;
@@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
                }
        }
 
-       spin_unlock_irqrestore(&fifo->spin_lock, flags);
+       spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
 }
 
 /* Rx/Tx one word in the descriptor buffer. */
@@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
                fifo->vring[is_rx] = NULL;
 
                /* Notify upper layer that packet is done. */
-               spin_lock_irqsave(&fifo->spin_lock, flags);
+               spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
                vring_interrupt(0, vring->vq);
-               spin_unlock_irqrestore(&fifo->spin_lock, flags);
+               spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
        }
 
 mlxbf_tmfifo_desc_done:
@@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
                 * worker handler.
                 */
                if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
-                       spin_lock_irqsave(&fifo->spin_lock, flags);
+                       spin_lock_irqsave(&fifo->spin_lock[0], flags);
                        tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
                        mlxbf_tmfifo_console_output(tm_vdev, vring);
-                       spin_unlock_irqrestore(&fifo->spin_lock, flags);
+                       spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
                } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
                                            &fifo->pend_events)) {
                        return true;
@@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
        if (!fifo)
                return -ENOMEM;
 
-       spin_lock_init(&fifo->spin_lock);
+       spin_lock_init(&fifo->spin_lock[0]);
+       spin_lock_init(&fifo->spin_lock[1]);
        INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
        mutex_init(&fifo->lock);
 
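
These hunks split the FIFO's single lock into a two-element array indexed by the existing is_rx flag, so the console Tx buffering path (index 0) and Rx completion no longer contend on one lock. The shape, condensed into a sketch with demo_ placeholders:

#include <linux/spinlock.h>

struct demo_fifo {
        spinlock_t spin_lock[2];        /* [0] Tx, [1] Rx */
};

static void demo_complete(struct demo_fifo *fifo, int is_rx)
{
        unsigned long flags;

        spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
        /* vring_interrupt() / console output work goes here */
        spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
}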
index 706207d192aeba3ceabbc405e4f121405c311781..77be37a1fbcf58f30f135234d67294444ee46fda 100644 (file)
@@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
        item = pdata->items;
 
        for (i = 0; i < pdata->counter; i++, item++) {
+               if (item->capability) {
+                       /*
+                        * Read group capability register to get actual number
+                        * of interrupt capable components and set group mask
+                        * accordingly.
+                        */
+                       ret = regmap_read(priv->regmap, item->capability,
+                                         &regval);
+                       if (ret)
+                               goto out;
+
+                       item->mask = GENMASK((regval & item->mask) - 1, 0);
+               }
+
                /* Clear group presence event. */
                ret = regmap_write(priv->regmap, item->reg +
                                   MLXREG_HOTPLUG_EVENT_OFF, 0);
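
The new capability path turns a component count into a contiguous low-bit mask with GENMASK(count - 1, 0). A worked sketch with made-up numbers: a count of 4 gives GENMASK(3, 0) == 0x0f.

#include <linux/bits.h>
#include <linux/types.h>

static unsigned long demo_group_mask(u32 regval, unsigned long cur_mask)
{
        u32 count = regval & cur_mask;  /* interrupt-capable components,
                                           assumed >= 1 as in the code above */

        return GENMASK(count - 1, 0);   /* count 4 -> 0x0f */
}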
index f4d0a86c00d079557d66e8fe71e6110d762a46fa..5e77b0dc5fd6b958450915eb13507fc08f69b6fe 100644 (file)
@@ -18,7 +18,7 @@ if MIPS_PLATFORM_DEVICES
 
 config CPU_HWMON
        tristate "Loongson-3 CPU HWMon Driver"
-       depends on CONFIG_MACH_LOONGSON64
+       depends on MACH_LOONGSON64
        select HWMON
        default y
        help
index 27d5b40fb71724581f2161f70b8f14fa2a6fa24f..587403c445980a162027ea1c982bd861dcf6fc9d 100644 (file)
@@ -997,7 +997,6 @@ config INTEL_SCU_IPC
 config INTEL_SCU_IPC_UTIL
        tristate "Intel SCU IPC utility driver"
        depends on INTEL_SCU_IPC
-       default y
        ---help---
          The IPC Util driver provides an interface with the SCU enabling
          low level access for debug work and updating the firmware. Say
@@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM
        depends on PCI && IOSF_MBI && PM
        help
          Power-management driver for Intel's Image Signal Processor found on
-         Bay and Cherry Trail devices. This dummy driver's sole purpose is to
-         turn the ISP off (put it in D3) to save power and to allow entering
-         of S0ix modes.
+         Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+         is to turn the ISP off (put it in D3) to save power and to allow
+         entering of S0ix modes.
 
          To compile this driver as a module, choose M here: the module
          will be called intel_atomisp2_pm.
@@ -1337,6 +1336,17 @@ config PCENGINES_APU2
          To compile this driver as a module, choose M here: the module
          will be called pcengines-apuv2.
 
+config INTEL_UNCORE_FREQ_CONTROL
+       tristate "Intel Uncore frequency control driver"
+       depends on X86_64
+       help
+         This driver allows control of uncore frequency limits on
+         supported server platforms.
+         Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+         To compile this driver as a module, choose M here: the module
+         will be called intel-uncore-frequency.
+
 source "drivers/platform/x86/intel_speed_select_if/Kconfig"
 
 config SYSTEM76_ACPI
index 42d85a00be4ee472a04f4c40a875fd3b02da633a..3747b1f07cf15e3be0578d6c375092c5038dd382 100644 (file)
@@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM)     += intel_atomisp2_pm.o
 obj-$(CONFIG_PCENGINES_APU2)   += pcengines-apuv2.o
 obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
 obj-$(CONFIG_SYSTEM76_ACPI)    += system76_acpi.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL)        += intel-uncore-frequency.o
index b361c73636a40859b6e9004eec62b15c95ab3308..6f12747a359a286a8a5fcfd33440264e75e17de9 100644 (file)
@@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
        { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
        { KE_IGNORE, 0x6E, },  /* Low Battery notification */
+       { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
        { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
        { KE_KEY, 0x7c, { KEY_MICMUTE } },
        { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
index 821b08e01635760f3c38a4d1d42741358f876824..43bb15e055292395ab659524898663c09d08d8f2 100644 (file)
@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
 #define NOTIFY_KBD_BRTDWN              0xc5
 #define NOTIFY_KBD_BRTTOGGLE           0xc7
 #define NOTIFY_KBD_FBM                 0x99
+#define NOTIFY_KBD_TTP                 0xae
 
 #define ASUS_WMI_FNLOCK_BIOS_DISABLED  BIT(0)
 
@@ -81,6 +82,10 @@ MODULE_LICENSE("GPL");
 #define ASUS_FAN_BOOST_MODE_SILENT_MASK                0x02
 #define ASUS_FAN_BOOST_MODES_MASK              0x03
 
+#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT   0
+#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
+#define ASUS_THROTTLE_THERMAL_POLICY_SILENT    2
+
 #define USB_INTEL_XUSB2PR              0xD0
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
 
@@ -198,6 +203,9 @@ struct asus_wmi {
        u8 fan_boost_mode_mask;
        u8 fan_boost_mode;
 
+       bool throttle_thermal_policy_available;
+       u8 throttle_thermal_policy_mode;
+
        // The RSOC controls the maximum charging percentage.
        bool battery_rsoc_available;
 
@@ -512,13 +520,7 @@ static void kbd_led_update(struct asus_wmi *asus)
 {
        int ctrl_param = 0;
 
-       /*
-        * bits 0-2: level
-        * bit 7: light on/off
-        */
-       if (asus->kbd_led_wk > 0)
-               ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
-
+       ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
        asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
 }
 
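
With the branch removed, a requested level of 0 is still sent to the firmware: the deleted comment documented bit 7 as the on/off flag and the low bits as the level, and the old code left ctrl_param at 0 for level 0 instead of issuing an explicit command. The resulting control word, sketched:

/* Per the removed comment: low bits carry the level, bit 7 the
 * on/off flag. Bit 7 stays set so level 0 is a real command. */
static u8 demo_kbd_ctrl(u8 level)
{
        return 0x80 | (level & 0x7f);
}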
@@ -1724,6 +1726,107 @@ static ssize_t fan_boost_mode_store(struct device *dev,
 // Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
 static DEVICE_ATTR_RW(fan_boost_mode);
 
+/* Throttle thermal policy ****************************************************/
+
+static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
+{
+       u32 result;
+       int err;
+
+       asus->throttle_thermal_policy_available = false;
+
+       err = asus_wmi_get_devstate(asus,
+                                   ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+                                   &result);
+       if (err) {
+               if (err == -ENODEV)
+                       return 0;
+               return err;
+       }
+
+       if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
+               asus->throttle_thermal_policy_available = true;
+
+       return 0;
+}
+
+static int throttle_thermal_policy_write(struct asus_wmi *asus)
+{
+       int err;
+       u8 value;
+       u32 retval;
+
+       value = asus->throttle_thermal_policy_mode;
+
+       err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+                                   value, &retval);
+       if (err) {
+               pr_warn("Failed to set throttle thermal policy: %d\n", err);
+               return err;
+       }
+
+       if (retval != 1) {
+               pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
+                       retval);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
+{
+       if (!asus->throttle_thermal_policy_available)
+               return 0;
+
+       asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+       return throttle_thermal_policy_write(asus);
+}
+
+static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
+{
+       u8 new_mode = asus->throttle_thermal_policy_mode + 1;
+
+       if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+               new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+
+       asus->throttle_thermal_policy_mode = new_mode;
+       return throttle_thermal_policy_write(asus);
+}
+
+static ssize_t throttle_thermal_policy_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct asus_wmi *asus = dev_get_drvdata(dev);
+       u8 mode = asus->throttle_thermal_policy_mode;
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t throttle_thermal_policy_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       int result;
+       u8 new_mode;
+       struct asus_wmi *asus = dev_get_drvdata(dev);
+
+       result = kstrtou8(buf, 10, &new_mode);
+       if (result < 0)
+               return result;
+
+       if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+               return -EINVAL;
+
+       asus->throttle_thermal_policy_mode = new_mode;
+       throttle_thermal_policy_write(asus);
+
+       return count;
+}
+
+// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+static DEVICE_ATTR_RW(throttle_thermal_policy);
+
 /* Backlight ******************************************************************/
 
 static int read_backlight_power(struct asus_wmi *asus)
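
Once registered, the attribute appears as a writable sysfs file. A hypothetical userspace exercise of it (the asus-nb-wmi platform path is an assumption; the accepted values 0/1/2 come from the store handler above):

#include <stdio.h>

int main(void)
{
        /* path assumed; it depends on the platform device name */
        const char *p =
                "/sys/devices/platform/asus-nb-wmi/throttle_thermal_policy";
        FILE *f = fopen(p, "w");

        if (!f)
                return 1;
        fputs("2\n", f);        /* 2 == silent */
        return fclose(f) ? 1 : 0;
}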
@@ -2005,6 +2108,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
                return;
        }
 
+       if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) {
+               throttle_thermal_policy_switch_next(asus);
+               return;
+       }
+
        if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
                return;
 
@@ -2155,6 +2263,7 @@ static struct attribute *platform_attributes[] = {
        &dev_attr_lid_resume.attr,
        &dev_attr_als_enable.attr,
        &dev_attr_fan_boost_mode.attr,
+       &dev_attr_throttle_thermal_policy.attr,
        NULL
 };
 
@@ -2178,6 +2287,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
                devid = ASUS_WMI_DEVID_ALS_ENABLE;
        else if (attr == &dev_attr_fan_boost_mode.attr)
                ok = asus->fan_boost_mode_available;
+       else if (attr == &dev_attr_throttle_thermal_policy.attr)
+               ok = asus->throttle_thermal_policy_available;
 
        if (devid != -1)
                ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -2437,6 +2548,12 @@ static int asus_wmi_add(struct platform_device *pdev)
        if (err)
                goto fail_fan_boost_mode;
 
+       err = throttle_thermal_policy_check_present(asus);
+       if (err)
+               goto fail_throttle_thermal_policy;
+       else
+               throttle_thermal_policy_set_default(asus);
+
        err = asus_wmi_sysfs_init(asus->platform_device);
        if (err)
                goto fail_sysfs;
@@ -2521,6 +2638,7 @@ fail_hwmon:
 fail_input:
        asus_wmi_sysfs_exit(asus->platform_device);
 fail_sysfs:
+fail_throttle_thermal_policy:
 fail_fan_boost_mode:
 fail_platform:
        kfree(asus);
index be85ed966bf33bd7b809440f53dd0dac1b405273..b471b86c28fe8d2e15aca3766de871e2dfececad 100644 (file)
 
 #define MAX_SPEED 3
 
-static int temp_limits[3] = { 55000, 60000, 65000 };
+#define TEMP_LIMIT0_DEFAULT    55000
+#define TEMP_LIMIT1_DEFAULT    60000
+#define TEMP_LIMIT2_DEFAULT    65000
+
+#define HYSTERESIS_DEFAULT     3000
+
+#define SPEED_ON_AC_DEFAULT    2
+
+static int temp_limits[3] = {
+       TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
+};
 module_param_array(temp_limits, int, NULL, 0444);
 MODULE_PARM_DESC(temp_limits,
                 "Millicelsius values above which the fan speed increases");
 
-static int hysteresis = 3000;
+static int hysteresis = HYSTERESIS_DEFAULT;
 module_param(hysteresis, int, 0444);
 MODULE_PARM_DESC(hysteresis,
                 "Hysteresis in millicelsius before lowering the fan speed");
 
-static int speed_on_ac = 2;
+static int speed_on_ac = SPEED_ON_AC_DEFAULT;
 module_param(speed_on_ac, int, 0444);
 MODULE_PARM_DESC(speed_on_ac,
                 "minimum fan speed to allow when system is powered by AC");
@@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
-               if (temp_limits[i] < 40000 || temp_limits[i] > 70000) {
+               if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
                        dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n",
                                temp_limits[i]);
-                       return -EINVAL;
+                       temp_limits[0] = TEMP_LIMIT0_DEFAULT;
+                       temp_limits[1] = TEMP_LIMIT1_DEFAULT;
+                       temp_limits[2] = TEMP_LIMIT2_DEFAULT;
+                       break;
                }
        }
        if (hysteresis < 1000 || hysteresis > 10000) {
                dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
                        hysteresis);
-               return -EINVAL;
+               hysteresis = HYSTERESIS_DEFAULT;
        }
        if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
                dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
                        speed_on_ac);
-               return -EINVAL;
+               speed_on_ac = SPEED_ON_AC_DEFAULT;
        }
 
        fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
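
Probe now falls back to the built-in defaults instead of failing with -EINVAL when a module parameter is out of range, so a typo on the command line no longer leaves the fan driver unloaded. The pattern, generalized into a hypothetical helper:

#include <linux/device.h>

static void demo_clamp_param(struct device *dev, int *val,
                             int min, int max, int def)
{
        if (*val < min || *val > max) {
                dev_err(dev, "invalid value %d, using default %d\n",
                        *val, def);
                *val = def;
        }
}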
index 9579a706fc0823c847c72937c4054409854f9995..a881b709af25633274690daea073b13cdc61609c 100644 (file)
@@ -300,7 +300,7 @@ static int __init hp_wmi_bios_2008_later(void)
 
 static int __init hp_wmi_bios_2009_later(void)
 {
-       int state = 0;
+       u8 state[128];
        int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
                                       sizeof(state), sizeof(state));
        if (!ret)
index ef6d4bd77b1a10dcf42728a654a58a5e1534374e..43d590250228cbe6595df289c0be59fe7cb8eb2e 100644 (file)
@@ -19,6 +19,7 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alex Hung");
 
 static const struct acpi_device_id intel_hid_ids[] = {
+       {"INT1051", 0},
        {"INT33D5", 0},
        {"", 0},
 };
diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c
new file mode 100644 (file)
index 0000000..2b1a073
--- /dev/null
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Setting
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Provide an interface to set MSR 620 at per-die granularity. On CPU online,
+ * one control CPU per die is identified to read/write the limit; if that CPU
+ * goes offline, a new control CPU is chosen. When the last CPU in a die goes
+ * offline, the sysfs object for that die is removed. Most of the code deals
+ * with creating the sysfs objects and handling their read/write attributes.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_UNCORE_RATIO_LIMIT                 0x620
+#define UNCORE_FREQ_KHZ_MULTIPLIER             100000
+
+/**
+ * struct uncore_data -        Encapsulate all uncore data
+ * @stored_uncore_data:        Last user changed MSR 620 value, which will be restored
+ *                     on system resume.
+ * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
+ * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
+ * @control_cpu:       Designated CPU for a die to read/write
+ * @valid:             Mark the data valid/invalid
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+ * settings for a die/package.
+ */
+struct uncore_data {
+       struct kobject kobj;
+       u64 stored_uncore_data;
+       u32 initial_min_freq_khz;
+       u32 initial_max_freq_khz;
+       int control_cpu;
+       bool valid;
+};
+
+#define to_uncore_data(a) container_of(a, struct uncore_data, kobj)
+
+/* Max instances for uncore data, one for each die */
+static int uncore_max_entries __read_mostly;
+/* Storage for uncore data for all instances */
+static struct uncore_data *uncore_instances;
+/* Root of all uncore sysfs kobjs */
+struct kobject uncore_root_kobj;
+/* Stores the CPU mask of the target CPUs to use during uncore read/write */
+static cpumask_t uncore_cpu_mask;
+/* CPU online callback register instance */
+static enum cpuhp_state uncore_hp_state __read_mostly;
+/* Mutex to control all mutual exclusions */
+static DEFINE_MUTEX(uncore_lock);
+
+struct uncore_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct kobject *kobj,
+                       struct attribute *attr, char *buf);
+       ssize_t (*store)(struct kobject *kobj,
+                        struct attribute *attr, const char *c, ssize_t count);
+};
+
+#define define_one_uncore_ro(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_uncore_rw(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define show_uncore_data(member_name)                                  \
+       static ssize_t show_##member_name(struct kobject *kobj,         \
+                                         struct attribute *attr,       \
+                                         char *buf)                    \
+       {                                                               \
+               struct uncore_data *data = to_uncore_data(kobj);        \
+               return scnprintf(buf, PAGE_SIZE, "%u\n",                \
+                                data->member_name);                    \
+       }                                                               \
+       define_one_uncore_ro(member_name)
+
+show_uncore_data(initial_min_freq_khz);
+show_uncore_data(initial_max_freq_khz);
+
+/* Common function to read MSR 0x620 and read min/max */
+static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
+                            unsigned int *max)
+{
+       u64 cap;
+       int ret;
+
+       ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+       if (ret)
+               return ret;
+
+       *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+       *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+       return 0;
+}
+
+/* Common function to set min/max ratios to be used by sysfs callbacks */
+static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
+                             int set_max)
+{
+       int ret;
+       u64 cap;
+
+       mutex_lock(&uncore_lock);
+
+       input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+       if (!input || input > 0x7F) {
+               ret = -EINVAL;
+               goto finish_write;
+       }
+
+       ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+       if (ret)
+               goto finish_write;
+
+       if (set_max) {
+               cap &= ~0x7F;
+               cap |= input;
+       } else  {
+               cap &= ~GENMASK(14, 8);
+               cap |= (input << 8);
+       }
+
+       ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+       if (ret)
+               goto finish_write;
+
+       data->stored_uncore_data = cap;
+
+finish_write:
+       mutex_unlock(&uncore_lock);
+
+       return ret;
+}
+
+static ssize_t store_min_max_freq_khz(struct kobject *kobj,
+                                     struct attribute *attr,
+                                     const char *buf, ssize_t count,
+                                     int min_max)
+{
+       struct uncore_data *data = to_uncore_data(kobj);
+       unsigned int input;
+
+       if (kstrtouint(buf, 10, &input))
+               return -EINVAL;
+
+       uncore_write_ratio(data, input, min_max);
+
+       return count;
+}
+
+static ssize_t show_min_max_freq_khz(struct kobject *kobj,
+                                    struct attribute *attr,
+                                    char *buf, int min_max)
+{
+       struct uncore_data *data = to_uncore_data(kobj);
+       unsigned int min, max;
+       int ret;
+
+       mutex_lock(&uncore_lock);
+       ret = uncore_read_ratio(data, &min, &max);
+       mutex_unlock(&uncore_lock);
+       if (ret)
+               return ret;
+
+       if (min_max)
+               return sprintf(buf, "%u\n", max);
+
+       return sprintf(buf, "%u\n", min);
+}
+
+#define store_uncore_min_max(name, min_max)                            \
+       static ssize_t store_##name(struct kobject *kobj,               \
+                                   struct attribute *attr,             \
+                                   const char *buf, ssize_t count)     \
+       {                                                               \
+                                                                       \
+               return store_min_max_freq_khz(kobj, attr, buf, count,   \
+                                             min_max);                 \
+       }
+
+#define show_uncore_min_max(name, min_max)                             \
+       static ssize_t show_##name(struct kobject *kobj,                \
+                                  struct attribute *attr, char *buf)   \
+       {                                                               \
+                                                                       \
+               return show_min_max_freq_khz(kobj, attr, buf, min_max); \
+       }
+
+store_uncore_min_max(min_freq_khz, 0);
+store_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_min_max(min_freq_khz, 0);
+show_uncore_min_max(max_freq_khz, 1);
+
+define_one_uncore_rw(min_freq_khz);
+define_one_uncore_rw(max_freq_khz);
+
+static struct attribute *uncore_attrs[] = {
+       &initial_min_freq_khz.attr,
+       &initial_max_freq_khz.attr,
+       &max_freq_khz.attr,
+       &min_freq_khz.attr,
+       NULL
+};
+
+static struct kobj_type uncore_ktype = {
+       .sysfs_ops = &kobj_sysfs_ops,
+       .default_attrs = uncore_attrs,
+};
+
+static struct kobj_type uncore_root_ktype = {
+       .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* Caller provides protection */
+static struct uncore_data *uncore_get_instance(unsigned int cpu)
+{
+       int id = topology_logical_die_id(cpu);
+
+       if (id >= 0 && id < uncore_max_entries)
+               return &uncore_instances[id];
+
+       return NULL;
+}
+
+static void uncore_add_die_entry(int cpu)
+{
+       struct uncore_data *data;
+
+       mutex_lock(&uncore_lock);
+       data = uncore_get_instance(cpu);
+       if (!data) {
+               mutex_unlock(&uncore_lock);
+               return;
+       }
+
+       if (data->valid) {
+               /* control cpu changed */
+               data->control_cpu = cpu;
+       } else {
+               char str[64];
+               int ret;
+
+               memset(data, 0, sizeof(*data));
+               sprintf(str, "package_%02d_die_%02d",
+                       topology_physical_package_id(cpu),
+                       topology_die_id(cpu));
+
+               uncore_read_ratio(data, &data->initial_min_freq_khz,
+                                 &data->initial_max_freq_khz);
+
+               ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
+                                          &uncore_root_kobj, str);
+               if (!ret) {
+                       data->control_cpu = cpu;
+                       data->valid = true;
+               }
+       }
+       mutex_unlock(&uncore_lock);
+}
+
+/* Last CPU in this die is offline, so remove sysfs entries */
+static void uncore_remove_die_entry(int cpu)
+{
+       struct uncore_data *data;
+
+       mutex_lock(&uncore_lock);
+       data = uncore_get_instance(cpu);
+       if (data) {
+               kobject_put(&data->kobj);
+               data->control_cpu = -1;
+               data->valid = false;
+       }
+       mutex_unlock(&uncore_lock);
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+       int target;
+
+       /* Check if there is an online cpu in the die for uncore MSR */
+       target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+       if (target < nr_cpu_ids)
+               return 0;
+
+       /* Use this CPU on this die as a control CPU */
+       cpumask_set_cpu(cpu, &uncore_cpu_mask);
+       uncore_add_die_entry(cpu);
+
+       return 0;
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+       int target;
+
+       /* Check if existing cpu is used for uncore MSRs */
+       if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+               return 0;
+
+       /* Find a new cpu to set uncore MSR */
+       target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+
+       if (target < nr_cpu_ids) {
+               cpumask_set_cpu(target, &uncore_cpu_mask);
+               uncore_add_die_entry(target);
+       } else {
+               uncore_remove_die_entry(cpu);
+       }
+
+       return 0;
+}
+
+static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
+                           void *_unused)
+{
+       int cpu;
+
+       switch (mode) {
+       case PM_POST_HIBERNATION:
+       case PM_POST_RESTORE:
+       case PM_POST_SUSPEND:
+               for_each_cpu(cpu, &uncore_cpu_mask) {
+                       struct uncore_data *data;
+                       int ret;
+
+                       data = uncore_get_instance(cpu);
+                       if (!data || !data->valid || !data->stored_uncore_data)
+                               continue;
+
+                       ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT,
+                                           data->stored_uncore_data);
+                       if (ret)
+                               return ret;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static struct notifier_block uncore_pm_nb = {
+       .notifier_call = uncore_pm_notify,
+};
+
+#define ICPU(model)     { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+       ICPU(INTEL_FAM6_BROADWELL_G),
+       ICPU(INTEL_FAM6_BROADWELL_X),
+       ICPU(INTEL_FAM6_BROADWELL_D),
+       ICPU(INTEL_FAM6_SKYLAKE_X),
+       ICPU(INTEL_FAM6_ICELAKE_X),
+       ICPU(INTEL_FAM6_ICELAKE_D),
+       {}
+};
+
+static int __init intel_uncore_init(void)
+{
+       const struct x86_cpu_id *id;
+       int ret;
+
+       id = x86_match_cpu(intel_uncore_cpu_ids);
+       if (!id)
+               return -ENODEV;
+
+       uncore_max_entries = topology_max_packages() *
+                                       topology_max_die_per_package();
+       uncore_instances = kcalloc(uncore_max_entries,
+                                  sizeof(*uncore_instances), GFP_KERNEL);
+       if (!uncore_instances)
+               return -ENOMEM;
+
+       ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype,
+                                  &cpu_subsys.dev_root->kobj,
+                                  "intel_uncore_frequency");
+       if (ret)
+               goto err_free;
+
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                               "platform/x86/uncore-freq:online",
+                               uncore_event_cpu_online,
+                               uncore_event_cpu_offline);
+       if (ret < 0)
+               goto err_rem_kobj;
+
+       uncore_hp_state = ret;
+
+       ret = register_pm_notifier(&uncore_pm_nb);
+       if (ret)
+               goto err_rem_state;
+
+       return 0;
+
+err_rem_state:
+       cpuhp_remove_state(uncore_hp_state);
+err_rem_kobj:
+       kobject_put(&uncore_root_kobj);
+err_free:
+       kfree(uncore_instances);
+
+       return ret;
+}
+module_init(intel_uncore_init)
+
+static void __exit intel_uncore_exit(void)
+{
+       int i;
+
+       unregister_pm_notifier(&uncore_pm_nb);
+       cpuhp_remove_state(uncore_hp_state);
+       for (i = 0; i < uncore_max_entries; ++i) {
+               if (uncore_instances[i].valid)
+                       kobject_put(&uncore_instances[i].kobj);
+       }
+       kobject_put(&uncore_root_kobj);
+       kfree(uncore_instances);
+}
+module_exit(intel_uncore_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
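
For reference, the MSR 0x620 field layout that uncore_read_ratio() and uncore_write_ratio() above operate on: the max ratio occupies bits 6:0 and the min ratio bits 14:8, both in units of 100 MHz. Worked example with a made-up value, cap = 0x0818: max = 0x18 * 100000 kHz = 2.4 GHz, min = 0x08 * 100000 kHz = 800 MHz.

#include <linux/bits.h>
#include <linux/types.h>

static void demo_decode_msr620(u64 cap, unsigned int *min_khz,
                               unsigned int *max_khz)
{
        *max_khz = (cap & GENMASK_ULL(6, 0)) * 100000;
        *min_khz = ((cap & GENMASK_ULL(14, 8)) >> 8) * 100000;
}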
index b0f421fea2a58ed61d0063625c1855d61e575108..805fc0d8515c43aa11ca6c911c5689f1c13b1cfd 100644 (file)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
- * Trail devices. The sole purpose of this driver is to allow the ISP to
- * be put in D3.
+ * Dummy driver for Intel's Image Signal Processor found on Bay Trail
+ * and Cherry Trail devices. The sole purpose of this driver is to allow
+ * the ISP to be put in D3.
  *
  * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
  *
@@ -36,8 +36,7 @@
 static int isp_set_power(struct pci_dev *dev, bool enable)
 {
        unsigned long timeout;
-       u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
-               ISPSSPM0_IUNIT_POWER_OFF;
+       u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF;
 
        /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
        iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
@@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable)
 
        /*
         * There should be no IUNIT access while power-down is
-        * in progress HW sighting: 4567865
+        * in progress. HW sighting: 4567865.
         * Wait up to 50 ms for the IUNIT to shut down.
         * And we do the same for power on.
         */
        timeout = jiffies + msecs_to_jiffies(50);
-       while (1) {
+       do {
                u32 tmp;
 
                /* Wait until ISPSSPM0 bit[25:24] shows the right value */
                iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
                tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
                if (tmp == val)
-                       break;
+                       return 0;
 
-               if (time_after(jiffies, timeout)) {
-                       dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
-                               enable ? "on" : "off");
-                       return -EBUSY;
-               }
                usleep_range(1000, 2000);
-       }
+       } while (time_before(jiffies, timeout));
 
-       return 0;
+       dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+       return -EBUSY;
 }
 
 static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
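
The rework above turns the open-coded while (1) loop with an inner deadline check into a do/while bounded by the deadline: success returns from inside the loop, and the timeout error is reported once, after the loop falls through. The skeleton:

#include <linux/delay.h>
#include <linux/jiffies.h>

static int demo_poll(bool (*done)(void))
{
        unsigned long timeout = jiffies + msecs_to_jiffies(50);

        do {
                if (done())
                        return 0;
                usleep_range(1000, 2000);
        } while (time_before(jiffies, timeout));

        return -EBUSY;
}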
index 2d097fc2dd465b4879f5ba4465000a8d4e3c6364..04138215956bf80967bc70466287f2658b2413e8 100644 (file)
@@ -36,30 +36,6 @@ enum {
        INT33FE_NODE_MAX,
 };
 
-static const struct software_node nodes[];
-
-static const struct software_node_ref_args pi3usb30532_ref = {
-       &nodes[INT33FE_NODE_PI3USB30532]
-};
-
-static const struct software_node_ref_args dp_ref = {
-       &nodes[INT33FE_NODE_DISPLAYPORT]
-};
-
-static struct software_node_ref_args mux_ref;
-
-static const struct software_node_reference usb_connector_refs[] = {
-       { "orientation-switch", 1, &pi3usb30532_ref},
-       { "mode-switch", 1, &pi3usb30532_ref},
-       { "displayport", 1, &dp_ref},
-       { }
-};
-
-static const struct software_node_reference fusb302_refs[] = {
-       { "usb-role-switch", 1, &mux_ref},
-       { }
-};
-
 /*
  * Grrr I severely dislike buggy BIOS-es. At least one BIOS enumerates
  * the max17047 both through the INT33FE ACPI device (it is right there
@@ -95,8 +71,18 @@ static const struct property_entry max17047_props[] = {
        { }
 };
 
+/*
+ * We are not using an inline property here because those are constant,
+ * and we need to adjust this one at runtime to point to the real
+ * software node.
+ */
+static struct software_node_ref_args fusb302_mux_refs[] = {
+       { .node = NULL },
+};
+
 static const struct property_entry fusb302_props[] = {
        PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
+       PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
        { }
 };
 
@@ -112,6 +98,8 @@ static const u32 snk_pdo[] = {
        PDO_VAR(5000, 12000, 3000),
 };
 
+static const struct software_node nodes[];
+
 static const struct property_entry usb_connector_props[] = {
        PROPERTY_ENTRY_STRING("data-role", "dual"),
        PROPERTY_ENTRY_STRING("power-role", "dual"),
@@ -119,15 +107,21 @@ static const struct property_entry usb_connector_props[] = {
        PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
        PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
        PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+       PROPERTY_ENTRY_REF("orientation-switch",
+                          &nodes[INT33FE_NODE_PI3USB30532]),
+       PROPERTY_ENTRY_REF("mode-switch",
+                          &nodes[INT33FE_NODE_PI3USB30532]),
+       PROPERTY_ENTRY_REF("displayport",
+                          &nodes[INT33FE_NODE_DISPLAYPORT]),
        { }
 };
 
 static const struct software_node nodes[] = {
-       { "fusb302", NULL, fusb302_props, fusb302_refs },
+       { "fusb302", NULL, fusb302_props },
        { "max17047", NULL, max17047_props },
        { "pi3usb30532" },
        { "displayport" },
-       { "connector", &nodes[0], usb_connector_props, usb_connector_refs },
+       { "connector", &nodes[0], usb_connector_props },
        { }
 };
 
@@ -163,9 +157,10 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
 {
        software_node_unregister_nodes(nodes);
 
-       if (mux_ref.node) {
-               fwnode_handle_put(software_node_fwnode(mux_ref.node));
-               mux_ref.node = NULL;
+       if (fusb302_mux_refs[0].node) {
+               fwnode_handle_put(
+                       software_node_fwnode(fusb302_mux_refs[0].node));
+               fusb302_mux_refs[0].node = NULL;
        }
 
        if (data->dp) {
@@ -177,25 +172,31 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
 
 static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
 {
+       const struct software_node *mux_ref_node;
        int ret;
 
-       ret = software_node_register_nodes(nodes);
-       if (ret)
-               return ret;
-
-       /* The devices that are not created in this driver need extra steps. */
-
        /*
         * There is no ACPI device node for the USB role mux, so we need to wait
         * until the mux driver has created a software node for the mux device.
         * This means we depend on the mux driver. This function will return
         * -EPROBE_DEFER until the mux device is registered.
         */
-       mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
-       if (!mux_ref.node) {
-               ret = -EPROBE_DEFER;
-               goto err_remove_nodes;
-       }
+       mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+       if (!mux_ref_node)
+               return -EPROBE_DEFER;
+
+       /*
+        * Update the node used in the "usb-role-switch" property. Note
+        * that we rely on software_node_register_nodes() to use the
+        * original instance of the properties instead of copying them.
+        */
+       fusb302_mux_refs[0].node = mux_ref_node;
+
+       ret = software_node_register_nodes(nodes);
+       if (ret)
+               return ret;
+
+       /* The devices that are not created in this driver need extra steps. */
 
        /*
         * The DP connector does have an ACPI device node. In that case we can just
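
The rework in this hunk replaces the old software_node_reference tables with reference-type property entries. For the one reference that is only known at probe time, a mutable software_node_ref_args array sits in the property list and is patched before the nodes are registered. A condensed sketch of that pattern, assuming the <linux/property.h> API used above (node and property names here are illustrative):

static struct software_node_ref_args demo_mux_refs[] = {
	{ .node = NULL },		/* patched at probe time */
};

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", demo_mux_refs),
	{ }
};

static const struct software_node demo_node = { "demo", NULL, demo_props };

/* In probe, before registering demo_node: */
demo_mux_refs[0].node = software_node_find_by_name(NULL, "some-mux");
if (!demo_mux_refs[0].node)
	return -EPROBE_DEFER;
return software_node_register(&demo_node);

This only works because node registration keeps using the original property array rather than a copy, which is exactly what the comment in the hunk relies on.
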
index 512ad234ad0d67fccdb05d241d37fd887da164d5..35ed9711c7b97d3554dceb135432ceaa1729de66 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2010 Intel Corporation
  */
index 292bace83f1e3d95a8dae813370b1fc6610bc0df..6f436836fe501149049701e45e7f0c594f7e49e1 100644 (file)
@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
 
        input_set_capability(input, EV_KEY, KEY_POWER);
 
-       ddata = (struct mid_pb_ddata *)id->driver_data;
+       ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
+                            sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
-               return -ENODATA;
+               return -ENOMEM;
 
        ddata->dev = &pdev->dev;
        ddata->irq = irq;
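
devm_kmemdup() here gives the probe path a writable, per-device copy of the const match-table template instead of casting the const away, with the copy's lifetime tied to the device. The generic shape of the pattern (demo_template stands in for the id->driver_data entry):

/* Duplicate the read-only template; freed automatically on unbind. */
ddata = devm_kmemdup(&pdev->dev, demo_template, sizeof(*demo_template),
		     GFP_KERNEL);
if (!ddata)
	return -ENOMEM;

ddata->dev = &pdev->dev;	/* safe: this copy is private to the device */
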
index 571b4754477c5ca8e7d7600f2330669b3c9c196b..144faa8bad3d20715e0d9197c8dcdecd9673e31b 100644 (file)
@@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = {
        {"GEN2 USB2PCIE2 PLL",          SPT_PMC_BIT_MPHY_CMN_LANE1},
        {"DMIPCIE3 PLL",                SPT_PMC_BIT_MPHY_CMN_LANE2},
        {"SATA PLL",                    SPT_PMC_BIT_MPHY_CMN_LANE3},
-       {},
+       {}
 };
 
 static const struct pmc_bit_map spt_mphy_map[] = {
@@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = {
        {"MPHY CORE LANE 13",          SPT_PMC_BIT_MPHY_LANE13},
        {"MPHY CORE LANE 14",          SPT_PMC_BIT_MPHY_LANE14},
        {"MPHY CORE LANE 15",          SPT_PMC_BIT_MPHY_LANE15},
-       {},
+       {}
 };
 
 static const struct pmc_bit_map spt_pfear_map[] = {
@@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = {
        {"CSME_SMS1",                   SPT_PMC_BIT_CSME_SMS1},
        {"CSME_RTC",                    SPT_PMC_BIT_CSME_RTC},
        {"CSME_PSF",                    SPT_PMC_BIT_CSME_PSF},
-       {},
+       {}
+};
+
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+       spt_pfear_map,
+       NULL
 };
 
 static const struct pmc_bit_map spt_ltr_show_map[] = {
@@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = {
 };
 
 static const struct pmc_reg_map spt_reg_map = {
-       .pfear_sts = spt_pfear_map,
+       .pfear_sts = ext_spt_pfear_map,
        .mphy_sts = spt_mphy_map,
        .pll_sts = spt_pll_map,
        .ltr_show_sts = spt_ltr_show_map,
@@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
        {"SDX",                 BIT(4)},
        {"SPE",                 BIT(5)},
        {"Fuse",                BIT(6)},
-       /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+       /*
+        * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+        * Tiger Lake and Elkhart Lake.
+        */
        {"SBR8",                BIT(7)},
 
        {"CSME_FSC",            BIT(0)},
@@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
        {"HDA_PGD4",            BIT(2)},
        {"HDA_PGD5",            BIT(3)},
        {"HDA_PGD6",            BIT(4)},
-       /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+       /*
+        * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+        * Tiger Lake and Elkhart Lake.
+        */
        {"PSF6",                BIT(5)},
        {"PSF7",                BIT(6)},
        {"PSF8",                BIT(7)},
+       {}
+};
+
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+       cnp_pfear_map,
+       NULL
+};
 
+static const struct pmc_bit_map icl_pfear_map[] = {
        /* Ice Lake generation onwards only */
        {"RES_65",              BIT(0)},
        {"RES_66",              BIT(1)},
@@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
        {}
 };
 
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+       cnp_pfear_map,
+       icl_pfear_map,
+       NULL
+};
+
+static const struct pmc_bit_map tgl_pfear_map[] = {
+       /* Tiger Lake and Elkhart Lake generation onwards only */
+       {"PSF9",                BIT(0)},
+       {"RES_66",              BIT(1)},
+       {"RES_67",              BIT(2)},
+       {"RES_68",              BIT(3)},
+       {"RES_69",              BIT(4)},
+       {"RES_70",              BIT(5)},
+       {"TBTLSX",              BIT(6)},
+       {}
+};
+
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+       cnp_pfear_map,
+       tgl_pfear_map,
+       NULL
+};
+
 static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
        {"AUDIO_D3",            BIT(0)},
        {"OTG_D3",              BIT(1)},
@@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
        cnp_slps0_dbg0_map,
        cnp_slps0_dbg1_map,
        cnp_slps0_dbg2_map,
-       NULL,
+       NULL
 };
 
 static const struct pmc_bit_map cnp_ltr_show_map[] = {
@@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
 };
 
 static const struct pmc_reg_map cnp_reg_map = {
-       .pfear_sts = cnp_pfear_map,
+       .pfear_sts = ext_cnp_pfear_map,
        .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
        .slps0_dbg_maps = cnp_slps0_dbg_maps,
        .ltr_show_sts = cnp_ltr_show_map,
@@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = {
 };
 
 static const struct pmc_reg_map icl_reg_map = {
-       .pfear_sts = cnp_pfear_map,
+       .pfear_sts = ext_icl_pfear_map,
        .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
        .slps0_dbg_maps = cnp_slps0_dbg_maps,
        .ltr_show_sts = cnp_ltr_show_map,
@@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = {
        .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
 };
 
-static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
-{
-       return readb(pmcdev->regbase + offset);
-}
+static const struct pmc_reg_map tgl_reg_map = {
+       .pfear_sts = ext_tgl_pfear_map,
+       .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+       .slps0_dbg_maps = cnp_slps0_dbg_maps,
+       .ltr_show_sts = cnp_ltr_show_map,
+       .msr_sts = msr_map,
+       .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+       .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+       .regmap_length = CNP_PMC_MMIO_REG_LEN,
+       .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+       .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+       .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+       .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+       .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+};
 
 static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
 {
        return readl(pmcdev->regbase + reg_offset);
 }
 
-static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
-                                                       reg_offset, u32 val)
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+                                     u32 val)
 {
        writel(val, pmcdev->regbase + reg_offset);
 }
@@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void)
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 static bool slps0_dbg_latch;
 
-static void pmc_core_display_map(struct seq_file *s, int index,
-                                u8 pf_reg, const struct pmc_bit_map *pf_map)
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+       return readb(pmcdev->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+                                u8 pf_reg, const struct pmc_bit_map **pf_map)
 {
        seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
-                  index, pf_map[index].name,
-                  pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+                  ip, pf_map[idx][index].name,
+                  pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
 }
 
 static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
 {
        struct pmc_dev *pmcdev = s->private;
-       const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+       const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
        u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
-       int index, iter;
+       int index, iter, idx, ip = 0;
 
        iter = pmcdev->map->ppfear0_offset;
 
@@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
             index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
                pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
 
-       for (index = 0; map[index].name &&
-            index < pmcdev->map->ppfear_buckets * 8; index++)
-               pmc_core_display_map(s, index, pf_regs[index / 8], map);
+       for (idx = 0; maps[idx]; idx++) {
+               for (index = 0; maps[idx][index].name &&
+                    index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+                       pmc_core_display_map(s, index, idx, ip,
+                                            pf_regs[index / 8], maps);
+       }
 
        return 0;
 }
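
With this change pfear_sts is a NULL-terminated array of map tables rather than a single table, so a register layout can be expressed as a shared base table plus a per-generation extension (see ext_icl_pfear_map and ext_tgl_pfear_map above). A simplified model of the two-level walk, with illustrative struct and helper names:

struct bit_map {
	const char *name;	/* a NULL name terminates each table */
	unsigned int bit_mask;
};

static int count_entries(const struct bit_map **maps)
{
	int idx, index, total = 0;

	for (idx = 0; maps[idx]; idx++)			/* NULL ends the array */
		for (index = 0; maps[idx][index].name; index++)
			total++;			/* one IP per named bit */
	return total;
}
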
@@ -561,21 +623,22 @@ out_unlock:
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
 
-static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
-*userbuf, size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+                                        const char __user *userbuf,
+                                        size_t count, loff_t *ppos)
 {
        struct pmc_dev *pmcdev = &pmc;
        const struct pmc_reg_map *map = pmcdev->map;
        u32 val, buf_size, fd;
-       int err = 0;
+       int err;
 
        buf_size = count < 64 ? count : 64;
-       mutex_lock(&pmcdev->lock);
 
-       if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
-               err = -EFAULT;
-               goto out_unlock;
-       }
+       err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
+       if (err)
+               return err;
+
+       mutex_lock(&pmcdev->lock);
 
        if (val > map->ltr_ignore_max) {
                err = -EINVAL;
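
Two things improve in the hunk above: the user buffer is parsed before the mutex is taken, so no lock is held across the copy from user space, and the precise kstrtou32_from_user() error reaches user space instead of a blanket -EFAULT. The reordered shape, with demo_lock as a hypothetical mutex:

static ssize_t demo_write(struct file *file, const char __user *userbuf,
			  size_t count, loff_t *ppos)
{
	u32 val;
	int err;

	/* Parse first: no lock held across the copy from user space. */
	err = kstrtou32_from_user(userbuf, min_t(size_t, count, 64), 10, &val);
	if (err)
		return err;

	mutex_lock(&demo_lock);
	/* ... validate and apply val ... */
	mutex_unlock(&demo_lock);

	return count;
}
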
@@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
        debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
                            &pmc_core_dev_state);
 
-       debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
-                           &pmc_core_ppfear_fops);
+       if (pmcdev->map->pfear_sts)
+               debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+                                   pmcdev, &pmc_core_ppfear_fops);
 
        debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
                            &pmc_core_ltr_ignore_ops);
@@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
        INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
        INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
+       INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
+       INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
+       INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
        {}
 };
 
 MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
 
 static const struct pci_device_id pmc_pci_ids[] = {
-       { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
-       { 0, },
+       { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+       { }
 };
 
 /*
  * This quirk can be used on those platforms where
- * the platform BIOS enforces 24Mhx Crystal to shutdown
+ * the platform BIOS requires the 24MHz crystal to shut down
  * before PMC can assert SLP_S0#.
  */
 static int quirk_xtal_ignore(const struct dmi_system_id *id)
index fdee5772e5322a9112a3c40d0f7cf494841f98ee..f1a0792b3f9101cf17d9ae354d2facf5d16c28a7 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Intel Core SoC Power Management Controller Header File
  *
@@ -186,6 +186,8 @@ enum ppfear_regs {
 #define ICL_NUM_IP_IGN_ALLOWED                 20
 #define ICL_PMC_LTR_WIGIG                      0x1BFC
 
+#define TGL_NUM_IP_IGN_ALLOWED                 22
+
 struct pmc_bit_map {
        const char *name;
        u32 bit_mask;
@@ -213,7 +215,7 @@ struct pmc_bit_map {
  * captures them to have a common implementation.
  */
 struct pmc_reg_map {
-       const struct pmc_bit_map *pfear_sts;
+       const struct pmc_bit_map **pfear_sts;
        const struct pmc_bit_map *mphy_sts;
        const struct pmc_bit_map *pll_sts;
        const struct pmc_bit_map **slps0_dbg_maps;
index 6fe829f30997d07e5986362ed4a94fc8ad3582d6..e1266f5c63593e958a7d9412ff2166b3556d8209 100644 (file)
@@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
        INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
        INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
        INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
+       INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
+       INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
index 5c1da2bb14353ee3d6c6ede3de998469dc59b9cb..2433bf73f1eda943c73f7b4dc6e6b04293dceabc 100644 (file)
  */
 
 #include <linux/acpi.h>
-#include <linux/atomic.h>
-#include <linux/bitops.h>
 #include <linux/delay.h>
-#include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/notifier.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_qos.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/suspend.h>
 
 #include <asm/intel_pmc_ipc.h>
 
@@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset)
        writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
 }
 
-static inline u8 __maybe_unused ipc_data_readb(u32 offset)
-{
-       return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
-}
-
 static inline u32 ipc_data_readl(u32 offset)
 {
        return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
@@ -210,35 +195,6 @@ static inline int is_gcr_valid(u32 offset)
        return 0;
 }
 
-/**
- * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register
- * @offset:    offset of GCR register from GCR address base
- * @data:      data pointer for storing the register output
- *
- * Reads the 32-bit PMC GCR register at given offset.
- *
- * Return:     negative value on error or 0 on success.
- */
-int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
-       int ret;
-
-       spin_lock(&ipcdev.gcr_lock);
-
-       ret = is_gcr_valid(offset);
-       if (ret < 0) {
-               spin_unlock(&ipcdev.gcr_lock);
-               return ret;
-       }
-
-       *data = readl(ipcdev.gcr_mem_base + offset);
-
-       spin_unlock(&ipcdev.gcr_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_read);
-
 /**
  * intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register
  * @offset:    offset of GCR register from GCR address base
@@ -268,36 +224,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data)
 }
 EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64);
 
-/**
- * intel_pmc_gcr_write() - Write PMC GCR register
- * @offset:    offset of GCR register from GCR address base
- * @data:      register update value
- *
- * Writes the PMC GCR register of given offset with given
- * value.
- *
- * Return:     negative value on error or 0 on success.
- */
-int intel_pmc_gcr_write(u32 offset, u32 data)
-{
-       int ret;
-
-       spin_lock(&ipcdev.gcr_lock);
-
-       ret = is_gcr_valid(offset);
-       if (ret < 0) {
-               spin_unlock(&ipcdev.gcr_lock);
-               return ret;
-       }
-
-       writel(data, ipcdev.gcr_mem_base + offset);
-
-       spin_unlock(&ipcdev.gcr_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
-
 /**
  * intel_pmc_gcr_update() - Update PMC GCR register bits
  * @offset:    offset of GCR register from GCR address base
@@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
  *
  * Return:     negative value on error or 0 on success.
  */
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
+static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
 {
        u32 new_val;
        int ret = 0;
@@ -339,7 +265,6 @@ gcr_ipc_unlock:
        spin_unlock(&ipcdev.gcr_lock);
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
 
 static int update_no_reboot_bit(void *priv, bool set)
 {
@@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void)
  *
  * Return:     an IPC error code or 0 on success.
  */
-int intel_pmc_ipc_simple_command(int cmd, int sub)
+static int intel_pmc_ipc_simple_command(int cmd, int sub)
 {
        int ret;
 
@@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
 
 /**
  * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
@@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
  *
  * Return:     an IPC error code or 0 on success.
  */
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
-                         u32 outlen, u32 dptr, u32 sptr)
+static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+                                u32 outlen, u32 dptr, u32 sptr)
 {
        u32 wbuf[4] = { 0 };
        int ret;
@@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
 
 /**
  * intel_pmc_ipc_command() -  IPC command with input/output data
@@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
        }
        return (ssize_t)count;
 }
+static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store);
 
 static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
                                             struct device_attribute *attr,
@@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
        int subcmd;
        int ret;
 
-       if (kstrtoul(buf, 0, &val))
-               return -EINVAL;
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
 
        if (val)
                subcmd = 1;
@@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
        }
        return (ssize_t)count;
 }
-
-static DEVICE_ATTR(simplecmd, S_IWUSR,
-                  NULL, intel_pmc_ipc_simple_cmd_store);
-static DEVICE_ATTR(northpeak, S_IWUSR,
-                  NULL, intel_pmc_ipc_northpeak_store);
+static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store);
 
 static struct attribute *intel_ipc_attrs[] = {
        &dev_attr_northpeak.attr,
@@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = {
        .attrs = intel_ipc_attrs,
 };
 
+static const struct attribute_group *intel_ipc_groups[] = {
+       &intel_ipc_group,
+       NULL
+};
+
 static struct resource punit_res_array[] = {
        /* Punit BIOS */
        {
@@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
-       ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to create sysfs group %d\n",
-                       ret);
-               goto err_sys;
-       }
-
        ipcdev.has_gcr_regs = true;
 
        return 0;
-err_sys:
-       devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
+
 err_irq:
        platform_device_unregister(ipcdev.tco_dev);
        platform_device_unregister(ipcdev.punit_dev);
@@ -980,7 +898,6 @@ err_irq:
 
 static int ipc_plat_remove(struct platform_device *pdev)
 {
-       sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
        devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
        platform_device_unregister(ipcdev.tco_dev);
        platform_device_unregister(ipcdev.punit_dev);
@@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = {
        .driver = {
                .name = "pmc-ipc-plat",
                .acpi_match_table = ACPI_PTR(ipc_acpi_ids),
+               .dev_groups = intel_ipc_groups,
        },
 };
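
Handing the attribute groups to the driver core via .dev_groups, as above, makes the core create the sysfs files before the device is announced and remove them on unbind; that is what lets the manual sysfs_create_group()/sysfs_remove_group() calls and their error unwinding go away. Where a driver does not need a hand-built group array, the ATTRIBUTE_GROUPS() helper shortens the idiom further (attribute names here are illustrative):

static struct attribute *demo_attrs[] = {
	&dev_attr_northpeak.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);			/* generates demo_groups[] */

static struct platform_driver demo_driver = {
	.driver = {
		.name		= "demo",
		.dev_groups	= demo_groups,
	},
};
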
 
index cdab916fbf92775320400d1879e112dfdc8e4034..3d7da526613682348238f4f5ae97499aec7e765e 100644 (file)
 #include <asm/intel_scu_ipc.h>
 
 /* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY        0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE      0xFE /* Firmware update */
-#define IPCMSG_PCNTRL         0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION    0xF4 /* Get firmware revision */
+#define IPCMSG_PCNTRL         0xff /* Power controller unit read/write */
 
 /* Command id associated with message IPCMSG_PCNTRL */
 #define IPC_CMD_PCNTRL_W      0 /* Register write */
 #define IPC_RWBUF_SIZE    20           /* IPC Read buffer Size */
 #define IPC_IOC                  0x100         /* IPC command register IOC bit */
 
-#define PCI_DEVICE_ID_LINCROFT         0x082a
-#define PCI_DEVICE_ID_PENWELL          0x080e
-#define PCI_DEVICE_ID_CLOVERVIEW       0x08ea
-#define PCI_DEVICE_ID_TANGIER          0x11a0
-
-/* intel scu ipc driver data */
-struct intel_scu_ipc_pdata_t {
-       u32 i2c_base;
-       u32 i2c_len;
-       u8 irq_mode;
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
-       .i2c_base = 0xff12b000,
-       .i2c_len = 0x10,
-       .irq_mode = 0,
-};
-
-/* Penwell and Cloverview */
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
-       .i2c_base = 0xff12b000,
-       .i2c_len = 0x10,
-       .irq_mode = 1,
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
-       .i2c_base  = 0xff00d000,
-       .i2c_len = 0x10,
-       .irq_mode = 0,
-};
-
 struct intel_scu_ipc_dev {
        struct device *dev;
        void __iomem *ipc_base;
-       void __iomem *i2c_base;
        struct completion cmd_complete;
        u8 irq_mode;
 };
 
 static struct intel_scu_ipc_dev  ipcdev; /* Only one for now */
 
+#define IPC_STATUS             0x04
+#define IPC_STATUS_IRQ         BIT(2)
+#define IPC_STATUS_ERR         BIT(1)
+#define IPC_STATUS_BUSY                BIT(0)
+
 /*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
+ * IPC Write/Read Buffers:
+ * 16-byte buffers for sending data to and receiving data from the SCU.
  */
+#define IPC_WRITE_BUFFER       0x80
 #define IPC_READ_BUFFER                0x90
 
-#define IPC_I2C_CNTRL_ADDR     0
-#define I2C_DATA_ADDR          0x04
+/* Timeout in jiffies */
+#define IPC_TIMEOUT            (3 * HZ)
 
 static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
 
@@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
  */
 static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
 {
-       if (scu->irq_mode) {
-               reinit_completion(&scu->cmd_complete);
-               writel(cmd | IPC_IOC, scu->ipc_base);
-       }
-       writel(cmd, scu->ipc_base);
+       reinit_completion(&scu->cmd_complete);
+       writel(cmd | IPC_IOC, scu->ipc_base);
 }
 
 /*
@@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
  */
 static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
 {
-       writel(data, scu->ipc_base + 0x80 + offset);
+       writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
 }
 
 /*
@@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32
  */
 static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
 {
-       return __raw_readl(scu->ipc_base + 0x04);
+       return __raw_readl(scu->ipc_base + IPC_STATUS);
 }
 
 /* Read ipc byte data */
@@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
 /* Wait until the SCU status is no longer busy */
 static inline int busy_loop(struct intel_scu_ipc_dev *scu)
 {
-       u32 status = ipc_read_status(scu);
-       u32 loop_count = 100000;
+       unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
 
-       /* break if scu doesn't reset busy bit after huge retry */
-       while ((status & BIT(0)) && --loop_count) {
-               udelay(1); /* scu processing time is in few u secods */
-               status = ipc_read_status(scu);
-       }
+       do {
+               u32 status;
 
-       if (status & BIT(0)) {
-               dev_err(scu->dev, "IPC timed out");
-               return -ETIMEDOUT;
-       }
+               status = ipc_read_status(scu);
+               if (!(status & IPC_STATUS_BUSY))
+                       return (status & IPC_STATUS_ERR) ? -EIO : 0;
 
-       if (status & BIT(1))
-               return -EIO;
+               usleep_range(50, 100);
+       } while (time_before(jiffies, end));
 
-       return 0;
+       dev_err(scu->dev, "IPC timed out");
+       return -ETIMEDOUT;
 }
 
 /* Wait till the IPC IOC interrupt is received or the timeout (3 s) expires */
@@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
 {
        int status;
 
-       if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) {
+       if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) {
                dev_err(scu->dev, "IPC timed out\n");
                return -ETIMEDOUT;
        }
 
        status = ipc_read_status(scu);
-       if (status & BIT(1))
+       if (status & IPC_STATUS_ERR)
                return -EIO;
 
        return 0;
@@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
 }
 
 /**
- *     intel_scu_ipc_ioread8           -       read a word via the SCU
- *     @addr: register on SCU
- *     @data: return pointer for read byte
+ * intel_scu_ipc_ioread8               -       read a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Return pointer for read byte
  *
- *     Read a single register. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * Read a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
  *
- *     This function may sleep.
+ * This function may sleep.
  */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data)
 {
@@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data)
 EXPORT_SYMBOL(intel_scu_ipc_ioread8);
 
 /**
- *     intel_scu_ipc_ioread16          -       read a word via the SCU
- *     @addr: register on SCU
- *     @data: return pointer for read word
- *
- *     Read a register pair. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
- *
- *     This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
-{
-       u16 x[2] = {addr, addr + 1};
-       return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
-
-/**
- *     intel_scu_ipc_ioread32          -       read a dword via the SCU
- *     @addr: register on SCU
- *     @data: return pointer for read dword
- *
- *     Read four registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
- *
- *     This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
-       u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
-       return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- *     intel_scu_ipc_iowrite8          -       write a byte via the SCU
- *     @addr: register on SCU
- *     @data: byte to write
+ * intel_scu_ipc_iowrite8              -       write a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Byte to write
  *
- *     Write a single register. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * Write a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
  *
- *     This function may sleep.
+ * This function may sleep.
  */
 int intel_scu_ipc_iowrite8(u16 addr, u8 data)
 {
@@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data)
 EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
 
 /**
- *     intel_scu_ipc_iowrite16         -       write a word via the SCU
- *     @addr: register on SCU
- *     @data: word to write
- *
- *     Write two registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_readv         -       read a set of registers
+ * @addr: Register list
+ * @data: Bytes to return
+ * @len: Length of array
  *
- *     This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
-       u16 x[2] = {addr, addr + 1};
-       return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- *     intel_scu_ipc_iowrite32         -       write a dword via the SCU
- *     @addr: register on SCU
- *     @data: dword to write
+ * Read registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
  *
- *     Write four registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * The largest array length permitted by the hardware is 5 items.
  *
- *     This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
-       u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
-       return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- *     intel_scu_ipc_readvv            -       read a set of registers
- *     @addr: register list
- *     @data: bytes to return
- *     @len: length of array
- *
- *     Read registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
- *
- *     The largest array length permitted by the hardware is 5 items.
- *
- *     This function may sleep.
+ * This function may sleep.
  */
 int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
 {
@@ -379,18 +273,17 @@ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
 EXPORT_SYMBOL(intel_scu_ipc_readv);
 
 /**
- *     intel_scu_ipc_writev            -       write a set of registers
- *     @addr: register list
- *     @data: bytes to write
- *     @len: length of array
- *
- *     Write registers. Returns 0 on success or an error code. All
- *     locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_writev                -       write a set of registers
+ * @addr: Register list
+ * @data: Bytes to write
+ * @len: Length of array
  *
- *     The largest array length permitted by the hardware is 5 items.
+ * Write registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
  *
- *     This function may sleep.
+ * The largest array length permitted by the hardware is 5 items.
  *
+ * This function may sleep.
  */
 int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
 {
@@ -399,19 +292,18 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
 EXPORT_SYMBOL(intel_scu_ipc_writev);
 
 /**
- *     intel_scu_ipc_update_register   -       r/m/w a register
- *     @addr: register address
- *     @bits: bits to update
- *     @mask: mask of bits to update
- *
- *     Read-modify-write power control unit register. The first data argument
- *     must be register value and second is mask value
- *     mask is a bitmap that indicates which bits to update.
- *     0 = masked. Don't modify this bit, 1 = modify this bit.
- *     returns 0 on success or an error code.
- *
- *     This function may sleep. Locking between SCU accesses is handled
- *     for the caller.
+ * intel_scu_ipc_update_register       -       r/m/w a register
+ * @addr: Register address
+ * @bits: Bits to update
+ * @mask: Mask of bits to update
+ *
+ * Read-modify-write a power control unit register. The first data
+ * argument must be the register value and the second the mask value.
+ * The mask is a bitmap that indicates which bits to update: %0 = masked
+ * (don't modify this bit), %1 = modify this bit. Returns %0 on success
+ * or an error code.
+ *
+ * This function may sleep. Locking between SCU accesses is handled
+ * for the caller.
  */
 int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
 {
@@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
 EXPORT_SYMBOL(intel_scu_ipc_update_register);
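
A short worked example of the mask semantics documented above (the register address is made up): with bits = 0x01 and mask = 0x09, bit 0 is set, bit 3 is cleared, and every bit outside the mask is left untouched.

/* Set bit 0, clear bit 3, leave bits 1-2 and 4-7 alone. */
err = intel_scu_ipc_update_register(0x31, 0x01, 0x09);
if (err)
	return err;
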
 
 /**
- *     intel_scu_ipc_simple_command    -       send a simple command
- *     @cmd: command
- *     @sub: sub type
+ * intel_scu_ipc_simple_command        -       send a simple command
+ * @cmd: Command
+ * @sub: Sub type
  *
- *     Issue a simple command to the SCU. Do not use this interface if
- *     you must then access data as any data values may be overwritten
- *     by another SCU access by the time this function returns.
+ * Issue a simple command to the SCU. Do not use this interface if you must
+ * then access data as any data values may be overwritten by another SCU
+ * access by the time this function returns.
  *
- *     This function may sleep. Locking for SCU accesses is handled for
- *     the caller.
+ * This function may sleep. Locking for SCU accesses is handled for the
+ * caller.
  */
 int intel_scu_ipc_simple_command(int cmd, int sub)
 {
@@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub)
 EXPORT_SYMBOL(intel_scu_ipc_simple_command);
 
 /**
- *     intel_scu_ipc_command   -       command with data
- *     @cmd: command
- *     @sub: sub type
- *     @in: input data
- *     @inlen: input length in dwords
- *     @out: output data
- *     @outlein: output length in dwords
- *
- *     Issue a command to the SCU which involves data transfers. Do the
- *     data copies under the lock but leave it for the caller to interpret
+ * intel_scu_ipc_command       -       command with data
+ * @cmd: Command
+ * @sub: Sub type
+ * @in: Input data
+ * @inlen: Input length in dwords
+ * @out: Output data
+ * @outlen: Output length in dwords
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret.
  */
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
                          u32 *out, int outlen)
@@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
 }
 EXPORT_SYMBOL(intel_scu_ipc_command);
 
-#define IPC_SPTR               0x08
-#define IPC_DPTR               0x0C
-
-/**
- * intel_scu_ipc_raw_command() - IPC command with data and pointers
- * @cmd:       IPC command code.
- * @sub:       IPC command sub type.
- * @in:                input data of this IPC command.
- * @inlen:     input data length in dwords.
- * @out:       output data of this IPC command.
- * @outlen:    output data length in dwords.
- * @sptr:      data writing to SPTR register.
- * @dptr:      data writing to DPTR register.
- *
- * Send an IPC command to SCU with input/output data and source/dest pointers.
- *
- * Return:     an IPC error code or 0 on success.
- */
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
-                             u32 *out, int outlen, u32 dptr, u32 sptr)
-{
-       struct intel_scu_ipc_dev *scu = &ipcdev;
-       int inbuflen = DIV_ROUND_UP(inlen, 4);
-       u32 inbuf[4];
-       int i, err;
-
-       /* Up to 16 bytes */
-       if (inbuflen > 4)
-               return -EINVAL;
-
-       mutex_lock(&ipclock);
-       if (scu->dev == NULL) {
-               mutex_unlock(&ipclock);
-               return -ENODEV;
-       }
-
-       writel(dptr, scu->ipc_base + IPC_DPTR);
-       writel(sptr, scu->ipc_base + IPC_SPTR);
-
-       /*
-        * SRAM controller doesn't support 8-bit writes, it only
-        * supports 32-bit writes, so we have to copy input data into
-        * the temporary buffer, and SCU FW will use the inlen to
-        * determine the actual input data length in the temporary
-        * buffer.
-        */
-       memcpy(inbuf, in, inlen);
-
-       for (i = 0; i < inbuflen; i++)
-               ipc_data_writel(scu, inbuf[i], 4 * i);
-
-       ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
-       err = intel_scu_ipc_check_status(scu);
-       if (!err) {
-               for (i = 0; i < outlen; i++)
-                       *out++ = ipc_data_readl(scu, 4 * i);
-       }
-
-       mutex_unlock(&ipclock);
-       return err;
-}
-EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command);
-
-/* I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ  2 /* I2C Read command */
-
-/**
- *     intel_scu_ipc_i2c_cntrl         -       I2C read/write operations
- *     @addr: I2C address + command bits
- *     @data: data to read/write
- *
- *     Perform an an I2C read/write operation via the SCU. All locking is
- *     handled for the caller. This function may sleep.
- *
- *     Returns an error code or 0 on success.
- *
- *     This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
-{
-       struct intel_scu_ipc_dev *scu = &ipcdev;
-       u32 cmd = 0;
-
-       mutex_lock(&ipclock);
-       if (scu->dev == NULL) {
-               mutex_unlock(&ipclock);
-               return -ENODEV;
-       }
-       cmd = (addr >> 24) & 0xFF;
-       if (cmd == IPC_I2C_READ) {
-               writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
-               /* Write not getting updated without delay */
-               usleep_range(1000, 2000);
-               *data = readl(scu->i2c_base + I2C_DATA_ADDR);
-       } else if (cmd == IPC_I2C_WRITE) {
-               writel(*data, scu->i2c_base + I2C_DATA_ADDR);
-               usleep_range(1000, 2000);
-               writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
-       } else {
-               dev_err(scu->dev,
-                       "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
-               mutex_unlock(&ipclock);
-               return -EIO;
-       }
-       mutex_unlock(&ipclock);
-       return 0;
-}
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
-
 /*
  * The interrupt handler gets called when the ioc bit of IPC_COMMAND_REG is
  * set to 1. The caller API must then wait for the interrupt handler to be called
@@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
 static irqreturn_t ioc(int irq, void *dev_id)
 {
        struct intel_scu_ipc_dev *scu = dev_id;
+       int status = ipc_read_status(scu);
 
-       if (scu->irq_mode)
-               complete(&scu->cmd_complete);
+       writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
+       complete(&scu->cmd_complete);
 
        return IRQ_HANDLED;
 }
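
Since every command is now issued with IPC_IOC set, the handler can signal the completion unconditionally; writing the status word back with IPC_STATUS_IRQ set acknowledges the pending interrupt first. A reduced sketch of the handshake (struct and field names are illustrative; IPC_STATUS/IPC_STATUS_IRQ as defined earlier in this file):

static irqreturn_t demo_ioc(int irq, void *dev_id)
{
	struct demo_scu *scu = dev_id;
	u32 status = readl(scu->ipc_base + IPC_STATUS);

	/* Ack the interrupt, then wake the sleeping command issuer. */
	writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
	complete(&scu->cmd_complete);

	return IRQ_HANDLED;
}
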
@@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int err;
        struct intel_scu_ipc_dev *scu = &ipcdev;
-       struct intel_scu_ipc_pdata_t *pdata;
 
        if (scu->dev)           /* We support only one SCU */
                return -EBUSY;
 
-       pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
-       if (!pdata)
-               return -ENODEV;
-
-       scu->irq_mode = pdata->irq_mode;
-
        err = pcim_enable_device(pdev);
        if (err)
                return err;
@@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        scu->ipc_base = pcim_iomap_table(pdev)[0];
 
-       scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
-       if (!scu->i2c_base)
-               return -ENOMEM;
-
        err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
                               scu);
        if (err)
@@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 }
 
-#define SCU_DEVICE(id, pdata)  {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata}
-
 static const struct pci_device_id pci_ids[] = {
-       SCU_DEVICE(PCI_DEVICE_ID_LINCROFT,      intel_scu_ipc_lincroft_pdata),
-       SCU_DEVICE(PCI_DEVICE_ID_PENWELL,       intel_scu_ipc_penwell_pdata),
-       SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW,    intel_scu_ipc_penwell_pdata),
-       SCU_DEVICE(PCI_DEVICE_ID_TANGIER,       intel_scu_ipc_tangier_pdata),
+       { PCI_VDEVICE(INTEL, 0x080e) },
+       { PCI_VDEVICE(INTEL, 0x08ea) },
+       { PCI_VDEVICE(INTEL, 0x11a0) },
        {}
 };
 
index 3de5a3c66529d7f9f6f30c3104d9c7429c37b90a..0c2aa22c7a12eaf36d9670dc450868fbc64b4543 100644 (file)
@@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
        {0x7F, 0x00, 0x0B},
        {0x7F, 0x10, 0x12},
        {0x7F, 0x20, 0x23},
+       {0x94, 0x03, 0x03},
+       {0x95, 0x03, 0x03},
 };
 
 static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
@@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
        {0xD0, 0x03, 0x08},
        {0x7F, 0x02, 0x00},
        {0x7F, 0x08, 0x00},
+       {0x95, 0x03, 0x03},
 };
 
 struct isst_cmd {
index e84d3e983e0cd71f8f1682b63793e4244bc2f6cd..8e3fb55ac1ae264df7d4e7a555b82d458b019db4 100644 (file)
@@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file,
        u32 verbosity;
        int err;
 
-       if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
-               return -EFAULT;
+       err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+       if (err)
+               return err;
 
        err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity);
        if (err) {
                pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err);
-               count = err;
+               return err;
        }
 
        return count;
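
The handler now follows the write() contract strictly: a negative errno is returned as-is, and the full byte count only on success; the old `count = err` assignment laundered the errno through a size_t and obscured the error path. The corrected shape (demo_set_verbosity() is hypothetical):

static ssize_t demo_trc_verb_write(struct file *file,
				   const char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	u32 verbosity;
	int err;

	err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
	if (err)
		return err;	/* the real parse error, not a blanket -EFAULT */

	err = demo_set_verbosity(verbosity);
	if (err)
		return err;	/* never fold an errno into the byte count */

	return count;		/* success: all bytes consumed */
}
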
@@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file,
        u32 verbosity;
        int err;
 
-       if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
-               return -EFAULT;
+       err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+       if (err)
+               return err;
 
        err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity);
        if (err) {
                pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err);
-               count = err;
+               return err;
        }
 
        return count;
index df8565bad595c72ba7b889a7d50afe2e0e75d9c6..c4c742bb23cf7c66871e93d31992f1592f16b80f 100644 (file)
@@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = {
 
 static int telemetry_pltdrv_probe(struct platform_device *pdev)
 {
-       struct resource *res0 = NULL, *res1 = NULL;
        const struct x86_cpu_id *id;
-       int size, ret = -ENOMEM;
+       void __iomem *mem;
+       int ret;
 
        id = x86_match_cpu(telemetry_cpu_ids);
        if (!id)
@@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
 
        telm_conf = (struct telemetry_plt_config *)id->driver_data;
 
-       res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res0) {
-               ret = -EINVAL;
-               goto out;
-       }
-       size = resource_size(res0);
-       if (!devm_request_mem_region(&pdev->dev, res0->start, size,
-                                    pdev->name)) {
-               ret = -EBUSY;
-               goto out;
-       }
-       telm_conf->pss_config.ssram_base_addr = res0->start;
-       telm_conf->pss_config.ssram_size = size;
+       mem = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
 
-       res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res1) {
-               ret = -EINVAL;
-               goto out;
-       }
-       size = resource_size(res1);
-       if (!devm_request_mem_region(&pdev->dev, res1->start, size,
-                                    pdev->name)) {
-               ret = -EBUSY;
-               goto out;
-       }
+       telm_conf->pss_config.regmap = mem;
 
-       telm_conf->ioss_config.ssram_base_addr = res1->start;
-       telm_conf->ioss_config.ssram_size = size;
+       mem = devm_platform_ioremap_resource(pdev, 1);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
 
-       telm_conf->pss_config.regmap = ioremap_nocache(
-                                       telm_conf->pss_config.ssram_base_addr,
-                                       telm_conf->pss_config.ssram_size);
-       if (!telm_conf->pss_config.regmap) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       telm_conf->ioss_config.regmap = ioremap_nocache(
-                               telm_conf->ioss_config.ssram_base_addr,
-                               telm_conf->ioss_config.ssram_size);
-       if (!telm_conf->ioss_config.regmap) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       telm_conf->ioss_config.regmap = mem;
 
        mutex_init(&telm_conf->telem_lock);
        mutex_init(&telm_conf->telem_trace_lock);
@@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
        return 0;
 
 out:
-       if (res0)
-               release_mem_region(res0->start, resource_size(res0));
-       if (res1)
-               release_mem_region(res1->start, resource_size(res1));
-       if (telm_conf->pss_config.regmap)
-               iounmap(telm_conf->pss_config.regmap);
-       if (telm_conf->ioss_config.regmap)
-               iounmap(telm_conf->ioss_config.regmap);
        dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
 
        return ret;
@@ -1204,9 +1163,6 @@ out:
 static int telemetry_pltdrv_remove(struct platform_device *pdev)
 {
        telemetry_clear_pltdata();
-       iounmap(telm_conf->pss_config.regmap);
-       iounmap(telm_conf->ioss_config.regmap);
-
        return 0;
 }
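
devm_platform_ioremap_resource() collapses the platform_get_resource()/devm_request_mem_region()/ioremap() triple into one managed call, which is why both the manual unwinding in the probe error path and the iounmap() calls in remove could be deleted. The whole mapping step reduces to:

void __iomem *base;

base = devm_platform_ioremap_resource(pdev, 0);	/* resource index 0 */
if (IS_ERR(base))
	return PTR_ERR(base);
/* base is unmapped and the region released automatically on unbind. */
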
 
index 8fe51e43f1bc1a3f49e3d2b81c0c707057e5a311..c27548fd386acf5b0486975584a26194e0134909 100644 (file)
@@ -35,6 +35,8 @@
 #define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET       0x23
 #define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET       0x24
 #define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION     0x2a
+#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET     0x2b
+#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET                0x2e
 #define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET                0x30
 #define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET                0x31
 #define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET                0x32
@@ -46,6 +48,8 @@
 #define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET        0x41
 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET     0x42
 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET        0x43
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET     0x44
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
 #define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
 #define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
 #define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET  0x52
@@ -68,6 +72,7 @@
 #define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET    0xd1
 #define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET  0xd2
 #define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET    0xd3
+#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET        0xe2
 #define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET       0xe3
 #define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET     0xe4
 #define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET     0xe5
 #define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET   0xf6
 #define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET        0xf7
 #define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET        0xf8
+#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
+#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET    0xfb
+#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET    0xfc
 #define MLXPLAT_CPLD_LPC_IO_RANGE              0x100
 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF           0xdb
 #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF           0xda
+#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF           0xdc
 
 #define MLXPLAT_CPLD_LPC_PIO_OFFSET            0x10000UL
 #define MLXPLAT_CPLD_LPC_REG1  ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
 #define MLXPLAT_CPLD_LPC_REG2  ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
                                  MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
                                  MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG3  ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+                                 MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
+                                 MLXPLAT_CPLD_LPC_PIO_OFFSET)
 
 /* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
 #define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF        0x04
 #define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
 #define MLXPLAT_CPLD_PSU_MASK          GENMASK(1, 0)
 #define MLXPLAT_CPLD_PWR_MASK          GENMASK(1, 0)
+#define MLXPLAT_CPLD_PSU_EXT_MASK      GENMASK(3, 0)
+#define MLXPLAT_CPLD_PWR_EXT_MASK      GENMASK(3, 0)
 #define MLXPLAT_CPLD_FAN_MASK          GENMASK(3, 0)
 #define MLXPLAT_CPLD_ASIC_MASK         GENMASK(1, 0)
 #define MLXPLAT_CPLD_FAN_NG_MASK       GENMASK(5, 0)
 #define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK        GENMASK(7, 4)
 #define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK        GENMASK(3, 0)
+#define MLXPLAT_CPLD_VOLTREG_UPD_MASK  GENMASK(5, 4)
+#define MLXPLAT_CPLD_I2C_CAP_BIT       0x04
+#define MLXPLAT_CPLD_I2C_CAP_MASK      GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
+
+/* Masks for aggregation for comex carriers */
+#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF        (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+                                        MLXPLAT_CPLD_AGGR_MASK_CARRIER)
+#define MLXPLAT_CPLD_LOW_AGGRCX_MASK   0xc1
 
 /* Default I2C parent bus number */
 #define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR       1
 
 /* Maximum number of possible physical buses equipped on system */
 #define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM      16
+#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM  24
 
 /* Number of channels in group */
 #define MLXPLAT_CPLD_GRP_CHNL_NUM              8
 /* Start channel numbers */
 #define MLXPLAT_CPLD_CH1                       2
 #define MLXPLAT_CPLD_CH2                       10
+#define MLXPLAT_CPLD_CH3                       18
 
 /* Number of LPC attached MUX platform devices */
-#define MLXPLAT_CPLD_LPC_MUX_DEVS              2
+#define MLXPLAT_CPLD_LPC_MUX_DEVS              3
 
 /* Hotplug devices adapter numbers */
 #define MLXPLAT_CPLD_NR_NONE                   -1
 #define MLXPLAT_CPLD_PSU_DEFAULT_NR            10
 #define MLXPLAT_CPLD_PSU_MSNXXXX_NR            4
+#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2           3
 #define MLXPLAT_CPLD_FAN1_DEFAULT_NR           11
 #define MLXPLAT_CPLD_FAN2_DEFAULT_NR           12
 #define MLXPLAT_CPLD_FAN3_DEFAULT_NR           13
@@ -187,8 +213,24 @@ static const struct resource mlxplat_lpc_resources[] = {
                               IORESOURCE_IO),
 };
 
+/* Platform i2c next generation systems data */
+static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
+       {
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+               .mask = MLXPLAT_CPLD_I2C_CAP_MASK,
+               .bit = MLXPLAT_CPLD_I2C_CAP_BIT,
+       },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = {
+       {
+               .data = mlxplat_mlxcpld_i2c_ng_items_data,
+       },
+};
+
 /* Platform next generation systems i2c data */
 static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
+       .items = mlxplat_mlxcpld_i2c_ng_items,
        .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
        .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
        .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
@@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = {
 static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
 
 /* Platform mux data */
-static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
+static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
        {
                .parent = 1,
                .base_nr = MLXPLAT_CPLD_CH1,
@@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
 
 };
 
+/* Platform mux configuration variables */
+static int mlxplat_max_adap_num;
+static int mlxplat_mux_num;
+static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
+
+/* Platform extended mux data */
+static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
+       {
+               .parent = 1,
+               .base_nr = MLXPLAT_CPLD_CH1,
+               .write_only = 1,
+               .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+               .reg_size = 1,
+               .idle_in_use = 1,
+       },
+       {
+               .parent = 1,
+               .base_nr = MLXPLAT_CPLD_CH2,
+               .write_only = 1,
+               .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+               .reg_size = 1,
+               .idle_in_use = 1,
+       },
+       {
+               .parent = 1,
+               .base_nr = MLXPLAT_CPLD_CH3,
+               .write_only = 1,
+               .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+               .reg_size = 1,
+               .idle_in_use = 1,
+       },
+
+};
+
 /* Platform hotplug devices */
 static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
        {
@@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
        },
 };
 
+/* Platform hotplug comex carrier system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = {
+       {
+               .label = "psu1",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(0),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+       {
+               .label = "psu2",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(1),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+};
+
 /* Platform hotplug default data */
 static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
        {
@@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
        },
 };
 
+static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
+       {
+               .data = mlxplat_mlxcpld_comex_psu_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = MLXPLAT_CPLD_PSU_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
+               .inversed = 1,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_pwr_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = MLXPLAT_CPLD_PWR_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
+               .inversed = 0,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_fan_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+               .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+               .mask = MLXPLAT_CPLD_FAN_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
+               .inversed = 1,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_asic_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+               .mask = MLXPLAT_CPLD_ASIC_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+               .inversed = 0,
+               .health = true,
+       },
+};
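+
+/*
+ * Note that only the PSU data above is carrier-specific; the power and fan
+ * entries reuse the default item data under the carrier aggregation mask,
+ * and the ASIC health entry is reused unchanged.
+ */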
+
 static
 struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
        .items = mlxplat_mlxcpld_default_items,
@@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
        .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
 };
 
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
+       .items = mlxplat_mlxcpld_comex_items,
+       .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
+       .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+       .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
+       .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
+       .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK,
+};
+
 static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
        {
                .label = "pwr1",
@@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
        .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
 };
 
+/* Platform hotplug extended system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = {
+       {
+               .label = "psu1",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(0),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+       {
+               .label = "psu2",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(1),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+       {
+               .label = "psu3",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(2),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+       {
+               .label = "psu4",
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = BIT(3),
+               .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+       },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = {
+       {
+               .label = "pwr1",
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = BIT(0),
+               .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+               .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+       },
+       {
+               .label = "pwr2",
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = BIT(1),
+               .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+               .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+       },
+       {
+               .label = "pwr3",
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = BIT(2),
+               .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+               .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+       },
+       {
+               .label = "pwr4",
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = BIT(3),
+               .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+               .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+       },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
+       {
+               .data = mlxplat_mlxcpld_ext_psu_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+               .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+               .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+               .inversed = 1,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_ext_pwr_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+               .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+               .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+               .inversed = 0,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+               .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+               .inversed = 1,
+               .health = false,
+       },
+       {
+               .data = mlxplat_mlxcpld_default_asic_items_data,
+               .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+               .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+               .mask = MLXPLAT_CPLD_ASIC_MASK,
+               .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+               .inversed = 0,
+               .health = true,
+       },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
+       .items = mlxplat_mlxcpld_ext_items,
+       .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
+       .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+       .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+       .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+       .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
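+
+/*
+ * The extended family describes up to four PSUs and power converters; the
+ * PSU and power items also carry a .capability register
+ * (MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET), presumably letting the
+ * hotplug core mask out units that are not populated on a given system.
+ */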
+
 /* Platform led default data */
 static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
        {
@@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = {
                .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
 };
 
+/* Platform LED data for Comex-based 100GbE systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = {
+       {
+               .label = "status:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "status:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "psu:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "psu:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "fan1:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "fan1:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "fan2:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "fan2:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "fan3:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "fan3:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+       {
+               .label = "fan4:green",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "fan4:red",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+       },
+       {
+               .label = "uid:blue",
+               .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+               .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+       },
+};
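+
+/*
+ * Each green/red pair above shares one register nibble (identical .reg and
+ * .mask), so the colour is presumably selected by the value written within
+ * the nibble rather than by separate mask bits.
+ */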
+
+static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
+               .data = mlxplat_mlxcpld_comex_100G_led_data,
+               .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
+};
+
 /* Platform register access default */
 static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
        {
@@ -1156,6 +1481,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
                .mask = GENMASK(7, 0) & ~BIT(3),
                .mode = 0200,
        },
+       {
+               .label = "select_iio",
+               .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(6),
+               .mode = 0644,
+       },
        {
                .label = "asic_health",
                .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
@@ -1244,6 +1575,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
                .mask = GENMASK(7, 0) & ~BIT(3),
                .mode = 0444,
        },
+       {
+               .label = "reset_platform",
+               .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(4),
+               .mode = 0444,
+       },
+       {
+               .label = "reset_soc",
+               .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(5),
+               .mode = 0444,
+       },
        {
                .label = "reset_comex_wd",
                .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
@@ -1262,6 +1605,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
                .mask = GENMASK(7, 0) & ~BIT(1),
                .mode = 0444,
        },
+       {
+               .label = "reset_sw_pwr_off",
+               .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(2),
+               .mode = 0444,
+       },
        {
                .label = "reset_comex_thermal",
                .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
@@ -1274,6 +1623,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
                .mask = GENMASK(7, 0) & ~BIT(5),
                .mode = 0444,
        },
+       {
+               .label = "reset_ac_pwr_fail",
+               .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(6),
+               .mode = 0444,
+       },
        {
                .label = "psu1_on",
                .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
@@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
                .bit = GENMASK(7, 0),
                .mode = 0444,
        },
+       {
+               .label = "voltreg_update_status",
+               .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+               .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+               .bit = 5,
+               .mode = 0444,
+       },
+       {
+               .label = "vpd_wp",
+               .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(3),
+               .mode = 0644,
+       },
+       {
+               .label = "pcie_asic_reset_dis",
+               .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(4),
+               .mode = 0644,
+       },
+       {
+               .label = "config1",
+               .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+               .bit = GENMASK(7, 0),
+               .mode = 0444,
+       },
+       {
+               .label = "config2",
+               .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+               .bit = GENMASK(7, 0),
+               .mode = 0444,
+       },
+       {
+               .label = "ufm_version",
+               .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+               .bit = GENMASK(7, 0),
+               .mode = 0444,
+       },
 };
 
 static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
@@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
@@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+       case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
@@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
                return true;
        }
        return false;
@@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+       case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
@@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
                return true;
        }
        return false;
@@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = {
        { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
 };
 
+static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = {
+       { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET,
+         MLXPLAT_CPLD_LOW_AGGRCX_MASK },
+       { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
+       { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+       { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+       { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+       { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+};
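+
+/*
+ * The reg_default tables above seed the REGCACHE_FLAT register cache of
+ * the regmap configurations below, so cached reads and masked updates of
+ * these registers start from a known value before the hardware is first
+ * touched.
+ */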
+
 struct mlxplat_mlxcpld_regmap_context {
        void __iomem *base;
 };
@@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = {
        .reg_write = mlxplat_mlxcpld_reg_write,
 };
 
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 255,
+       .cache_type = REGCACHE_FLAT,
+       .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+       .readable_reg = mlxplat_mlxcpld_readable_reg,
+       .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+       .reg_defaults = mlxplat_mlxcpld_regmap_comex_default,
+       .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default),
+       .reg_read = mlxplat_mlxcpld_reg_read,
+       .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 255,
+       .cache_type = REGCACHE_FLAT,
+       .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+       .readable_reg = mlxplat_mlxcpld_readable_reg,
+       .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+       .reg_defaults = mlxplat_mlxcpld_regmap_ng400,
+       .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400),
+       .reg_read = mlxplat_mlxcpld_reg_read,
+       .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
 static struct resource mlxplat_mlxcpld_resources[] = {
        [0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"),
 };
@@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_default_channels[i];
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_default_channels[i]);
@@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
        mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
 
        return 1;
-};
+}
 
 static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
        mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
 
        return 1;
-};
+}
 
 static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
        mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
 
        return 1;
-};
+}
 
 static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
        mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
 
        return 1;
-};
+}
 
 static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
                mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
                mlxplat_mux_data[i].n_values =
                                ARRAY_SIZE(mlxplat_msn21xx_channels);
@@ -1914,7 +2380,57 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
        mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
 
        return 1;
-};
+}
+
+static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
+{
+       int i;
+
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data);
+       mlxplat_mux_data = mlxplat_extended_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
+               mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+               mlxplat_mux_data[i].n_values =
+                               ARRAY_SIZE(mlxplat_msn21xx_channels);
+       }
+       mlxplat_hotplug = &mlxplat_mlxcpld_comex_data;
+       mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+       mlxplat_led = &mlxplat_comex_100G_led_data;
+       mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+       mlxplat_fan = &mlxplat_default_fan_data;
+       for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+               mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+       mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
+
+       return 1;
+}
+
+static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
+{
+       int i;
+
+       mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+       mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+       mlxplat_mux_data = mlxplat_default_mux_data;
+       for (i = 0; i < mlxplat_mux_num; i++) {
+               mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+               mlxplat_mux_data[i].n_values =
+                               ARRAY_SIZE(mlxplat_msn21xx_channels);
+       }
+       mlxplat_hotplug = &mlxplat_mlxcpld_ext_data;
+       mlxplat_hotplug->deferred_nr =
+               mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+       mlxplat_led = &mlxplat_default_ng_led_data;
+       mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+       mlxplat_fan = &mlxplat_default_fan_data;
+       for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+               mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+       mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+       mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+       return 1;
+}
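+
+/*
+ * Each callback above fills in the full per-system configuration consumed
+ * at init time: mux topology, hotplug, LED, register I/O and fan data, the
+ * watchdog set and the matching regmap configuration.
+ */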
 
 static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
        {
@@ -1953,6 +2469,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_BOARD_NAME, "VMOD0007"),
                },
        },
+       {
+               .callback = mlxplat_dmi_comex_matched,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"),
+               },
+       },
+       {
+               .callback = mlxplat_dmi_ng400_matched,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
+               },
+       },
        {
                .callback = mlxplat_dmi_msn274x_matched,
                .matches = {
@@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
        /* Scan adapters from expected id to verify it is free. */
        *nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
        for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i <
-            MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) {
+            mlxplat_max_adap_num; i++) {
                search_adap = i2c_get_adapter(i);
                if (search_adap) {
                        i2c_put_adapter(search_adap);
@@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
        }
 
        /* Return with error if free id for adapter is not found. */
-       if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM)
+       if (i == mlxplat_max_adap_num)
                return -ENODEV;
 
        /* Shift adapter ids, since expected parent adapter is not free. */
        *nr = i;
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       for (i = 0; i < mlxplat_mux_num; i++) {
                shift = *nr - mlxplat_mux_data[i].parent;
                mlxplat_mux_data[i].parent = *nr;
                mlxplat_mux_data[i].base_nr += shift;
@@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void)
        if (nr < 0)
                goto fail_alloc;
 
-       nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr;
+       nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
        if (mlxplat_i2c)
                mlxplat_i2c->regmap = priv->regmap;
        priv->pdev_i2c = platform_device_register_resndata(
@@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void)
                goto fail_alloc;
        }
 
-       for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+       for (i = 0; i < mlxplat_mux_num; i++) {
                priv->pdev_mux[i] = platform_device_register_resndata(
                                                &priv->pdev_i2c->dev,
                                                "i2c-mux-reg", i, NULL,
@@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void)
        platform_device_unregister(priv->pdev_led);
        platform_device_unregister(priv->pdev_hotplug);
 
-       for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
+       for (i = mlxplat_mux_num - 1; i >= 0; i--)
                platform_device_unregister(priv->pdev_mux[i]);
 
        platform_device_unregister(priv->pdev_i2c);
index 48b112b4f0b07da494f6a978c3a9259f6cd94406..9b11ef1a401f22961d6a99382d3d0522bf082b19 100644 (file)
@@ -2,7 +2,7 @@
 
 /*
  * PC-Engines APUv2/APUv3 board platform driver
- * for gpio buttons and LEDs
+ * for GPIO buttons and LEDs
  *
  * Copyright (C) 2018 metux IT consult
  * Author: Enrico Weigelt <info@metux.net>
 
 /*
  * NOTE: this driver only supports APUv2/3 - not APUv1, as this one
- * has completely different register layouts
+ * has completely different register layouts.
  */
 
-/* register mappings */
+/* Register mappings */
 #define APU2_GPIO_REG_LED1             AMD_FCH_GPIO_REG_GPIO57
 #define APU2_GPIO_REG_LED2             AMD_FCH_GPIO_REG_GPIO58
 #define APU2_GPIO_REG_LED3             AMD_FCH_GPIO_REG_GPIO59_DEVSLP1
@@ -35,7 +35,7 @@
 #define APU2_GPIO_REG_MPCIE2           AMD_FCH_GPIO_REG_GPIO59_DEVSLP0
 #define APU2_GPIO_REG_MPCIE3           AMD_FCH_GPIO_REG_GPIO51
 
-/* order in which the gpio lines are defined in the register list */
+/* Order in which the GPIO lines are defined in the register list */
 #define APU2_GPIO_LINE_LED1            0
 #define APU2_GPIO_LINE_LED2            1
 #define APU2_GPIO_LINE_LED3            2
@@ -44,7 +44,7 @@
 #define APU2_GPIO_LINE_MPCIE2          5
 #define APU2_GPIO_LINE_MPCIE3          6
 
-/* gpio device */
+/* GPIO device */
 
 static int apu2_gpio_regs[] = {
        [APU2_GPIO_LINE_LED1]           = APU2_GPIO_REG_LED1,
@@ -72,7 +72,7 @@ static const struct amd_fch_gpio_pdata board_apu2 = {
        .gpio_names     = apu2_gpio_names,
 };
 
-/* gpio leds device */
+/* GPIO LEDs device */
 
 static const struct gpio_led apu2_leds[] = {
        { .name = "apu:green:1" },
@@ -95,12 +95,12 @@ static struct gpiod_lookup_table gpios_led_table = {
                                NULL, 1, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
                                NULL, 2, GPIO_ACTIVE_LOW),
-               GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_REG_SIMSWAP,
+               GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP,
                                NULL, 3, GPIO_ACTIVE_LOW),
        }
 };
 
-/* gpio keyboard device */
+/* GPIO keyboard device */
 
 static struct gpio_keys_button apu2_keys_buttons[] = {
        {
@@ -129,12 +129,12 @@ static struct gpiod_lookup_table gpios_key_table = {
        }
 };
 
-/* board setup */
+/* Board setup */
 
-/* note: matching works on string prefix, so "apu2" must come before "apu" */
+/* Note: matching works on string prefix, so "apu2" must come before "apu" */
 static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
 
-       /* APU2 w/ legacy bios < 4.0.8 */
+       /* APU2 w/ legacy BIOS < 4.0.8 */
        {
                .ident          = "apu2",
                .matches        = {
@@ -143,7 +143,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
                },
                .driver_data    = (void *)&board_apu2,
        },
-       /* APU2 w/ legacy bios >= 4.0.8 */
+       /* APU2 w/ legacy BIOS >= 4.0.8 */
        {
                .ident          = "apu2",
                .matches        = {
@@ -152,7 +152,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
                },
                .driver_data    = (void *)&board_apu2,
        },
-       /* APU2 w/ maainline bios */
+       /* APU2 w/ mainline BIOS */
        {
                .ident          = "apu2",
                .matches        = {
@@ -162,7 +162,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
                .driver_data    = (void *)&board_apu2,
        },
 
-       /* APU3 w/ legacy bios < 4.0.8 */
+       /* APU3 w/ legacy BIOS < 4.0.8 */
        {
                .ident          = "apu3",
                .matches        = {
@@ -171,7 +171,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
                },
                .driver_data = (void *)&board_apu2,
        },
-       /* APU3 w/ legacy bios >= 4.0.8 */
+       /* APU3 w/ legacy BIOS >= 4.0.8 */
        {
                .ident       = "apu3",
                .matches     = {
@@ -180,7 +180,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
                },
                .driver_data = (void *)&board_apu2,
        },
-       /* APU3 w/ mainline bios */
+       /* APU3 w/ mainline BIOS */
        {
                .ident       = "apu3",
                .matches     = {
@@ -189,6 +189,33 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = {
                },
                .driver_data = (void *)&board_apu2,
        },
+       /* APU4 w/ legacy BIOS < 4.0.8 */
+       {
+               .ident       = "apu4",
+               .matches     = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+                       DMI_MATCH(DMI_BOARD_NAME, "APU4")
+               },
+               .driver_data = (void *)&board_apu2,
+       },
+       /* APU4 w/ legacy BIOS >= 4.0.8 */
+       {
+               .ident       = "apu4",
+               .matches     = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+                       DMI_MATCH(DMI_BOARD_NAME, "apu4")
+               },
+               .driver_data = (void *)&board_apu2,
+       },
+       /* APU4 w/ mainline BIOS */
+       {
+               .ident       = "apu4",
+               .matches     = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+                       DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu4")
+               },
+               .driver_data = (void *)&board_apu2,
+       },
        {}
 };
 
@@ -223,7 +250,7 @@ static int __init apu_board_init(void)
 
        id = dmi_first_match(apu_gpio_dmi_table);
        if (!id) {
-               pr_err("failed to detect apu board via dmi\n");
+               pr_err("failed to detect APU board via DMI\n");
                return -ENODEV;
        }
 
@@ -262,7 +289,7 @@ module_init(apu_board_init);
 module_exit(apu_board_exit);
 
 MODULE_AUTHOR("Enrico Weigelt, metux IT consult <info@metux.net>");
-MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
+MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LEDs/keys driver");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
 MODULE_ALIAS("platform:pcengines-apuv2");
index 07d1b911e72f8eca67d4a1d1b4487e9c64f3dfa4..3e3c66dfec2e5e52e65899ccf731060b72a41ffd 100644 (file)
@@ -429,6 +429,14 @@ static const struct dmi_system_id critclk_systems[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"),
                },
        },
+       {
+               .ident = "CONNECT X300",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"),
+               },
+       },
        { /*sentinel*/ }
 };
 
@@ -481,7 +489,7 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
        pmc->base_addr &= PMC_BASE_ADDR_MASK;
 
-       pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
+       pmc->regmap = ioremap(pmc->base_addr, PMC_MMIO_REG_LEN);
        if (!pmc->regmap) {
                dev_err(&pdev->dev, "error: ioremap failed\n");
                return -ENOMEM;
index 9b6a93ff41ffb4be87038053b3d9c7d757b2b21b..23e40aa2176e861ce98db35e7ec4ca6ae1d5c3e9 100644 (file)
@@ -1394,7 +1394,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
        int ret = 0;
        int i;
 
-       samsung->f0000_segment = ioremap_nocache(0xf0000, 0xffff);
+       samsung->f0000_segment = ioremap(0xf0000, 0xffff);
        if (!samsung->f0000_segment) {
                if (debug || force)
                        pr_err("Can't map the segment at 0xf0000\n");
@@ -1434,7 +1434,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
        if (debug)
                samsung_sabi_infos(samsung, loca, ifaceP);
 
-       samsung->sabi_iface = ioremap_nocache(ifaceP, 16);
+       samsung->sabi_iface = ioremap(ifaceP, 16);
        if (!samsung->sabi_iface) {
                pr_err("Can't remap %x\n", ifaceP);
                ret = -EINVAL;
index 72205771d03dc80634729ed8c8145433b09d4705..93177e6e5ecde34398f41ba41ed50b735e245a1b 100644 (file)
@@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1686-digma_citi_e200.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-gp-electronic-t701.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"),
        { }
 };
 
@@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3676-onda-v80-plus-v3.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-onda-v820w-32g.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-y",  8),
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3680-onda-v891w-v1.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3676-onda-v891w-v3.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-pipo-w2s.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"),
        { }
 };
 
@@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = {
        .properties     = pipo_w2s_props,
 };
 
+static const struct property_entry pipo_w11_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data pipo_w11_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = pipo_w11_props,
+};
+
 static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3692-pov-mobii-wintab-p800w.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-schneider-sct101ctm.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1686-teclast_x98plus2.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
@@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-trekstor-primebook-c11.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = {
 static const struct property_entry trekstor_primebook_c13_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-trekstor-primebook-c13.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = {
 static const struct property_entry trekstor_primetab_t13b_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1680-trekstor-primetab-t13b.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
@@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
        PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        { }
 };
@@ -629,8 +629,7 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
        PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
-       PROPERTY_ENTRY_STRING("firmware-name",
-                             "gsl1686-surftab-wintron70-st70416-6.fw"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
@@ -909,6 +908,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "W2S"),
                },
        },
+       {
+               /* Pipo W11 */
+               .driver_data = (void *)&pipo_w11_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+                       /* Above matches are too generic, add BIOS-version match */
+                       DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"),
+               },
+       },
        {
                /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
                .driver_data = (void *)&trekstor_surftab_wintron70_data,
@@ -1032,8 +1041,7 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
                .driver_data = (void *)&trekstor_surftab_wintron70_data,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
-                       DMI_MATCH(DMI_PRODUCT_NAME,
-                                            "SurfTab wintron 7.0 ST70416-6"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
                        /* Exact match, different versions need different fw */
                        DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
                },
@@ -1065,7 +1073,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
 }
 
 static int ts_dmi_notifier_call(struct notifier_block *nb,
-                                      unsigned long action, void *data)
+                               unsigned long action, void *data)
 {
        struct device *dev = data;
        struct i2c_client *client;
index 179b737280e1370bf06508c944524a63196906fe..c43d8ad0252942744381d685a9faf7cd1ad6f0ea 100644 (file)
@@ -746,35 +746,13 @@ __skip:
        }
 }
 
-/*
- *  Compute ISA PnP checksum for first eight bytes.
- */
-static unsigned char __init isapnp_checksum(unsigned char *data)
-{
-       int i, j;
-       unsigned char checksum = 0x6a, bit, b;
-
-       for (i = 0; i < 8; i++) {
-               b = data[i];
-               for (j = 0; j < 8; j++) {
-                       bit = 0;
-                       if (b & (1 << j))
-                               bit = 1;
-                       checksum =
-                           ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
-                           | (checksum >> 1);
-               }
-       }
-       return checksum;
-}
-
 /*
  *  Build device list for all present ISA PnP devices.
  */
 static int __init isapnp_build_device_list(void)
 {
        int csn;
-       unsigned char header[9], checksum;
+       unsigned char header[9];
        struct pnp_card *card;
        u32 eisa_id;
        char id[8];
@@ -784,7 +762,6 @@ static int __init isapnp_build_device_list(void)
        for (csn = 1; csn <= isapnp_csn_count; csn++) {
                isapnp_wake(csn);
                isapnp_peek(header, 9);
-               checksum = isapnp_checksum(header);
                eisa_id = header[0] | header[1] << 8 |
                          header[2] << 16 | header[3] << 24;
                pnp_eisa_id_to_string(eisa_id, id);
index 089b6244b716b889ef096a05a8803a4328fd4c78..b8fe166cd0d9fd6843c51f5b69db40edd6ec9f41 100644 (file)
@@ -12,6 +12,22 @@ menuconfig POWER_AVS
 
          Say Y here to enable Adaptive Voltage Scaling class support.
 
+config QCOM_CPR
+       tristate "QCOM Core Power Reduction (CPR) support"
+       depends on POWER_AVS
+       select PM_OPP
+       select REGMAP
+       help
+         Say Y here to enable support for the CPR hardware found on Qualcomm
+         SoCs like QCS404.
+
+         This driver populates CPU OPP tables and makes adjustments to
+         them based on feedback from the CPR hardware. If you want to do
+         CPU frequency scaling, say Y here.
+
+         To compile this driver as a module, choose M here: the module will
+         be called qcom-cpr.
+
 config ROCKCHIP_IODOMAIN
        tristate "Rockchip IO domain support"
        depends on POWER_AVS && ARCH_ROCKCHIP && OF
index a1b8cd453f19b034d5e4aa03f1a97de4844e3a70..9007d05853e2a9c5ba2f0c5c5a06bb3918cb96ed 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_POWER_AVS_OMAP)           += smartreflex.o
+obj-$(CONFIG_QCOM_CPR)                 += qcom-cpr.o
 obj-$(CONFIG_ROCKCHIP_IODOMAIN)                += rockchip-io-domain.o
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
new file mode 100644 (file)
index 0000000..9192fb7
--- /dev/null
@@ -0,0 +1,1793 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/nvmem-consumer.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION              0
+#define RBCPR_VER_2                    0x02
+#define FLAGS_IGNORE_1ST_IRQ_STATUS    BIT(0)
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n)       (0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK  GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT   12
+#define RBCPR_GCNT_TARGET_GCNT_MASK    GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL       0x44
+#define REG_RBIF_TIMER_ADJUST          0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK    GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT   0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK  GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK  GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT                 0x48
+#define RBIF_LIMIT_CEILING_MASK                GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT       6
+#define RBIF_LIMIT_FLOOR_BITS          6
+#define RBIF_LIMIT_FLOOR_MASK          GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT     RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT       0
+
+#define REG_RBIF_SW_VLEVEL             0x94
+#define RBIF_SW_VLEVEL_DEFAULT         0x20
+
+#define REG_RBCPR_STEP_QUOT            0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK  GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK  GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL                  0x90
+
+#define RBCPR_CTL_LOOP_EN                      BIT(0)
+#define RBCPR_CTL_TIMER_EN                     BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN          BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN      BIT(6)
+#define RBCPR_CTL_COUNT_MODE                   BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK    GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT   24
+#define RBCPR_CTL_DN_THRESHOLD_MASK    GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT   28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD          0x98
+#define REG_RBIF_CONT_NACK_CMD         0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0             0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT       19
+#define RBCPR_RESULT0_BUSY_MASK                BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT  18
+#define RBCPR_RESULT0_ERROR_SHIFT      6
+#define RBCPR_RESULT0_ERROR_MASK       GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT        2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT    1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n)             (0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR             0x110
+#define REG_RBIF_IRQ_STATUS            0x114
+
+#define CPR_INT_DONE           BIT(0)
+#define CPR_INT_MIN            BIT(1)
+#define CPR_INT_DOWN           BIT(2)
+#define CPR_INT_MID            BIT(3)
+#define CPR_INT_UP             BIT(4)
+#define CPR_INT_MAX            BIT(5)
+#define CPR_INT_CLAMP          BIT(6)
+#define CPR_INT_ALL    (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+                       CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT        (CPR_INT_UP | CPR_INT_DOWN)
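+
+/* The default interrupt enable set unmasks only up/down threshold crossings. */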
+
+#define CPR_NUM_RING_OSC       8
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF         50
+
+#define FUSE_REVISION_UNKNOWN          (-1)
+
+enum voltage_change_dir {
+       NO_CHANGE,
+       DOWN,
+       UP,
+};
+
+struct cpr_fuse {
+       char *ring_osc;
+       char *init_voltage;
+       char *quotient;
+       char *quotient_offset;
+};
+
+struct fuse_corner_data {
+       int ref_uV;
+       int max_uV;
+       int min_uV;
+       int max_volt_scale;
+       int max_quot_scale;
+       /* fuse quot */
+       int quot_offset;
+       int quot_scale;
+       int quot_adjust;
+       /* fuse quot_offset */
+       int quot_offset_scale;
+       int quot_offset_adjust;
+};
+
+struct cpr_fuses {
+       int init_voltage_step;
+       int init_voltage_width;
+       struct fuse_corner_data *fuse_corner_data;
+};
+
+struct corner_data {
+       unsigned int fuse_corner;
+       unsigned long freq;
+};
+
+struct cpr_desc {
+       unsigned int num_fuse_corners;
+       int min_diff_quot;
+       int *step_quot;
+
+       unsigned int            timer_delay_us;
+       unsigned int            timer_cons_up;
+       unsigned int            timer_cons_down;
+       unsigned int            up_threshold;
+       unsigned int            down_threshold;
+       unsigned int            idle_clocks;
+       unsigned int            gcnt_us;
+       unsigned int            vdd_apc_step_up_limit;
+       unsigned int            vdd_apc_step_down_limit;
+       unsigned int            clamp_timer_interval;
+
+       struct cpr_fuses cpr_fuses;
+       bool reduce_to_fuse_uV;
+       bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+       unsigned int    enable_reg;
+       u32             enable_mask;
+
+       struct reg_sequence     *config;
+       struct reg_sequence     *settings;
+       int                     num_regs_per_fuse;
+};
+
+struct cpr_acc_desc {
+       const struct cpr_desc *cpr_desc;
+       const struct acc_desc *acc_desc;
+};
+
+struct fuse_corner {
+       int min_uV;
+       int max_uV;
+       int uV;
+       int quot;
+       int step_quot;
+       const struct reg_sequence *accs;
+       int num_accs;
+       unsigned long max_freq;
+       u8 ring_osc_idx;
+};
+
+struct corner {
+       int min_uV;
+       int max_uV;
+       int uV;
+       int last_uV;
+       int quot_adjust;
+       u32 save_ctl;
+       u32 save_irq;
+       unsigned long freq;
+       struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+       unsigned int            num_corners;
+       unsigned int            ref_clk_khz;
+
+       struct generic_pm_domain pd;
+       struct device           *dev;
+       struct device           *attached_cpu_dev;
+       struct mutex            lock;
+       void __iomem            *base;
+       struct corner           *corner;
+       struct regulator        *vdd_apc;
+       struct clk              *cpu_clk;
+       struct regmap           *tcsr;
+       bool                    loop_disabled;
+       u32                     gcnt;
+       unsigned long           flags;
+
+       struct fuse_corner      *fuse_corners;
+       struct corner           *corners;
+
+       const struct cpr_desc *desc;
+       const struct acc_desc *acc_desc;
+       const struct cpr_fuse *cpr_fuses;
+
+       struct dentry *debugfs;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+       return !drv->loop_disabled;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+       writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+       return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+       u32 val;
+
+       val = readl_relaxed(drv->base + offset);
+       val &= ~mask;
+       val |= value & mask;
+       writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+       cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+       cpr_irq_clr(drv);
+       cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+       cpr_irq_clr(drv);
+       cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+       cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+       cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+       u32 val, mask;
+       const struct cpr_desc *desc = drv->desc;
+
+       /* Program Consecutive Up & Down */
+       val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+       val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+       mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+       cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+       cpr_masked_write(drv, REG_RBCPR_CTL,
+                        RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+                        RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+                        corner->save_ctl);
+       cpr_irq_set(drv, corner->save_irq);
+
+       if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
+               val = RBCPR_CTL_LOOP_EN;
+       else
+               val = 0;
+       cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+       cpr_irq_set(drv, 0);
+       cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+                      RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+       cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+                        RBIF_TIMER_ADJ_CONS_UP_MASK |
+                        RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+       cpr_irq_clr(drv);
+       cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+       cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+       cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+       u32 reg_val;
+
+       reg_val = cpr_read(drv, REG_RBCPR_CTL);
+       return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+       u32 reg_val;
+
+       reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+       return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+       corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+       corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+       u32 gcnt, ctl, irq, ro_sel, step_quot;
+       struct fuse_corner *fuse = corner->fuse_corner;
+       const struct cpr_desc *desc = drv->desc;
+       int i;
+
+       ro_sel = fuse->ring_osc_idx;
+       gcnt = drv->gcnt;
+       gcnt |= fuse->quot - corner->quot_adjust;
+
+       /* Program the step quotient and idle clocks */
+       step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+       step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
+       cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+       /* Clear the target quotient value and gate count of all ROs */
+       for (i = 0; i < CPR_NUM_RING_OSC; i++)
+               cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+       cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+       ctl = corner->save_ctl;
+       cpr_write(drv, REG_RBCPR_CTL, ctl);
+       irq = corner->save_irq;
+       cpr_irq_set(drv, irq);
+       dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
+               ctl, irq);
+}
+
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+                       struct fuse_corner *end)
+{
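+       /*
+        * Walk from the current fuse corner towards the target one,
+        * applying each intermediate corner's ACC settings; the corner
+        * we start from is skipped.
+        */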
+       if (f == end)
+               return;
+
+       if (f < end) {
+               for (f += 1; f <= end; f++)
+                       regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+       } else {
+               for (f -= 1; f >= end; f--)
+                       regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+       }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+                          struct fuse_corner *fuse_corner,
+                          enum voltage_change_dir dir)
+{
+       struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+       if (drv->tcsr && dir == DOWN)
+               cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+       return 0;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+                           struct fuse_corner *fuse_corner,
+                           enum voltage_change_dir dir)
+{
+       struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+       if (drv->tcsr && dir == UP)
+               cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+       return 0;
+}
+
+static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
+                            int new_uV, enum voltage_change_dir dir)
+{
+       int ret;
+       struct fuse_corner *fuse_corner = corner->fuse_corner;
+
+       ret = cpr_pre_voltage(drv, fuse_corner, dir);
+       if (ret)
+               return ret;
+
+       ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
+       if (ret) {
+               dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
+                                   new_uV);
+               return ret;
+       }
+
+       ret = cpr_post_voltage(drv, fuse_corner, dir);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
+{
+       return drv->corner ? drv->corner - drv->corners + 1 : 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+       u32 val, error_steps, reg_mask;
+       int last_uV, new_uV, step_uV, ret;
+       struct corner *corner;
+       const struct cpr_desc *desc = drv->desc;
+
+       if (dir != UP && dir != DOWN)
+               return 0;
+
+       step_uV = regulator_get_linear_step(drv->vdd_apc);
+       if (!step_uV)
+               return -EINVAL;
+
+       corner = drv->corner;
+
+       val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+       error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+       error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
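+       /*
+        * error_steps is used below as the number of vdd-apc regulator
+        * steps to move by, capped to vdd_apc_step_{up,down}_limit.
+        */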
+       last_uV = corner->last_uV;
+
+       if (dir == UP) {
+               if (desc->clamp_timer_interval &&
+                   error_steps < desc->up_threshold) {
+                       /*
+                        * Handle the case where another measurement started
+                        * after the interrupt was triggered due to a core
+                        * exiting from power collapse.
+                        */
+                       error_steps = max(desc->up_threshold,
+                                         desc->vdd_apc_step_up_limit);
+               }
+
+               if (last_uV >= corner->max_uV) {
+                       cpr_irq_clr_nack(drv);
+
+                       /* Maximize the UP threshold */
+                       reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+                       reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+                       val = reg_mask;
+                       cpr_ctl_modify(drv, reg_mask, val);
+
+                       /* Disable UP interrupt */
+                       cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+                       return 0;
+               }
+
+               if (error_steps > desc->vdd_apc_step_up_limit)
+                       error_steps = desc->vdd_apc_step_up_limit;
+
+               /* Calculate new voltage */
+               new_uV = last_uV + error_steps * step_uV;
+               new_uV = min(new_uV, corner->max_uV);
+
+               dev_dbg(drv->dev,
+                       "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
+                       new_uV, last_uV, cpr_get_cur_perf_state(drv));
+       } else if (dir == DOWN) {
+               if (desc->clamp_timer_interval &&
+                   error_steps < desc->down_threshold) {
+                       /*
+                        * Handle the case where another measurement started
+                        * after the interrupt was triggered due to a core
+                        * exiting from power collapse.
+                        */
+                       error_steps = max(desc->down_threshold,
+                                         desc->vdd_apc_step_down_limit);
+               }
+
+               if (last_uV <= corner->min_uV) {
+                       cpr_irq_clr_nack(drv);
+
+                       /* Enable auto nack down */
+                       reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+                       val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+                       cpr_ctl_modify(drv, reg_mask, val);
+
+                       /* Disable DOWN interrupt */
+                       cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+                       return 0;
+               }
+
+               if (error_steps > desc->vdd_apc_step_down_limit)
+                       error_steps = desc->vdd_apc_step_down_limit;
+
+               /* Calculate new voltage */
+               new_uV = last_uV - error_steps * step_uV;
+               new_uV = max(new_uV, corner->min_uV);
+
+               dev_dbg(drv->dev,
+                       "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
+                       new_uV, last_uV, cpr_get_cur_perf_state(drv));
+       }
+
+       ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+       if (ret) {
+               cpr_irq_clr_nack(drv);
+               return ret;
+       }
+       drv->corner->last_uV = new_uV;
+
+       if (dir == UP) {
+               /* Disable auto nack down */
+               reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+               val = 0;
+       } else if (dir == DOWN) {
+               /* Restore default threshold for UP */
+               reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+               reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+               val = desc->up_threshold;
+               val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+       }
+
+       cpr_ctl_modify(drv, reg_mask, val);
+
+       /* Re-enable default interrupts */
+       cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+       /* Ack */
+       cpr_irq_clr_ack(drv);
+
+       return 0;
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+       struct cpr_drv *drv = dev;
+       const struct cpr_desc *desc = drv->desc;
+       irqreturn_t ret = IRQ_HANDLED;
+       u32 val;
+
+       mutex_lock(&drv->lock);
+
+       val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+       if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+               val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+       dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
+
+       if (!cpr_ctl_is_enabled(drv)) {
+               dev_dbg(drv->dev, "CPR is disabled\n");
+               ret = IRQ_NONE;
+       } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
+               dev_dbg(drv->dev, "CPR measurement is not ready\n");
+       } else if (!cpr_is_allowed(drv)) {
+               val = cpr_read(drv, REG_RBCPR_CTL);
+               dev_err_ratelimited(drv->dev,
+                                   "Interrupt broken? RBCPR_CTL = %#02x\n",
+                                   val);
+               ret = IRQ_NONE;
+       } else {
+               /*
+                * Handle the pending interrupt flags in order of each
+                * IRQ's priority, highest first.
+                */
+               if (val & CPR_INT_UP) {
+                       cpr_scale(drv, UP);
+               } else if (val & CPR_INT_DOWN) {
+                       cpr_scale(drv, DOWN);
+               } else if (val & CPR_INT_MIN) {
+                       cpr_irq_clr_nack(drv);
+               } else if (val & CPR_INT_MAX) {
+                       cpr_irq_clr_nack(drv);
+               } else if (val & CPR_INT_MID) {
+                       /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+                       dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
+               } else {
+                       dev_dbg(drv->dev,
+                               "IRQ occurred for unknown flag (%#08x)\n", val);
+               }
+
+               /* Save register values for the corner */
+               cpr_corner_save(drv, drv->corner);
+       }
+
+       mutex_unlock(&drv->lock);
+
+       return ret;
+}
+
+static int cpr_enable(struct cpr_drv *drv)
+{
+       int ret;
+
+       ret = regulator_enable(drv->vdd_apc);
+       if (ret)
+               return ret;
+
+       mutex_lock(&drv->lock);
+
+       if (cpr_is_allowed(drv) && drv->corner) {
+               cpr_irq_clr(drv);
+               cpr_corner_restore(drv, drv->corner);
+               cpr_ctl_enable(drv, drv->corner);
+       }
+
+       mutex_unlock(&drv->lock);
+
+       return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+       int ret;
+
+       mutex_lock(&drv->lock);
+
+       if (cpr_is_allowed(drv)) {
+               cpr_ctl_disable(drv);
+               cpr_irq_clr(drv);
+       }
+
+       mutex_unlock(&drv->lock);
+
+       ret = regulator_disable(drv->vdd_apc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int cpr_config(struct cpr_drv *drv)
+{
+       int i;
+       u32 val, gcnt;
+       struct corner *corner;
+       const struct cpr_desc *desc = drv->desc;
+
+       /* Disable interrupt and CPR */
+       cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+       cpr_write(drv, REG_RBCPR_CTL, 0);
+
+       /* Program the default HW ceiling, floor and vlevel */
+       val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+               << RBIF_LIMIT_CEILING_SHIFT;
+       val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
+       cpr_write(drv, REG_RBIF_LIMIT, val);
+       cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+       /*
+        * Clear the target quotient value and gate count of all
+        * ring oscillators
+        */
+       for (i = 0; i < CPR_NUM_RING_OSC; i++)
+               cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+       /* Init and save gcnt */
+       gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
+       gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+       gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+       drv->gcnt = gcnt;
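+       /*
+        * Illustrative numbers (the actual rate comes from the "ref"
+        * clock): a 19.2 MHz reference with gcnt_us = 1 gives a gate
+        * count of 19 reference-clock cycles.
+        */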
+
+       /* Program the delay count for the timer */
+       val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
+       cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+       dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
+               desc->timer_delay_us);
+
+       /* Program Consecutive Up & Down */
+       val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+       val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+       val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+       cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+       /* Program the control register */
+       val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+       val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+       val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+       val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+       cpr_write(drv, REG_RBCPR_CTL, val);
+
+       for (i = 0; i < drv->num_corners; i++) {
+               corner = &drv->corners[i];
+               corner->save_ctl = val;
+               corner->save_irq = CPR_INT_DEFAULT;
+       }
+
+       cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+       val = cpr_read(drv, REG_RBCPR_VERSION);
+       if (val <= RBCPR_VER_2)
+               drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+       return 0;
+}
+
+static int cpr_set_performance_state(struct generic_pm_domain *domain,
+                                    unsigned int state)
+{
+       struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+       struct corner *corner, *end;
+       enum voltage_change_dir dir;
+       int ret = 0, new_uV;
+
+       mutex_lock(&drv->lock);
+
+       dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
+               __func__, state, cpr_get_cur_perf_state(drv));
+
+       /*
+        * Determine the new corner we are switching to.
+        * Subtract one since the lowest performance state is 1.
+        */
+       corner = drv->corners + state - 1;
+       end = &drv->corners[drv->num_corners - 1];
+       if (corner > end || corner < drv->corners) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       /* Determine direction */
+       if (drv->corner > corner)
+               dir = DOWN;
+       else if (drv->corner < corner)
+               dir = UP;
+       else
+               dir = NO_CHANGE;
+
+       if (cpr_is_allowed(drv))
+               new_uV = corner->last_uV;
+       else
+               new_uV = corner->uV;
+
+       if (cpr_is_allowed(drv))
+               cpr_ctl_disable(drv);
+
+       ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+       if (ret)
+               goto unlock;
+
+       if (cpr_is_allowed(drv)) {
+               cpr_irq_clr(drv);
+               if (drv->corner != corner)
+                       cpr_corner_restore(drv, corner);
+               cpr_ctl_enable(drv, corner);
+       }
+
+       drv->corner = corner;
+
+unlock:
+       mutex_unlock(&drv->lock);
+
+       return ret;
+}
+
+static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data)
+{
+       struct nvmem_cell *cell;
+       ssize_t len;
+       char *ret;
+       int i;
+
+       *data = 0;
+
+       cell = nvmem_cell_get(dev, cname);
+       if (IS_ERR(cell)) {
+               if (PTR_ERR(cell) != -EPROBE_DEFER)
+                       dev_err(dev, "undefined cell %s\n", cname);
+               return PTR_ERR(cell);
+       }
+
+       ret = nvmem_cell_read(cell, &len);
+       nvmem_cell_put(cell);
+       if (IS_ERR(ret)) {
+               dev_err(dev, "can't read cell %s\n", cname);
+               return PTR_ERR(ret);
+       }
+
+       for (i = 0; i < len; i++)
+               *data |= ret[i] << (8 * i);
+
+       kfree(ret);
+       dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len);
+
+       return 0;
+}
+
+static int
+cpr_populate_ring_osc_idx(struct cpr_drv *drv)
+{
+       struct fuse_corner *fuse = drv->fuse_corners;
+       struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
+       const struct cpr_fuse *fuses = drv->cpr_fuses;
+       u32 data;
+       int ret;
+
+       for (; fuse < end; fuse++, fuses++) {
+               ret = cpr_read_efuse(drv->dev, fuses->ring_osc,
+                                    &data);
+               if (ret)
+                       return ret;
+               fuse->ring_osc_idx = data;
+       }
+
+       return 0;
+}
+
+static int cpr_read_fuse_uV(const struct cpr_desc *desc,
+                           const struct fuse_corner_data *fdata,
+                           const char *init_v_efuse,
+                           int step_volt,
+                           struct cpr_drv *drv)
+{
+       int step_size_uV, steps, uV;
+       u32 bits = 0;
+       int ret;
+
+       ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits);
+       if (ret)
+               return ret;
+
+       steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
+       /* Not two's complement; instead, the highest bit is the sign bit */
+       if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+               steps = -steps;
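+       /*
+        * For example (illustrative, width = 6): bits = 0b000011 decodes
+        * to +3 steps, while bits = 0b100011 decodes to -3 steps.
+        */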
+
+       step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+       uV = fdata->ref_uV + steps * step_size_uV;
+       return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+       const struct cpr_desc *desc = drv->desc;
+       const struct cpr_fuse *fuses = drv->cpr_fuses;
+       const struct acc_desc *acc_desc = drv->acc_desc;
+       int i;
+       unsigned int step_volt;
+       struct fuse_corner_data *fdata;
+       struct fuse_corner *fuse, *end;
+       int uV;
+       const struct reg_sequence *accs;
+       int ret;
+
+       accs = acc_desc->settings;
+
+       step_volt = regulator_get_linear_step(drv->vdd_apc);
+       if (!step_volt)
+               return -EINVAL;
+
+       /* Populate fuse_corner members */
+       fuse = drv->fuse_corners;
+       end = &fuse[desc->num_fuse_corners - 1];
+       fdata = desc->cpr_fuses.fuse_corner_data;
+
+       for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+               /*
+                * Update SoC voltages: platforms might choose a different
+                * regulator than the one used to characterize the algorithm
+                * parameters (i.e., init_voltage_step).
+                */
+               fdata->min_uV = roundup(fdata->min_uV, step_volt);
+               fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+               /* Populate uV */
+               uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+                                     step_volt, drv);
+               if (uV < 0)
+                       return uV;
+
+               fuse->min_uV = fdata->min_uV;
+               fuse->max_uV = fdata->max_uV;
+               fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+               if (fuse == end) {
+                       /*
+                        * Allow the highest fuse corner's PVS voltage to
+                        * define the ceiling voltage for that corner in order
+                        * to support SoCs in which variable ceiling values
+                        * are required.
+                        */
+                       end->max_uV = max(end->max_uV, end->uV);
+               }
+
+               /* Populate target quotient by scaling */
+               ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
+               if (ret)
+                       return ret;
+
+               fuse->quot *= fdata->quot_scale;
+               fuse->quot += fdata->quot_offset;
+               fuse->quot += fdata->quot_adjust;
+               fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+               /* Populate acc settings */
+               fuse->accs = accs;
+               fuse->num_accs = acc_desc->num_regs_per_fuse;
+               accs += acc_desc->num_regs_per_fuse;
+       }
+
+       /*
+        * Restrict all fuse corner PVS voltages based upon per corner
+        * ceiling and floor voltages.
+        */
+       for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+               if (fuse->uV > fuse->max_uV)
+                       fuse->uV = fuse->max_uV;
+               else if (fuse->uV < fuse->min_uV)
+                       fuse->uV = fuse->min_uV;
+
+               ret = regulator_is_supported_voltage(drv->vdd_apc,
+                                                    fuse->min_uV,
+                                                    fuse->min_uV);
+               if (!ret) {
+                       dev_err(drv->dev,
+                               "min uV: %d (fuse corner: %d) not supported by regulator\n",
+                               fuse->min_uV, i);
+                       return -EINVAL;
+               }
+
+               ret = regulator_is_supported_voltage(drv->vdd_apc,
+                                                    fuse->max_uV,
+                                                    fuse->max_uV);
+               if (!ret) {
+                       dev_err(drv->dev,
+                               "max uV: %d (fuse corner: %d) not supported by regulator\n",
+                               fuse->max_uV, i);
+                       return -EINVAL;
+               }
+
+               dev_dbg(drv->dev,
+                       "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+                       i, fuse->min_uV, fuse->uV, fuse->max_uV,
+                       fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+       }
+
+       return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+                                struct cpr_drv *drv,
+                                const struct fuse_corner_data *fdata,
+                                const struct corner *corner)
+{
+       u32 quot_diff = 0;
+       unsigned long freq_diff;
+       int scaling;
+       const struct fuse_corner *fuse, *prev_fuse;
+       int ret;
+
+       fuse = corner->fuse_corner;
+       prev_fuse = fuse - 1;
+
+       if (quot_offset) {
+               ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+               if (ret)
+                       return ret;
+
+               quot_diff *= fdata->quot_offset_scale;
+               quot_diff += fdata->quot_offset_adjust;
+       } else {
+               quot_diff = fuse->quot - prev_fuse->quot;
+       }
+
+       freq_diff = fuse->max_freq - prev_fuse->max_freq;
+       freq_diff /= 1000000; /* Convert to MHz */
+       scaling = 1000 * quot_diff / freq_diff;
+       return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+                          const struct fuse_corner_data *fdata)
+{
+       unsigned long f_high, f_low, f_diff;
+       int uV_high, uV_low, uV;
+       u64 temp, temp_limit;
+       const struct fuse_corner *fuse, *prev_fuse;
+
+       fuse = corner->fuse_corner;
+       prev_fuse = fuse - 1;
+
+       f_high = fuse->max_freq;
+       f_low = prev_fuse->max_freq;
+       uV_high = fuse->uV;
+       uV_low = prev_fuse->uV;
+       f_diff = fuse->max_freq - corner->freq;
+
+       /*
+        * Don't interpolate in the wrong direction. This could happen
+        * if the adjusted fuse voltage overlaps with the previous fuse's
+        * adjusted voltage.
+        */
+       if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+               return corner->uV;
+
+       temp = f_diff * (uV_high - uV_low);
+       do_div(temp, f_high - f_low);
+
+       /*
+        * max_volt_scale has units of uV/MHz while freq values
+        * have units of Hz. Divide by 1000000 to convert f_diff to MHz.
+        */
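+       /*
+        * Illustrative numbers: a 100 MHz f_diff with max_volt_scale =
+        * 2000 uV/MHz caps the interpolated reduction at 200000 uV.
+        */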
+       temp_limit = f_diff * fdata->max_volt_scale;
+       do_div(temp_limit, 1000000);
+
+       uV = uV_high - min(temp, temp_limit);
+       return roundup(uV, step_volt);
+}
+
+static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
+{
+       struct device_node *np;
+       unsigned int fuse_corner = 0;
+
+       np = dev_pm_opp_get_of_node(opp);
+       if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
+               pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
+                      __func__);
+
+       of_node_put(np);
+
+       return fuse_corner;
+}
+
+static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
+                                           struct device *cpu_dev)
+{
+       u64 rate = 0;
+       struct device_node *ref_np;
+       struct device_node *desc_np;
+       struct device_node *child_np = NULL;
+       struct device_node *child_req_np = NULL;
+
+       desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+       if (!desc_np)
+               return 0;
+
+       ref_np = dev_pm_opp_get_of_node(ref);
+       if (!ref_np)
+               goto out_ref;
+
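+       /*
+        * Scan the CPU's OPP entries for the one whose "required-opps"
+        * phandle references our genpd OPP, then read its "opp-hz".
+        */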
+       do {
+               of_node_put(child_req_np);
+               child_np = of_get_next_available_child(desc_np, child_np);
+               child_req_np = of_parse_phandle(child_np, "required-opps", 0);
+       } while (child_np && child_req_np != ref_np);
+
+       if (child_np && child_req_np == ref_np)
+               of_property_read_u64(child_np, "opp-hz", &rate);
+
+       of_node_put(child_req_np);
+       of_node_put(child_np);
+       of_node_put(ref_np);
+out_ref:
+       of_node_put(desc_np);
+
+       return (unsigned long) rate;
+}
+
+static int cpr_corner_init(struct cpr_drv *drv)
+{
+       const struct cpr_desc *desc = drv->desc;
+       const struct cpr_fuse *fuses = drv->cpr_fuses;
+       int i, level, scaling = 0;
+       unsigned int fnum, fc;
+       const char *quot_offset;
+       struct fuse_corner *fuse, *prev_fuse;
+       struct corner *corner, *end;
+       struct corner_data *cdata;
+       const struct fuse_corner_data *fdata;
+       bool apply_scaling;
+       unsigned long freq_diff, freq_diff_mhz;
+       unsigned long freq;
+       int step_volt = regulator_get_linear_step(drv->vdd_apc);
+       struct dev_pm_opp *opp;
+
+       if (!step_volt)
+               return -EINVAL;
+
+       corner = drv->corners;
+       end = &corner[drv->num_corners - 1];
+
+       cdata = devm_kcalloc(drv->dev, drv->num_corners,
+                            sizeof(struct corner_data),
+                            GFP_KERNEL);
+       if (!cdata)
+               return -ENOMEM;
+
+       /*
+        * Store the maximum frequency for each fuse corner based on the
+        * frequency plan.
+        */
+       for (level = 1; level <= drv->num_corners; level++) {
+               opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
+               if (IS_ERR(opp))
+                       return -EINVAL;
+               fc = cpr_get_fuse_corner(opp);
+               if (!fc) {
+                       dev_pm_opp_put(opp);
+                       return -EINVAL;
+               }
+               fnum = fc - 1;
+               freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
+               if (!freq) {
+                       dev_pm_opp_put(opp);
+                       return -EINVAL;
+               }
+               cdata[level - 1].fuse_corner = fnum;
+               cdata[level - 1].freq = freq;
+
+               fuse = &drv->fuse_corners[fnum];
+               dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
+                       freq, dev_pm_opp_get_level(opp) - 1, fnum);
+               if (freq > fuse->max_freq)
+                       fuse->max_freq = freq;
+               dev_pm_opp_put(opp);
+       }
+
+       /*
+        * Get the quotient adjustment scaling factor, according to:
+        *
+        * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+        *              / (freq(corner_N) - freq(corner_N-1)), max_factor)
+        *
+        * QUOT(corner_N):      quotient read from fuse for fuse corner N
+        * QUOT(corner_N-1):    quotient read from fuse for fuse corner (N - 1)
+        * freq(corner_N):      max frequency in MHz supported by fuse corner N
+        * freq(corner_N-1):    max frequency in MHz supported by fuse corner
+        *                       (N - 1)
+        *
+        * Then walk through the corners mapped to each fuse corner
+        * and calculate the quotient adjustment for each one using the
+        * following formula:
+        *
+        * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+        *
+        * freq_max: max frequency in MHz supported by the fuse corner
+        * freq_corner: frequency in MHz corresponding to the corner
+        * scaling: calculated from above equation
+        *
+        *
+        *     +                           +
+        *     |                         v |
+        *   q |           f c           o |           f c
+        *   u |         c               l |         c
+        *   o |       f                 t |       f
+        *   t |     c                   a |     c
+        *     | c f                     g | c f
+        *     |                         e |
+        *     +---------------            +----------------
+        *       0 1 2 3 4 5 6               0 1 2 3 4 5 6
+        *          corner                      corner
+        *
+        *    c = corner
+        *    f = fuse corner
+        *
+        */
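+       /*
+        * Worked example with illustrative numbers: if fuse corner N has
+        * QUOT = 1000 at 1000 MHz and fuse corner N-1 has QUOT = 800 at
+        * 800 MHz, then scaling = min(1000 * 200 / 200, max_factor) =
+        * 1000, and a corner 100 MHz below freq_max gets quot_adjust =
+        * 100 * 1000 / 1000 = 100.
+        */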
+       for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+               fnum = cdata[i].fuse_corner;
+               fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+               quot_offset = fuses[fnum].quotient_offset;
+               fuse = &drv->fuse_corners[fnum];
+               if (fnum)
+                       prev_fuse = &drv->fuse_corners[fnum - 1];
+               else
+                       prev_fuse = NULL;
+
+               corner->fuse_corner = fuse;
+               corner->freq = cdata[i].freq;
+               corner->uV = fuse->uV;
+
+               if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+                       scaling = cpr_calculate_scaling(quot_offset, drv,
+                                                       fdata, corner);
+                       if (scaling < 0)
+                               return scaling;
+
+                       apply_scaling = true;
+               } else if (corner->freq == fuse->max_freq) {
+                       /* This is a fuse corner; don't scale anything */
+                       apply_scaling = false;
+               }
+
+               if (apply_scaling) {
+                       freq_diff = fuse->max_freq - corner->freq;
+                       freq_diff_mhz = freq_diff / 1000000;
+                       corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+                       corner->uV = cpr_interpolate(corner, step_volt, fdata);
+               }
+
+               corner->max_uV = fuse->max_uV;
+               corner->min_uV = fuse->min_uV;
+               corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+               corner->last_uV = corner->uV;
+
+               /* Reduce the ceiling voltage if needed */
+               if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+                       corner->max_uV = corner->uV;
+               else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+                       corner->max_uV = max(corner->min_uV, fuse->uV);
+
+               dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+                       corner->min_uV, corner->uV, corner->max_uV,
+                       fuse->quot - corner->quot_adjust);
+       }
+
+       return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+       const struct cpr_desc *desc = drv->desc;
+       struct cpr_fuse *fuses;
+       int i;
+
+       fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+                            sizeof(struct cpr_fuse),
+                            GFP_KERNEL);
+       if (!fuses)
+               return ERR_PTR(-ENOMEM);
+
+       for (i = 0; i < desc->num_fuse_corners; i++) {
+               char tbuf[32];
+
+               snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+               fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+               if (!fuses[i].ring_osc)
+                       return ERR_PTR(-ENOMEM);
+
+               snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+               fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+                                                    GFP_KERNEL);
+               if (!fuses[i].init_voltage)
+                       return ERR_PTR(-ENOMEM);
+
+               snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+               fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+               if (!fuses[i].quotient)
+                       return ERR_PTR(-ENOMEM);
+
+               snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+               fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+                                                       GFP_KERNEL);
+               if (!fuses[i].quotient_offset)
+                       return ERR_PTR(-ENOMEM);
+       }
+
+       return fuses;
+}
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+       drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+       const struct cpr_desc *desc = drv->desc;
+       struct clk *clk;
+
+       clk = clk_get(drv->dev, "ref");
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+       clk_put(clk);
+
+       if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
+           desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
+           desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
+           desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
+           desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
+           desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+               return -EINVAL;
+
+       dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
+               desc->up_threshold, desc->down_threshold);
+
+       return 0;
+}
+
+static int cpr_find_initial_corner(struct cpr_drv *drv)
+{
+       unsigned long rate;
+       const struct corner *end;
+       struct corner *iter;
+       unsigned int i = 0;
+
+       if (!drv->cpu_clk) {
+               dev_err(drv->dev, "cannot get rate from NULL clk\n");
+               return -EINVAL;
+       }
+
+       end = &drv->corners[drv->num_corners - 1];
+       rate = clk_get_rate(drv->cpu_clk);
+
+       /*
+        * Some bootloaders set a CPU clock frequency that is not defined
+        * in the OPP table. When running at an unlisted frequency,
+        * cpufreq_online() will change to the OPP which has the lowest
+        * frequency, at or above the unlisted frequency.
+        * Since cpufreq_online() always "rounds up" in the case of an
+        * unlisted frequency, this function always "rounds down" in case
+        * of an unlisted frequency. That way, when cpufreq_online()
+        * triggers the first ever call to cpr_set_performance_state(),
+        * it will correctly determine the direction as UP.
+        */
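+       /*
+        * For example (illustrative rates): with corners at 1.0 GHz and
+        * 1.4 GHz and a boot rate of 1.2 GHz, this loop selects the
+        * 1.0 GHz corner, while cpufreq will round up to 1.4 GHz.
+        */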
+       for (iter = drv->corners; iter <= end; iter++) {
+               if (iter->freq > rate)
+                       break;
+               i++;
+               if (iter->freq == rate) {
+                       drv->corner = iter;
+                       break;
+               }
+               if (iter->freq < rate)
+                       drv->corner = iter;
+       }
+
+       if (!drv->corner) {
+               dev_err(drv->dev, "boot up corner not found\n");
+               return -EINVAL;
+       }
+
+       dev_dbg(drv->dev, "boot up perf state: %u\n", i);
+
+       return 0;
+}
+
+static const struct cpr_desc qcs404_cpr_desc = {
+       .num_fuse_corners = 3,
+       .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+       .step_quot = (int []){ 25, 25, 25, },
+       .timer_delay_us = 5000,
+       .timer_cons_up = 0,
+       .timer_cons_down = 2,
+       .up_threshold = 1,
+       .down_threshold = 3,
+       .idle_clocks = 15,
+       .gcnt_us = 1,
+       .vdd_apc_step_up_limit = 1,
+       .vdd_apc_step_down_limit = 1,
+       .cpr_fuses = {
+               .init_voltage_step = 8000,
+               .init_voltage_width = 6,
+               .fuse_corner_data = (struct fuse_corner_data[]){
+                       /* fuse corner 0 */
+                       {
+                               .ref_uV = 1224000,
+                               .max_uV = 1224000,
+                               .min_uV = 1048000,
+                               .max_volt_scale = 0,
+                               .max_quot_scale = 0,
+                               .quot_offset = 0,
+                               .quot_scale = 1,
+                               .quot_adjust = 0,
+                               .quot_offset_scale = 5,
+                               .quot_offset_adjust = 0,
+                       },
+                       /* fuse corner 1 */
+                       {
+                               .ref_uV = 1288000,
+                               .max_uV = 1288000,
+                               .min_uV = 1048000,
+                               .max_volt_scale = 2000,
+                               .max_quot_scale = 1400,
+                               .quot_offset = 0,
+                               .quot_scale = 1,
+                               .quot_adjust = -20,
+                               .quot_offset_scale = 5,
+                               .quot_offset_adjust = 0,
+                       },
+                       /* fuse corner 2 */
+                       {
+                               .ref_uV = 1352000,
+                               .max_uV = 1384000,
+                               .min_uV = 1088000,
+                               .max_volt_scale = 2000,
+                               .max_quot_scale = 1400,
+                               .quot_offset = 0,
+                               .quot_scale = 1,
+                               .quot_adjust = 0,
+                               .quot_offset_scale = 5,
+                               .quot_offset_adjust = 0,
+                       },
+               },
+       },
+};
+
+static const struct acc_desc qcs404_acc_desc = {
+       .settings = (struct reg_sequence[]){
+               { 0xb120, 0x1041040 },
+               { 0xb124, 0x41 },
+               { 0xb120, 0x0 },
+               { 0xb124, 0x0 },
+               { 0xb120, 0x0 },
+               { 0xb124, 0x0 },
+       },
+       .config = (struct reg_sequence[]){
+               { 0xb138, 0xff },
+               { 0xb130, 0x5555 },
+       },
+       .num_regs_per_fuse = 2,
+};
+
+static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
+       .cpr_desc = &qcs404_cpr_desc,
+       .acc_desc = &qcs404_acc_desc,
+};
+
+static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
+                                             struct dev_pm_opp *opp)
+{
+       return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+       struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+       return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+       struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+       return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+                            struct device *dev)
+{
+       struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+       const struct acc_desc *acc_desc = drv->acc_desc;
+       int ret = 0;
+
+       mutex_lock(&drv->lock);
+
+       dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+       /*
+        * This driver only supports scaling voltage for a CPU cluster
+        * where all CPUs in the cluster share a single regulator.
+        * Therefore, save the struct device pointer only for the first
+        * CPU device that gets attached. There is no need to do any
+        * additional initialization when further CPUs get attached.
+        */
+       if (drv->attached_cpu_dev)
+               goto unlock;
+
+       /*
+        * cpr_scale_voltage() requires the direction (whether we are
+        * changing to a higher or lower OPP). The first time
+        * cpr_set_performance_state() is called, there is no previous
+        * performance state defined, so we call cpr_find_initial_corner(),
+        * which reads the CPU clock frequency set by the bootloader and
+        * lets us determine the direction on that first call.
+        */
+       drv->cpu_clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(drv->cpu_clk)) {
+               ret = PTR_ERR(drv->cpu_clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+               goto unlock;
+       }
+       drv->attached_cpu_dev = dev;
+
+       dev_dbg(drv->dev, "using cpu clk from: %s\n",
+               dev_name(drv->attached_cpu_dev));
+
+       /*
+        * Everything related to (virtual) corners has to be initialized
+        * here, when attaching to the power domain, because we need the
+        * maximum frequency of each fuse corner, and that is only known
+        * once the cpufreq driver has attached to us.
+        */
+       ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
+       if (ret < 0) {
+               dev_err(drv->dev, "could not get OPP count\n");
+               goto unlock;
+       }
+       drv->num_corners = ret;
+
+       if (drv->num_corners < 2) {
+               dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners);
+
+       drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
+                                   sizeof(*drv->corners),
+                                   GFP_KERNEL);
+       if (!drv->corners) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       ret = cpr_corner_init(drv);
+       if (ret)
+               goto unlock;
+
+       cpr_set_loop_allowed(drv);
+
+       ret = cpr_init_parameters(drv);
+       if (ret)
+               goto unlock;
+
+       /* Configure CPR HW but keep it disabled */
+       ret = cpr_config(drv);
+       if (ret)
+               goto unlock;
+
+       ret = cpr_find_initial_corner(drv);
+       if (ret)
+               goto unlock;
+
+       if (acc_desc->config)
+               regmap_multi_reg_write(drv->tcsr, acc_desc->config,
+                                      acc_desc->num_regs_per_fuse);
+
+       /* Enable ACC if required */
+       if (acc_desc->enable_mask)
+               regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+                                  acc_desc->enable_mask,
+                                  acc_desc->enable_mask);
+
+unlock:
+       mutex_unlock(&drv->lock);
+
+       return ret;
+}
+
+static int cpr_debug_info_show(struct seq_file *s, void *unused)
+{
+       u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+       u32 step_dn, step_up, error, error_lt0, busy;
+       struct cpr_drv *drv = s->private;
+       struct fuse_corner *fuse_corner;
+       struct corner *corner;
+
+       corner = drv->corner;
+       fuse_corner = corner->fuse_corner;
+
+       seq_printf(s, "corner, current_volt = %d uV\n",
+                      corner->last_uV);
+
+       ro_sel = fuse_corner->ring_osc_idx;
+       gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
+       seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
+
+       ctl = cpr_read(drv, REG_RBCPR_CTL);
+       seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
+
+       irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+       seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
+
+       reg = cpr_read(drv, REG_RBCPR_RESULT_0);
+       seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
+
+       step_dn = reg & 0x01;
+       step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+       seq_printf(s, "  [step_dn = %u", step_dn);
+
+       seq_printf(s, ", step_up = %u", step_up);
+
+       error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+                               & RBCPR_RESULT0_ERROR_STEPS_MASK;
+       seq_printf(s, ", error_steps = %u", error_steps);
+
+       error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+       seq_printf(s, ", error = %u", error);
+
+       error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+       seq_printf(s, ", error_lt_0 = %u", error_lt0);
+
+       busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+       seq_printf(s, ", busy = %u]\n", busy);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
+
+static void cpr_debugfs_init(struct cpr_drv *drv)
+{
+       drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
+
+       debugfs_create_file("debug_info", 0444, drv->debugfs,
+                           drv, &cpr_debug_info_fops);
+}
+
+static int cpr_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct device *dev = &pdev->dev;
+       struct cpr_drv *drv;
+       int irq, ret;
+       const struct cpr_acc_desc *data;
+       struct device_node *np;
+       u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+
+       data = of_device_get_match_data(dev);
+       if (!data || !data->cpr_desc || !data->acc_desc)
+               return -EINVAL;
+
+       drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+       if (!drv)
+               return -ENOMEM;
+       drv->dev = dev;
+       drv->desc = data->cpr_desc;
+       drv->acc_desc = data->acc_desc;
+
+       drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
+                                        sizeof(*drv->fuse_corners),
+                                        GFP_KERNEL);
+       if (!drv->fuse_corners)
+               return -ENOMEM;
+
+       np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+       if (!np)
+               return -ENODEV;
+
+       drv->tcsr = syscon_node_to_regmap(np);
+       of_node_put(np);
+       if (IS_ERR(drv->tcsr))
+               return PTR_ERR(drv->tcsr);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       drv->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(drv->base))
+               return PTR_ERR(drv->base);
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return -EINVAL;
+
+       drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
+       if (IS_ERR(drv->vdd_apc))
+               return PTR_ERR(drv->vdd_apc);
+
+       /*
+        * Initialize the fuse corners here, since they depend only
+        * on data in the eFuses.
+        * Everything related to (virtual) corners has to be
+        * initialized after attaching to the power domain,
+        * since it depends on the CPU's OPP table.
+        */
+       ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev);
+       if (ret)
+               return ret;
+
+       drv->cpr_fuses = cpr_get_fuses(drv);
+       if (IS_ERR(drv->cpr_fuses))
+               return PTR_ERR(drv->cpr_fuses);
+
+       ret = cpr_populate_ring_osc_idx(drv);
+       if (ret)
+               return ret;
+
+       ret = cpr_fuse_corner_init(drv);
+       if (ret)
+               return ret;
+
+       mutex_init(&drv->lock);
+
+       ret = devm_request_threaded_irq(dev, irq, NULL,
+                                       cpr_irq_handler,
+                                       IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+                                       "cpr", drv);
+       if (ret)
+               return ret;
+
+       drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
+                                         GFP_KERNEL);
+       if (!drv->pd.name)
+               return -EINVAL;
+
+       drv->pd.power_off = cpr_power_off;
+       drv->pd.power_on = cpr_power_on;
+       drv->pd.set_performance_state = cpr_set_performance_state;
+       drv->pd.opp_to_performance_state = cpr_get_performance_state;
+       drv->pd.attach_dev = cpr_pd_attach_dev;
+
+       ret = pm_genpd_init(&drv->pd, NULL, true);
+       if (ret)
+               return ret;
+
+       ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, drv);
+       cpr_debugfs_init(drv);
+
+       return 0;
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+       struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+       if (cpr_is_allowed(drv)) {
+               cpr_ctl_disable(drv);
+               cpr_irq_set(drv, 0);
+       }
+
+       of_genpd_del_provider(pdev->dev.of_node);
+       pm_genpd_remove(&drv->pd);
+
+       debugfs_remove_recursive(drv->debugfs);
+
+       return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+       { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
+       { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+       .probe          = cpr_probe,
+       .remove         = cpr_remove,
+       .driver         = {
+               .name   = "qcom-cpr",
+               .of_match_table = cpr_match_table,
+       },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
index a67701ed93e8b065817dcdbe310772a2ce8587d1..73257cf107d973b034b2e3e36076d558f44efe10 100644 (file)
@@ -980,6 +980,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
        INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
        INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
        INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
+       INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core),
 
        INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
        INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
@@ -989,6 +990,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
        INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
        INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core),
        INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
+       INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),
 
        INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
        INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
@@ -1295,6 +1297,9 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        int ret;
 
+       if (!rapl_defaults)
+               return ERR_PTR(-ENODEV);
+
        rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
        if (!rp)
                return ERR_PTR(-ENOMEM);
index b45d2b86d8ca2d1f0d3c3973448827530ccd1256..b0d1b8d264fa56eb0ec8be1bdb8a627f4333926a 100644 (file)
@@ -121,7 +121,7 @@ config PTP_1588_CLOCK_KVM
 
 config PTP_1588_CLOCK_IDTCM
        tristate "IDT CLOCKMATRIX as PTP clock"
-       depends on PTP_1588_CLOCK
+       depends on PTP_1588_CLOCK && I2C
        default n
        help
          This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP
index e60eab7f8a616f90a2d0c061368610ef994100c7..b84f16bbd6f24fb03e8268dd5d23c9848fdc8381 100644 (file)
@@ -166,10 +166,11 @@ static struct posix_clock_operations ptp_clock_ops = {
        .read           = ptp_read,
 };
 
-static void delete_ptp_clock(struct posix_clock *pc)
+static void ptp_clock_release(struct device *dev)
 {
-       struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+       struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
 
+       ptp_cleanup_pin_groups(ptp);
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        ida_simple_remove(&ptp_clocks_map, ptp->index);
@@ -213,7 +214,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        }
 
        ptp->clock.ops = ptp_clock_ops;
-       ptp->clock.release = delete_ptp_clock;
        ptp->info = info;
        ptp->devid = MKDEV(major, index);
        ptp->index = index;
@@ -236,15 +236,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        if (err)
                goto no_pin_groups;
 
-       /* Create a new device in our class. */
-       ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
-                                            ptp, ptp->pin_attr_groups,
-                                            "ptp%d", ptp->index);
-       if (IS_ERR(ptp->dev)) {
-               err = PTR_ERR(ptp->dev);
-               goto no_device;
-       }
-
        /* Register a new PPS source. */
        if (info->pps) {
                struct pps_source_info pps;
@@ -260,8 +251,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                }
        }
 
-       /* Create a posix clock. */
-       err = posix_clock_register(&ptp->clock, ptp->devid);
+       /* Initialize a new device of our class in our clock structure. */
+       device_initialize(&ptp->dev);
+       ptp->dev.devt = ptp->devid;
+       ptp->dev.class = ptp_class;
+       ptp->dev.parent = parent;
+       ptp->dev.groups = ptp->pin_attr_groups;
+       ptp->dev.release = ptp_clock_release;
+       dev_set_drvdata(&ptp->dev, ptp);
+       dev_set_name(&ptp->dev, "ptp%d", ptp->index);
+
+       /* Create a posix clock and link it to the device. */
+       err = posix_clock_register(&ptp->clock, &ptp->dev);
        if (err) {
                pr_err("failed to create posix clock\n");
                goto no_clock;
@@ -273,8 +274,6 @@ no_clock:
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);
 no_pps:
-       device_destroy(ptp_class, ptp->devid);
-no_device:
        ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
        if (ptp->kworker)
@@ -304,10 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);
 
-       device_destroy(ptp_class, ptp->devid);
-       ptp_cleanup_pin_groups(ptp);
-
        posix_clock_unregister(&ptp->clock);
+
        return 0;
 }
 EXPORT_SYMBOL(ptp_clock_unregister);
index 9171d42468fdb05321814f1bae8c239ebe22693a..6b97155148f11931b89c95e7c9d84e0f0e960470 100644 (file)
@@ -28,7 +28,7 @@ struct timestamp_event_queue {
 
 struct ptp_clock {
        struct posix_clock clock;
-       struct device *dev;
+       struct device dev;
        struct ptp_clock_info *info;
        dev_t devid;
        int index; /* index into clocks.map */
index 74eb5af7295f54cf62903367eff85cbd0220dc0f..97bfdd47954fb1185e03279411d3833334098188 100644 (file)
@@ -194,6 +194,18 @@ config REGULATOR_BD70528
          This driver can also be built as a module. If so, the module
          will be called bd70528-regulator.
 
+config REGULATOR_BD71828
+       tristate "ROHM BD71828 Power Regulator"
+       depends on MFD_ROHM_BD71828
+       select REGULATOR_ROHM
+       help
+         This driver supports voltage regulators on the ROHM BD71828 PMIC.
+         This will enable support for the software controllable buck
+         and LDO regulators.
+
+         This driver can also be built as a module. If so, the module
+         will be called bd71828-regulator.
+
 config REGULATOR_BD718XX
        tristate "ROHM BD71837 Power Regulator"
        depends on MFD_ROHM_BD718XX
@@ -600,6 +612,27 @@ config REGULATOR_MCP16502
          through the regulator interface. In addition it enables
          suspend-to-ram/standby transition.
 
+config REGULATOR_MP8859
+       tristate "MPS MP8859 regulator driver"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         Say y here to support the MP8859 voltage regulator. This driver
+         supports basic operations (get/set voltage) through the regulator
+         interface.
+         Say M here if you want to include support for the regulator as a
+         module. The module will be named "mp8859".
+
+config REGULATOR_MPQ7920
+       tristate "Monolithic MPQ7920 PMIC"
+       depends on I2C && OF
+       select REGMAP_I2C
+       help
+         Say y here to support the MPQ7920 PMIC. This will enable support
+         for the four software controllable buck and five LDO regulators.
+         This driver supports the control of different power rails of the
+         device through the regulator interface.
+
 config REGULATOR_MT6311
        tristate "MediaTek MT6311 PMIC"
        depends on I2C
@@ -1077,6 +1110,13 @@ config REGULATOR_VEXPRESS
          This driver provides support for voltage regulators available
          on the ARM Ltd's Versatile Express platform.
 
+config REGULATOR_VQMMC_IPQ4019
+       tristate "IPQ4019 VQMMC SD LDO regulator support"
+       depends on ARCH_QCOM
+       help
+         This driver provides support for the VQMMC LDO I/O
+         voltage regulator of the IPQ4019 SD/EMMC controller.
+
 config REGULATOR_WM831X
        tristate "Wolfson Microelectronics WM831x PMIC regulators"
        depends on MFD_WM831X
index 2210ba56f9bd115e6229e02861d7009358d977dd..07bc977c52b04df1cb6de1478f480872607b1452 100644 (file)
@@ -28,6 +28,7 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
 obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
 obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
 obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
+obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
 obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
 obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
 obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
@@ -77,6 +78,8 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
 obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
+obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o
+obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
 obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
 obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
 obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
@@ -132,6 +135,7 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
 obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
 obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
 obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
+obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
index 989506bd90b193bc696abf90096e0c4ae1a37efa..16f0c857003606958dd0f1a415d27327eb182847 100644 (file)
@@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
                int i;
 
                for (i = 0; i < rate_count; i++) {
-                       if (ramp <= slew_rates[i])
-                               cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
-                       else
+                       if (ramp > slew_rates[i])
                                break;
+
+                       if (id == AXP20X_DCDC2)
+                               cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i);
+                       else
+                               cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
                }
 
                if (cfg == 0xff) {
@@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = {
                 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
        AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
                 AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
-                AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
+                AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
        AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
                 AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
                 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
index ec764022621f194ce41045eaddaf6203f71e3ef4..5bf8a2dc5fe778302d3e79525e1ef4d4f70f3282 100644 (file)
@@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = {
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_time_sel = regulator_set_voltage_time_sel,
-       .set_ramp_delay = bd70528_set_ramp_delay,
 };
 
 static const struct regulator_ops bd70528_led_ops = {
diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
new file mode 100644 (file)
index 0000000..b2fa17b
--- /dev/null
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019 ROHM Semiconductors
+// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver
+//
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+struct reg_init {
+       unsigned int reg;
+       unsigned int mask;
+       unsigned int val;
+};
+
+struct bd71828_regulator_data {
+       struct regulator_desc desc;
+       const struct rohm_dvs_config dvs;
+       const struct reg_init *reg_inits;
+       int reg_init_amnt;
+};
+
+static const struct reg_init buck1_inits[] = {
+       /*
+        * DVS Buck voltages can be changed by register values or via GPIO.
+        * Use register accesses by default.
+        */
+       {
+               .reg = BD71828_REG_PS_CTRL_1,
+               .mask = BD71828_MASK_DVS_BUCK1_CTRL,
+               .val = BD71828_DVS_BUCK1_CTRL_I2C,
+       },
+};
+
+static const struct reg_init buck2_inits[] = {
+       {
+               .reg = BD71828_REG_PS_CTRL_1,
+               .mask = BD71828_MASK_DVS_BUCK2_CTRL,
+               .val = BD71828_DVS_BUCK2_CTRL_I2C,
+       },
+};
+
+static const struct reg_init buck6_inits[] = {
+       {
+               .reg = BD71828_REG_PS_CTRL_1,
+               .mask = BD71828_MASK_DVS_BUCK6_CTRL,
+               .val = BD71828_DVS_BUCK6_CTRL_I2C,
+       },
+};
+
+static const struct reg_init buck7_inits[] = {
+       {
+               .reg = BD71828_REG_PS_CTRL_1,
+               .mask = BD71828_MASK_DVS_BUCK7_CTRL,
+               .val = BD71828_DVS_BUCK7_CTRL_I2C,
+       },
+};
+
+static const struct regulator_linear_range bd71828_buck1267_volts[] = {
+       REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250),
+       REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck3_volts[] = {
+       REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000),
+       REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck4_volts[] = {
+       REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000),
+       REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck5_volts[] = {
+       REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000),
+       REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_ldo_volts[] = {
+       REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000),
+       REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0),
+};
+
+static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+       unsigned int val;
+
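+       /* Map the requested ramp rate (in uV/us) to the 2-bit field value */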
+       switch (ramp_delay) {
+       case 1 ... 2500:
+               val = 0;
+               break;
+       case 2501 ... 5000:
+               val = 1;
+               break;
+       case 5001 ... 10000:
+               val = 2;
+               break;
+       case 10001 ... 20000:
+               val = 3;
+               break;
+       default:
+               val = 3;
+               dev_err(&rdev->dev,
+                       "ramp_delay: %d not supported, setting 20mV/uS\n",
+                       ramp_delay);
+       }
+
+       /*
+        * On BD71828 the ramp delay level control register is at offset +2
+        * from the enable register.
+        */
+       return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2,
+                                 BD71828_MASK_RAMP_DELAY,
+                                 val << (ffs(BD71828_MASK_RAMP_DELAY) - 1));
+}
+
+static int buck_set_hw_dvs_levels(struct device_node *np,
+                                 const struct regulator_desc *desc,
+                                 struct regulator_config *cfg)
+{
+       struct bd71828_regulator_data *data;
+
+       data = container_of(desc, struct bd71828_regulator_data, desc);
+
+       return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
+}
+
+static int ldo6_parse_dt(struct device_node *np,
+                        const struct regulator_desc *desc,
+                        struct regulator_config *cfg)
+{
+       int ret, i;
+       uint32_t uv = 0;
+       unsigned int en;
+       struct regmap *regmap = cfg->regmap;
+       static const char * const props[] = { "rohm,dvs-run-voltage",
+                                             "rohm,dvs-idle-voltage",
+                                             "rohm,dvs-suspend-voltage",
+                                             "rohm,dvs-lpsr-voltage" };
+       unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN,
+                              BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN };
+
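+       /*
+        * LDO6 has a fixed output voltage; the rohm,dvs-*-voltage properties
+        * are used only to enable or disable the LDO for each run level.
+        */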
+       for (i = 0; i < ARRAY_SIZE(props); i++) {
+               ret = of_property_read_u32(np, props[i], &uv);
+               if (ret) {
+                       if (ret != -EINVAL)
+                               return ret;
+                       continue;
+               }
+               if (uv)
+                       en = 0xffffffff;
+               else
+                       en = 0;
+
+               ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+static const struct regulator_ops bd71828_buck_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_dvs_buck_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .set_ramp_delay = bd71828_set_ramp_delay,
+};
+
+static const struct regulator_ops bd71828_ldo_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_ldo6_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct bd71828_regulator_data bd71828_rdata[] = {
+       {
+               .desc = {
+                       .name = "buck1",
+                       .of_match = of_match_ptr("BUCK1"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_BUCK1,
+                       .ops = &bd71828_dvs_buck_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_buck1267_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+                       .n_voltages = BD71828_BUCK1267_VOLTS,
+                       .enable_reg = BD71828_REG_BUCK1_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_BUCK1_VOLT,
+                       .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_BUCK1_VOLT,
+                       .run_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT,
+                       .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+                       .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+                       /*
+                        * The LPSR voltage is the same as the SUSPEND
+                        * voltage. Allow setting it so that the regulator
+                        * can be enabled in the LPSR state.
+                        */
+                       .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+                       .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+               },
+               .reg_inits = buck1_inits,
+               .reg_init_amnt = ARRAY_SIZE(buck1_inits),
+       },
+       {
+               .desc = {
+                       .name = "buck2",
+                       .of_match = of_match_ptr("BUCK2"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_BUCK2,
+                       .ops = &bd71828_dvs_buck_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_buck1267_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+                       .n_voltages = BD71828_BUCK1267_VOLTS,
+                       .enable_reg = BD71828_REG_BUCK2_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_BUCK2_VOLT,
+                       .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_BUCK2_VOLT,
+                       .run_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT,
+                       .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+                       .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+                       .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+                       .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+               },
+               .reg_inits = buck2_inits,
+               .reg_init_amnt = ARRAY_SIZE(buck2_inits),
+       },
+       {
+               .desc = {
+                       .name = "buck3",
+                       .of_match = of_match_ptr("BUCK3"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_BUCK3,
+                       .ops = &bd71828_buck_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_buck3_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts),
+                       .n_voltages = BD71828_BUCK3_VOLTS,
+                       .enable_reg = BD71828_REG_BUCK3_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_BUCK3_VOLT,
+                       .vsel_mask = BD71828_MASK_BUCK3_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       /*
+                        * BUCK3 only supports a single voltage for all
+                        * states. The voltage can still be individually
+                        * enabled for each state though, so allow setting
+                        * all states in order to support enabling the
+                        * power rail in different states.
+                        */
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_BUCK3_VOLT,
+                       .idle_reg = BD71828_REG_BUCK3_VOLT,
+                       .suspend_reg = BD71828_REG_BUCK3_VOLT,
+                       .lpsr_reg = BD71828_REG_BUCK3_VOLT,
+                       .run_mask = BD71828_MASK_BUCK3_VOLT,
+                       .idle_mask = BD71828_MASK_BUCK3_VOLT,
+                       .suspend_mask = BD71828_MASK_BUCK3_VOLT,
+                       .lpsr_mask = BD71828_MASK_BUCK3_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+       },
+       {
+               .desc = {
+                       .name = "buck4",
+                       .of_match = of_match_ptr("BUCK4"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_BUCK4,
+                       .ops = &bd71828_buck_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_buck4_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts),
+                       .n_voltages = BD71828_BUCK4_VOLTS,
+                       .enable_reg = BD71828_REG_BUCK4_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_BUCK4_VOLT,
+                       .vsel_mask = BD71828_MASK_BUCK4_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       /*
+                        * BUCK4 only supports a single voltage for all
+                        * states. The voltage can still be individually
+                        * enabled for each state though, so allow setting
+                        * all states in order to support enabling the
+                        * power rail in different states.
+                        */
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_BUCK4_VOLT,
+                       .idle_reg = BD71828_REG_BUCK4_VOLT,
+                       .suspend_reg = BD71828_REG_BUCK4_VOLT,
+                       .lpsr_reg = BD71828_REG_BUCK4_VOLT,
+                       .run_mask = BD71828_MASK_BUCK4_VOLT,
+                       .idle_mask = BD71828_MASK_BUCK4_VOLT,
+                       .suspend_mask = BD71828_MASK_BUCK4_VOLT,
+                       .lpsr_mask = BD71828_MASK_BUCK4_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+       },
+       {
+               .desc = {
+                       .name = "buck5",
+                       .of_match = of_match_ptr("BUCK5"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_BUCK5,
+                       .ops = &bd71828_buck_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_buck5_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts),
+                       .n_voltages = BD71828_BUCK5_VOLTS,
+                       .enable_reg = BD71828_REG_BUCK5_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_BUCK5_VOLT,
+                       .vsel_mask = BD71828_MASK_BUCK5_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       /*
+                        * BUCK5 only supports a single voltage for all
+                        * states. The voltage can still be individually
+                        * enabled for each state though, so allow setting
+                        * all states in order to support enabling the
+                        * power rail in different states.
+                        */
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_BUCK5_VOLT,
+                       .idle_reg = BD71828_REG_BUCK5_VOLT,
+                       .suspend_reg = BD71828_REG_BUCK5_VOLT,
+                       .lpsr_reg = BD71828_REG_BUCK5_VOLT,
+                       .run_mask = BD71828_MASK_BUCK5_VOLT,
+                       .idle_mask = BD71828_MASK_BUCK5_VOLT,
+                       .suspend_mask = BD71828_MASK_BUCK5_VOLT,
+                       .lpsr_mask = BD71828_MASK_BUCK5_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+       },
+       {
+               .desc = {
+                       .name = "buck6",
+                       .of_match = of_match_ptr("BUCK6"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_BUCK6,
+                       .ops = &bd71828_dvs_buck_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_buck1267_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+                       .n_voltages = BD71828_BUCK1267_VOLTS,
+                       .enable_reg = BD71828_REG_BUCK6_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_BUCK6_VOLT,
+                       .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_BUCK6_VOLT,
+                       .run_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT,
+                       .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+                       .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+                       .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+                       .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+               },
+               .reg_inits = buck6_inits,
+               .reg_init_amnt = ARRAY_SIZE(buck6_inits),
+       },
+       {
+               .desc = {
+                       .name = "buck7",
+                       .of_match = of_match_ptr("BUCK7"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_BUCK7,
+                       .ops = &bd71828_dvs_buck_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_buck1267_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+                       .n_voltages = BD71828_BUCK1267_VOLTS,
+                       .enable_reg = BD71828_REG_BUCK7_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_BUCK7_VOLT,
+                       .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_BUCK7_VOLT,
+                       .run_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT,
+                       .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+                       .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+                       .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+                       .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+               },
+               .reg_inits = buck7_inits,
+               .reg_init_amnt = ARRAY_SIZE(buck7_inits),
+       },
+       {
+               .desc = {
+                       .name = "ldo1",
+                       .of_match = of_match_ptr("LDO1"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_LDO1,
+                       .ops = &bd71828_ldo_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_ldo_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+                       .n_voltages = BD71828_LDO_VOLTS,
+                       .enable_reg = BD71828_REG_LDO1_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_LDO1_VOLT,
+                       .vsel_mask = BD71828_MASK_LDO_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       /*
+                        * LDO1 only supports a single voltage for all
+                        * states. The voltage can still be individually
+                        * enabled for each state though, so allow setting
+                        * all states in order to support enabling the
+                        * power rail in different states.
+                        */
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_LDO1_VOLT,
+                       .idle_reg = BD71828_REG_LDO1_VOLT,
+                       .suspend_reg = BD71828_REG_LDO1_VOLT,
+                       .lpsr_reg = BD71828_REG_LDO1_VOLT,
+                       .run_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_mask = BD71828_MASK_LDO_VOLT,
+                       .suspend_mask = BD71828_MASK_LDO_VOLT,
+                       .lpsr_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+       }, {
+               .desc = {
+                       .name = "ldo2",
+                       .of_match = of_match_ptr("LDO2"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_LDO2,
+                       .ops = &bd71828_ldo_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_ldo_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+                       .n_voltages = BD71828_LDO_VOLTS,
+                       .enable_reg = BD71828_REG_LDO2_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_LDO2_VOLT,
+                       .vsel_mask = BD71828_MASK_LDO_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       /*
+                        * LDO2 only supports a single voltage for all
+                        * states. The voltage can still be individually
+                        * enabled for each state though, so allow setting
+                        * all states in order to support enabling the
+                        * power rail in different states.
+                        */
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_LDO2_VOLT,
+                       .idle_reg = BD71828_REG_LDO2_VOLT,
+                       .suspend_reg = BD71828_REG_LDO2_VOLT,
+                       .lpsr_reg = BD71828_REG_LDO2_VOLT,
+                       .run_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_mask = BD71828_MASK_LDO_VOLT,
+                       .suspend_mask = BD71828_MASK_LDO_VOLT,
+                       .lpsr_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+       }, {
+               .desc = {
+                       .name = "ldo3",
+                       .of_match = of_match_ptr("LDO3"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_LDO3,
+                       .ops = &bd71828_ldo_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_ldo_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+                       .n_voltages = BD71828_LDO_VOLTS,
+                       .enable_reg = BD71828_REG_LDO3_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_LDO3_VOLT,
+                       .vsel_mask = BD71828_MASK_LDO_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       /*
+                        * LDO3 only supports a single voltage for all
+                        * states. The voltage can still be individually
+                        * enabled for each state though, so allow setting
+                        * all states in order to support enabling the
+                        * power rail in different states.
+                        */
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_LDO3_VOLT,
+                       .idle_reg = BD71828_REG_LDO3_VOLT,
+                       .suspend_reg = BD71828_REG_LDO3_VOLT,
+                       .lpsr_reg = BD71828_REG_LDO3_VOLT,
+                       .run_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_mask = BD71828_MASK_LDO_VOLT,
+                       .suspend_mask = BD71828_MASK_LDO_VOLT,
+                       .lpsr_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+
+       }, {
+               .desc = {
+                       .name = "ldo4",
+                       .of_match = of_match_ptr("LDO4"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_LDO4,
+                       .ops = &bd71828_ldo_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_ldo_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+                       .n_voltages = BD71828_LDO_VOLTS,
+                       .enable_reg = BD71828_REG_LDO4_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_LDO4_VOLT,
+                       .vsel_mask = BD71828_MASK_LDO_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       /*
+                        * LDO4 only supports a single voltage for all
+                        * states. The voltage can still be individually
+                        * enabled for each state though, so allow setting
+                        * all states in order to support enabling the
+                        * power rail in different states.
+                        */
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_LDO4_VOLT,
+                       .idle_reg = BD71828_REG_LDO4_VOLT,
+                       .suspend_reg = BD71828_REG_LDO4_VOLT,
+                       .lpsr_reg = BD71828_REG_LDO4_VOLT,
+                       .run_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_mask = BD71828_MASK_LDO_VOLT,
+                       .suspend_mask = BD71828_MASK_LDO_VOLT,
+                       .lpsr_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+       }, {
+               .desc = {
+                       .name = "ldo5",
+                       .of_match = of_match_ptr("LDO5"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_LDO5,
+                       .ops = &bd71828_ldo_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_ldo_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+                       .n_voltages = BD71828_LDO_VOLTS,
+                       .enable_reg = BD71828_REG_LDO5_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_LDO5_VOLT,
+                       .vsel_mask = BD71828_MASK_LDO_VOLT,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+                       .owner = THIS_MODULE,
+               },
+               /*
+                * LDO5 is special. Its vsel setting can be taken from one of
+                * two different registers, selected by GPIO.
+                *
+                * This driver only supports the configuration where
+                * BD71828_REG_LDO5_VOLT_L is used.
+                */
+               .dvs = {
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_LDO5_VOLT,
+                       .idle_reg = BD71828_REG_LDO5_VOLT,
+                       .suspend_reg = BD71828_REG_LDO5_VOLT,
+                       .lpsr_reg = BD71828_REG_LDO5_VOLT,
+                       .run_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_mask = BD71828_MASK_LDO_VOLT,
+                       .suspend_mask = BD71828_MASK_LDO_VOLT,
+                       .lpsr_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+
+       }, {
+               .desc = {
+                       .name = "ldo6",
+                       .of_match = of_match_ptr("LDO6"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_LDO6,
+                       .ops = &bd71828_ldo6_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .fixed_uV = BD71828_LDO_6_VOLTAGE,
+                       .n_voltages = 1,
+                       .enable_reg = BD71828_REG_LDO6_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .owner = THIS_MODULE,
+                       /*
+                        * LDO6 only supports enable/disable for all states.
+                        * Voltage for LDO6 is fixed.
+                        */
+                       .of_parse_cb = ldo6_parse_dt,
+               },
+       }, {
+               .desc = {
+                       /* SNVS LDO in data-sheet */
+                       .name = "ldo7",
+                       .of_match = of_match_ptr("LDO7"),
+                       .regulators_node = of_match_ptr("regulators"),
+                       .id = BD71828_LDO_SNVS,
+                       .ops = &bd71828_ldo_ops,
+                       .type = REGULATOR_VOLTAGE,
+                       .linear_ranges = bd71828_ldo_volts,
+                       .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+                       .n_voltages = BD71828_LDO_VOLTS,
+                       .enable_reg = BD71828_REG_LDO7_EN,
+                       .enable_mask = BD71828_MASK_RUN_EN,
+                       .vsel_reg = BD71828_REG_LDO7_VOLT,
+                       .vsel_mask = BD71828_MASK_LDO_VOLT,
+                       .owner = THIS_MODULE,
+                       .of_parse_cb = buck_set_hw_dvs_levels,
+               },
+               .dvs = {
+                       /*
+                        * LDO7 only supports a single voltage for all
+                        * states. The voltage can still be individually
+                        * enabled for each state though, so allow setting
+                        * all states in order to support enabling the
+                        * power rail in different states.
+                        */
+                       .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+                                    ROHM_DVS_LEVEL_SUSPEND |
+                                    ROHM_DVS_LEVEL_LPSR,
+                       .run_reg = BD71828_REG_LDO7_VOLT,
+                       .idle_reg = BD71828_REG_LDO7_VOLT,
+                       .suspend_reg = BD71828_REG_LDO7_VOLT,
+                       .lpsr_reg = BD71828_REG_LDO7_VOLT,
+                       .run_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_mask = BD71828_MASK_LDO_VOLT,
+                       .suspend_mask = BD71828_MASK_LDO_VOLT,
+                       .lpsr_mask = BD71828_MASK_LDO_VOLT,
+                       .idle_on_mask = BD71828_MASK_IDLE_EN,
+                       .suspend_on_mask = BD71828_MASK_SUSP_EN,
+                       .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+               },
+
+       },
+};
+
+static int bd71828_probe(struct platform_device *pdev)
+{
+       struct rohm_regmap_dev *bd71828;
+       int i, j, ret;
+       struct regulator_config config = {
+               .dev = pdev->dev.parent,
+       };
+
+       bd71828 = dev_get_drvdata(pdev->dev.parent);
+       if (!bd71828) {
+               dev_err(&pdev->dev, "No MFD driver data\n");
+               return -EINVAL;
+       }
+
+       config.regmap = bd71828->regmap;
+
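+       /*
+        * Register each regulator, then apply its chip specific register
+        * initializations (e.g. selecting I2C control for the DVS bucks).
+        */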
+       for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) {
+               struct regulator_dev *rdev;
+               const struct bd71828_regulator_data *rd;
+
+               rd = &bd71828_rdata[i];
+               rdev = devm_regulator_register(&pdev->dev,
+                                              &rd->desc, &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(&pdev->dev,
+                               "failed to register %s regulator\n",
+                               rd->desc.name);
+                       return PTR_ERR(rdev);
+               }
+               for (j = 0; j < rd->reg_init_amnt; j++) {
+                       ret = regmap_update_bits(bd71828->regmap,
+                                                rd->reg_inits[j].reg,
+                                                rd->reg_inits[j].mask,
+                                                rd->reg_inits[j].val);
+                       if (ret) {
+                               dev_err(&pdev->dev,
+                                       "regulator %s init failed\n",
+                                       rd->desc.name);
+                               return ret;
+                       }
+               }
+       }
+       return 0;
+}
+
+static struct platform_driver bd71828_regulator = {
+       .driver = {
+               .name = "bd71828-pmic"
+       },
+       .probe = bd71828_probe,
+};
+
+module_platform_driver(bd71828_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71828 voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd71828-pmic");
index 13a43eee2e46b72141aef323205e8705b594c22d..8f9b2d8eaf10c98e51e53e29b94f00f47a30197f 100644 (file)
@@ -1142,28 +1142,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
        },
 };
 
-struct bd718xx_pmic_inits {
-       const struct bd718xx_regulator_data *r_datas;
-       unsigned int r_amount;
-};
-
 static int bd718xx_probe(struct platform_device *pdev)
 {
        struct bd718xx *mfd;
        struct regulator_config config = { 0 };
-       struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = {
-               [ROHM_CHIP_TYPE_BD71837] = {
-                       .r_datas = bd71837_regulators,
-                       .r_amount = ARRAY_SIZE(bd71837_regulators),
-               },
-               [ROHM_CHIP_TYPE_BD71847] = {
-                       .r_datas = bd71847_regulators,
-                       .r_amount = ARRAY_SIZE(bd71847_regulators),
-               },
-       };
-
        int i, j, err;
        bool use_snvs;
+       const struct bd718xx_regulator_data *reg_data;
+       unsigned int num_reg_data;
 
        mfd = dev_get_drvdata(pdev->dev.parent);
        if (!mfd) {
@@ -1172,8 +1158,16 @@ static int bd718xx_probe(struct platform_device *pdev)
                goto err;
        }
 
-       if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT ||
-           !pmic_regulators[mfd->chip.chip_type].r_datas) {
+       switch (mfd->chip.chip_type) {
+       case ROHM_CHIP_TYPE_BD71837:
+               reg_data = bd71837_regulators;
+               num_reg_data = ARRAY_SIZE(bd71837_regulators);
+               break;
+       case ROHM_CHIP_TYPE_BD71847:
+               reg_data = bd71847_regulators;
+               num_reg_data = ARRAY_SIZE(bd71847_regulators);
+               break;
+       default:
                dev_err(&pdev->dev, "Unsupported chip type\n");
                err = -EINVAL;
                goto err;
@@ -1215,13 +1209,13 @@ static int bd718xx_probe(struct platform_device *pdev)
                }
        }
 
-       for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) {
+       for (i = 0; i < num_reg_data; i++) {
 
                const struct regulator_desc *desc;
                struct regulator_dev *rdev;
                const struct bd718xx_regulator_data *r;
 
-               r = &pmic_regulators[mfd->chip.chip_type].r_datas[i];
+               r = &reg_data[i];
                desc = &r->desc;
 
                config.dev = pdev->dev.parent;
index 679ad3d2ed23dad04fa8d655965b968a5ea1644e..d015d99cb59d95ab1f5799b6da90930cf1f4cd2a 100644 (file)
@@ -1198,6 +1198,10 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
                        return -EINVAL;
                }
 
+               /* no need to loop voltages if range is continuous */
+               if (rdev->desc->continuous_voltage_range)
+                       return 0;
+
                /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
                for (i = 0; i < count; i++) {
                        int     value;
@@ -1938,8 +1942,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
        regulator = create_regulator(rdev, dev, id);
        if (regulator == NULL) {
                regulator = ERR_PTR(-ENOMEM);
-               put_device(&rdev->dev);
                module_put(rdev->owner);
+               put_device(&rdev->dev);
                return regulator;
        }
 
@@ -2063,13 +2067,13 @@ static void _regulator_put(struct regulator *regulator)
 
        rdev->open_count--;
        rdev->exclusive = 0;
-       put_device(&rdev->dev);
        regulator_unlock(rdev);
 
        kfree_const(regulator->supply_name);
        kfree(regulator);
 
        module_put(rdev->owner);
+       put_device(&rdev->dev);
 }
 
 /**
@@ -3466,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev);
 
 static int regulator_limit_voltage_step(struct regulator_dev *rdev,
                                        int *current_uV, int *min_uV)
@@ -4030,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
                return ret;
        return ret - rdev->constraints->uV_offset;
 }
+EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
 
 /**
  * regulator_get_voltage - get regulator output voltage
@@ -5002,6 +5008,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
        struct regulator_dev *rdev;
        bool dangling_cfg_gpiod = false;
        bool dangling_of_gpiod = false;
+       bool reg_device_fail = false;
        struct device *dev;
        int ret, i;
 
@@ -5187,7 +5194,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
        dev_set_drvdata(&rdev->dev, rdev);
        ret = device_register(&rdev->dev);
        if (ret != 0) {
-               put_device(&rdev->dev);
+               reg_device_fail = true;
                goto unset_supplies;
        }
 
@@ -5218,7 +5225,10 @@ wash:
 clean:
        if (dangling_of_gpiod)
                gpiod_put(config->ena_gpiod);
-       kfree(rdev);
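+       /*
+        * Once device_register() has been called, even unsuccessfully, the
+        * device's release callback owns rdev, so drop the reference instead
+        * of freeing it directly.
+        */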
+       if (reg_device_fail)
+               put_device(&rdev->dev);
+       else
+               kfree(rdev);
        kfree(config);
 rinse:
        if (dangling_cfg_gpiod)
index f9448ed50e0599a853878d67be173828ce73fbdf..0cdeb61865299f5a7ec819d46d62264cf86c4262 100644 (file)
@@ -131,8 +131,7 @@ static const struct of_device_id da9210_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, da9210_dt_ids);
 
-static int da9210_i2c_probe(struct i2c_client *i2c,
-                           const struct i2c_device_id *id)
+static int da9210_i2c_probe(struct i2c_client *i2c)
 {
        struct da9210 *chip;
        struct device *dev = &i2c->dev;
@@ -228,7 +227,7 @@ static struct i2c_driver da9210_regulator_driver = {
                .name = "da9210",
                .of_match_table = of_match_ptr(da9210_dt_ids),
        },
-       .probe = da9210_i2c_probe,
+       .probe_new = da9210_i2c_probe,
        .id_table = da9210_i2c_id,
 };
 
index 523dc1b9582672c1a50918495f7042fed491953b..2ea4362ffa5c5c559a60c1fd73ae5c5df74cc911 100644 (file)
@@ -416,8 +416,7 @@ static int da9211_regulator_init(struct da9211 *chip)
 /*
  * I2C driver interface functions
  */
-static int da9211_i2c_probe(struct i2c_client *i2c,
-               const struct i2c_device_id *id)
+static int da9211_i2c_probe(struct i2c_client *i2c)
 {
        struct da9211 *chip;
        int error, ret;
@@ -526,7 +525,7 @@ static struct i2c_driver da9211_regulator_driver = {
                .name = "da9211",
                .of_match_table = of_match_ptr(da9211_dt_ids),
        },
-       .probe = da9211_i2c_probe,
+       .probe_new = da9211_i2c_probe,
        .id_table = da9211_i2c_id,
 };
 
index ca3dc3f3bb292c679e8521d12b082b5e82447349..bb16c465426ef80ad228a4f6efe7b86bc06ef10b 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/regulator/driver.h>
 #include <linux/module.h>
 
+#include "internal.h"
+
 /**
  * regulator_is_enabled_regmap - standard is_enabled() for regmap users
  *
@@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
                consumers[i].supply = supply_names[i];
 }
 EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names);
+
+/**
+ * regulator_is_equal - test whether two regulators are the same
+ *
+ * @reg1: first regulator to operate on
+ * @reg2: second regulator to operate on
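+ *
+ * Return: true if reg1 and reg2 reference the same regulator_dev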
+ */
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+       return reg1->rdev == reg2->rdev;
+}
+EXPORT_SYMBOL_GPL(regulator_is_equal);
index 978f5e903cae1676e778497cb46317fbd5d32606..cfb765986d0d97d096b080e39a87bcf86c352b96 100644 (file)
@@ -137,8 +137,7 @@ static const struct regmap_config isl9305_regmap = {
        .cache_type = REGCACHE_RBTREE,
 };
 
-static int isl9305_i2c_probe(struct i2c_client *i2c,
-                            const struct i2c_device_id *id)
+static int isl9305_i2c_probe(struct i2c_client *i2c)
 {
        struct regulator_config config = { };
        struct isl9305_pdata *pdata = i2c->dev.platform_data;
@@ -198,7 +197,7 @@ static struct i2c_driver isl9305_regulator_driver = {
                .name = "isl9305",
                .of_match_table = of_match_ptr(isl9305_dt_ids),
        },
-       .probe = isl9305_i2c_probe,
+       .probe_new = isl9305_i2c_probe,
        .id_table = isl9305_i2c_id,
 };
 
index bc96e65ef7c0dd3f59035022d43be82d51f15734..8be252f81b091c88282123d37c13a52a5007927e 100644 (file)
@@ -400,8 +400,7 @@ static int setup_regulators(struct lp3971 *lp3971,
        return 0;
 }
 
-static int lp3971_i2c_probe(struct i2c_client *i2c,
-                           const struct i2c_device_id *id)
+static int lp3971_i2c_probe(struct i2c_client *i2c)
 {
        struct lp3971 *lp3971;
        struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev);
@@ -449,7 +448,7 @@ static struct i2c_driver lp3971_i2c_driver = {
        .driver = {
                .name = "LP3971",
        },
-       .probe    = lp3971_i2c_probe,
+       .probe_new = lp3971_i2c_probe,
        .id_table = lp3971_i2c_id,
 };
 
index d934540eb8c4b23813c94ba13eec8447c7ad441e..e12e52c69e52e9c13bfacf1d8f6cfd073488d05d 100644 (file)
@@ -301,8 +301,7 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int ltc3676_regulator_probe(struct i2c_client *client,
-                                   const struct i2c_device_id *id)
+static int ltc3676_regulator_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
        struct regulator_init_data *init_data = dev_get_platdata(dev);
@@ -380,7 +379,7 @@ static struct i2c_driver ltc3676_driver = {
                .name = DRIVER_NAME,
                .of_match_table = of_match_ptr(ltc3676_of_match),
        },
-       .probe = ltc3676_regulator_probe,
+       .probe_new = ltc3676_regulator_probe,
        .id_table = ltc3676_i2c_id,
 };
 module_i2c_driver(ltc3676_driver);
index e57fc9197d62077b6375006e19047f22bbff3b95..ac89a412f665ee11d7b6588b86a0bfa73b25c32b 100644 (file)
@@ -386,9 +386,16 @@ static int max77650_regulator_probe(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id max77650_regulator_of_match[] = {
+       { .compatible = "maxim,max77650-regulator" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max77650_regulator_of_match);
+
 static struct platform_driver max77650_regulator_driver = {
        .driver = {
                .name = "max77650-regulator",
+               .of_match_table = max77650_regulator_of_match,
        },
        .probe = max77650_regulator_probe,
 };
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
new file mode 100644 (file)
index 0000000..1d26b50
--- /dev/null
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 five technologies GmbH
+// Author: Markus Reichl <m.reichl@fivetechno.de>
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+
+
+#define VOL_MIN_IDX                    0x00
+#define VOL_MAX_IDX                    0x7ff
+
+/* Register definitions */
+#define MP8859_VOUT_L_REG              0    /* 3 low bits */
+#define MP8859_VOUT_H_REG              1    /* 8 high bits */
+#define MP8859_VOUT_GO_REG             2
+#define MP8859_IOUT_LIM_REG            3
+#define MP8859_CTL1_REG                        4
+#define MP8859_CTL2_REG                        5
+#define MP8859_RESERVED1_REG           6
+#define MP8859_RESERVED2_REG           7
+#define MP8859_RESERVED3_REG           8
+#define MP8859_STATUS_REG              9
+#define MP8859_INTERRUPT_REG           0x0A
+#define MP8859_MASK_REG                        0x0B
+#define MP8859_ID1_REG                 0x0C
+#define MP8859_MFR_ID_REG              0x27
+#define MP8859_DEV_ID_REG              0x28
+#define MP8859_IC_REV_REG              0x29
+
+#define MP8859_MAX_REG                 0x29
+
+#define MP8859_GO_BIT                  0x01
+
+
+static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
+{
+       int ret;
+
+       ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7);
+
+       if (ret)
+               return ret;
+       ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3);
+
+       if (ret)
+               return ret;
+       ret = regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG,
+                                       MP8859_GO_BIT, 1);
+       return ret;
+}
+
+static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
+{
+       unsigned int val_tmp;
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp);
+
+       if (ret)
+               return ret;
+       val = val_tmp << 3;
+
+       ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp);
+
+       if (ret)
+               return ret;
+       val |= val_tmp & 0x07;
+       return val;
+}
+
+static const struct regulator_linear_range mp8859_dcdc_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
+};
+
+static const struct regmap_config mp8859_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = MP8859_MAX_REG,
+       .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regulator_ops mp8859_ops = {
+       .set_voltage_sel = mp8859_set_voltage_sel,
+       .get_voltage_sel = mp8859_get_voltage_sel,
+       .list_voltage = regulator_list_voltage_linear_range,
+};
+
+static const struct regulator_desc mp8859_regulators[] = {
+       {
+               .id = 0,
+               .type = REGULATOR_VOLTAGE,
+               .name = "mp8859_dcdc",
+               .of_match = of_match_ptr("mp8859_dcdc"),
+               .n_voltages = VOL_MAX_IDX + 1,
+               .linear_ranges = mp8859_dcdc_ranges,
+               .n_linear_ranges = 1,
+               .ops = &mp8859_ops,
+               .owner = THIS_MODULE,
+       },
+};
+
+static int mp8859_i2c_probe(struct i2c_client *i2c)
+{
+       int ret;
+       struct regulator_config config = {.dev = &i2c->dev};
+       struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap);
+       struct regulator_dev *rdev;
+
+       if (IS_ERR(regmap)) {
+               ret = PTR_ERR(regmap);
+               dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+               return ret;
+       }
+       rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0],
+                                       &config);
+
+       if (IS_ERR(rdev)) {
+               ret = PTR_ERR(rdev);
+               dev_err(&i2c->dev, "failed to register %s: %d\n",
+                       mp8859_regulators[0].name, ret);
+               return ret;
+       }
+       return 0;
+}
+
+static const struct of_device_id mp8859_dt_id[] = {
+       {.compatible =  "mps,mp8859"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, mp8859_dt_id);
+
+static const struct i2c_device_id mp8859_i2c_id[] = {
+       { "mp8859", },
+       {  },
+};
+MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id);
+
+static struct i2c_driver mp8859_regulator_driver = {
+       .driver = {
+               .name = "mp8859",
+               .of_match_table = of_match_ptr(mp8859_dt_id),
+       },
+       .probe_new = mp8859_i2c_probe,
+       .id_table = mp8859_i2c_id,
+};
+
+module_i2c_driver(mp8859_regulator_driver);
+
+MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver");
+MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>");
+MODULE_LICENSE("GPL v2");
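
The MP8859 spreads its 11-bit voltage selector across two registers: the three least-significant bits go to VOUT_L and the eight most-significant bits to VOUT_H, with each step worth 10000 uV starting from 0 V. A worked example of the split performed by mp8859_set_voltage_sel() above (the 1.2 V target is an assumed value for illustration):

	unsigned int sel = 1200000 / 10000;	/* 1.2 V target -> sel = 120 */
	unsigned int lo  = sel & 0x7;		/* VOUT_L gets 0x0 */
	unsigned int hi  = sel >> 3;		/* VOUT_H gets 0xf */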
diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c
new file mode 100644 (file)
index 0000000..54c862e
--- /dev/null
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// mpq7920.c - regulator driver for MPS MPQ7920
+//
+// Copyright 2019 Monolithic Power Systems, Inc
+//
+// Author: Saravanan Sekar <sravanhome@gmail.com>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include "mpq7920.h"
+
+#define MPQ7920_BUCK_VOLT_RANGE \
+       ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+#define MPQ7920_LDO_VOLT_RANGE \
+       ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+
+#define MPQ7920BUCK(_name, _id, _ilim)                                 \
+       [MPQ7920_BUCK ## _id] = {                                       \
+               .id = MPQ7920_BUCK ## _id,                              \
+               .name = _name,                                          \
+               .of_match = _name,                                      \
+               .regulators_node = "regulators",                        \
+               .of_parse_cb = mpq7920_parse_cb,                        \
+               .ops = &mpq7920_buck_ops,                               \
+               .min_uV = MPQ7920_BUCK_VOLT_MIN,                        \
+               .uV_step = MPQ7920_VOLT_STEP,                           \
+               .n_voltages = MPQ7920_BUCK_VOLT_RANGE,                  \
+               .curr_table = _ilim,                                    \
+               .n_current_limits = ARRAY_SIZE(_ilim),                  \
+               .csel_reg = MPQ7920_BUCK ##_id## _REG_C,                \
+               .csel_mask = MPQ7920_MASK_BUCK_ILIM,                    \
+               .enable_reg = MPQ7920_REG_REGULATOR_EN,                 \
+               .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET -        \
+                                        MPQ7920_BUCK ## _id),          \
+               .vsel_reg = MPQ7920_BUCK ##_id## _REG_A,                \
+               .vsel_mask = MPQ7920_MASK_VREF,                         \
+               .active_discharge_on    = MPQ7920_DISCHARGE_ON,         \
+               .active_discharge_reg   = MPQ7920_BUCK ##_id## _REG_B,  \
+               .active_discharge_mask  = MPQ7920_MASK_DISCHARGE,       \
+               .soft_start_reg         = MPQ7920_BUCK ##_id## _REG_C,  \
+               .soft_start_mask        = MPQ7920_MASK_SOFTSTART,       \
+               .owner                  = THIS_MODULE,                  \
+       }
+
+#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask)   \
+       [MPQ7920_LDO ## _id] = {                                        \
+               .id = MPQ7920_LDO ## _id,                               \
+               .name = _name,                                          \
+               .of_match = _name,                                      \
+               .regulators_node = "regulators",                        \
+               .ops = _ops,                                            \
+               .min_uV = MPQ7920_LDO_VOLT_MIN,                         \
+               .uV_step = MPQ7920_VOLT_STEP,                           \
+               .n_voltages = MPQ7920_LDO_VOLT_RANGE,                   \
+               .vsel_reg = MPQ7920_LDO ##_id## _REG_A,                 \
+               .vsel_mask = MPQ7920_MASK_VREF,                         \
+               .curr_table = _ilim,                                    \
+               .n_current_limits = _ilim_sz,                           \
+               .csel_reg = _creg,                                      \
+               .csel_mask = _cmask,                                    \
+               .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\
+               .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET -        \
+                                       MPQ7920_LDO ##_id + 1),         \
+               .active_discharge_on    = MPQ7920_DISCHARGE_ON,         \
+               .active_discharge_mask  = MPQ7920_MASK_DISCHARGE,       \
+               .active_discharge_reg   = MPQ7920_LDO ##_id## _REG_B,   \
+               .type                   = REGULATOR_VOLTAGE,            \
+               .owner                  = THIS_MODULE,                  \
+       }
+
+enum mpq7920_regulators {
+       MPQ7920_BUCK1,
+       MPQ7920_BUCK2,
+       MPQ7920_BUCK3,
+       MPQ7920_BUCK4,
+       MPQ7920_LDO1, /* LDORTC */
+       MPQ7920_LDO2,
+       MPQ7920_LDO3,
+       MPQ7920_LDO4,
+       MPQ7920_LDO5,
+       MPQ7920_MAX_REGULATORS,
+};
+
+struct mpq7920_regulator_info {
+       struct regmap *regmap;
+       struct regulator_desc *rdesc;
+};
+
+static const struct regmap_config mpq7920_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0x25,
+};
+
+/*
+ * Current limits array (in uA)
+ * ILIM1 & ILIM3
+ */
+static const unsigned int mpq7920_I_limits1[] = {
+       4600000, 6600000, 7600000, 9300000
+};
+
+/* ILIM2 & ILIM4 */
+static const unsigned int mpq7920_I_limits2[] = {
+       2700000, 3900000, 5100000, 6100000
+};
+
+/* LDO4 & LDO5 */
+static const unsigned int mpq7920_I_limits3[] = {
+       300000, 700000
+};
+
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
+static int mpq7920_parse_cb(struct device_node *np,
+                               const struct regulator_desc *rdesc,
+                               struct regulator_config *config);
+
+/* RTCLDO not controllable, always ON */
+static const struct regulator_ops mpq7920_ldortc_ops = {
+       .list_voltage           = regulator_list_voltage_linear,
+       .map_voltage            = regulator_map_voltage_linear,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_wo_current_ops = {
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .list_voltage           = regulator_list_voltage_linear,
+       .map_voltage            = regulator_map_voltage_linear,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .set_active_discharge   = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_ops = {
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .list_voltage           = regulator_list_voltage_linear,
+       .map_voltage            = regulator_map_voltage_linear,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .set_active_discharge   = regulator_set_active_discharge_regmap,
+       .get_current_limit      = regulator_get_current_limit_regmap,
+       .set_current_limit      = regulator_set_current_limit_regmap,
+};
+
+static const struct regulator_ops mpq7920_buck_ops = {
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .list_voltage           = regulator_list_voltage_linear,
+       .map_voltage            = regulator_map_voltage_linear,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .set_active_discharge   = regulator_set_active_discharge_regmap,
+       .set_soft_start         = regulator_set_soft_start_regmap,
+       .set_ramp_delay         = mpq7920_set_ramp_delay,
+};
+
+static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = {
+       MPQ7920BUCK("buck1", 1, mpq7920_I_limits1),
+       MPQ7920BUCK("buck2", 2, mpq7920_I_limits2),
+       MPQ7920BUCK("buck3", 3, mpq7920_I_limits1),
+       MPQ7920BUCK("buck4", 4, mpq7920_I_limits2),
+       MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0),
+       MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+       MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+       MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3,
+                       ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B,
+                       MPQ7920_MASK_LDO_ILIM),
+       MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3,
+                       ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B,
+                       MPQ7920_MASK_LDO_ILIM),
+};
+
+/*
+ * DVS ramp rate BUCK1 to BUCK4
+ * 00-01: Reserved
+ * 10: 8mV/us
+ * 11: 4mV/us
+ */
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+       unsigned int ramp_val;
+
+       if (ramp_delay > 8000 || ramp_delay < 0)
+               return -EINVAL;
+
+       if (ramp_delay <= 4000)
+               ramp_val = 3;
+       else
+               ramp_val = 2;
+
+       return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0,
+                                 MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6);
+}
+
+static int mpq7920_parse_cb(struct device_node *np,
+                               const struct regulator_desc *desc,
+                               struct regulator_config *config)
+{
+       uint8_t val;
+       int ret;
+       struct mpq7920_regulator_info *info = config->driver_data;
+       struct regulator_desc *rdesc = &info->rdesc[desc->id];
+
+       if (of_property_read_bool(np, "mps,buck-ovp-disable")) {
+               regmap_update_bits(config->regmap,
+                               MPQ7920_BUCK1_REG_B + (rdesc->id * 4),
+                               MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE);
+       }
+
+       ret = of_property_read_u8(np, "mps,buck-phase-delay", &val);
+       if (!ret) {
+               regmap_update_bits(config->regmap,
+                               MPQ7920_BUCK1_REG_C + (rdesc->id * 4),
+                               MPQ7920_MASK_BUCK_PHASE_DEALY,
+                               (val & 3) << 4);
+       }
+
+       ret = of_property_read_u8(np, "mps,buck-softstart", &val);
+       if (!ret)
+               rdesc->soft_start_val_on = (val & 3) << 2;
+
+       return 0;
+}
+
+static void mpq7920_parse_dt(struct device *dev,
+                struct mpq7920_regulator_info *info)
+{
+       int ret;
+       struct device_node *np = dev->of_node;
+       uint8_t freq;
+
+       np = of_get_child_by_name(np, "regulators");
+       if (!np) {
+               dev_err(dev, "missing 'regulators' subnode in DT\n");
+               return;
+       }
+
+       ret = of_property_read_u8(np, "mps,switch-freq", &freq);
+       if (!ret) {
+               regmap_update_bits(info->regmap, MPQ7920_REG_CTL0,
+                                       MPQ7920_MASK_SWITCH_FREQ,
+                                       (freq & 3) << 4);
+       }
+
+       of_node_put(np);
+}
+
+static int mpq7920_i2c_probe(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       struct mpq7920_regulator_info *info;
+       struct regulator_config config = { };
+       struct regulator_dev *rdev;
+       struct regmap *regmap;
+       int i;
+
+       info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info),
+                               GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->rdesc = mpq7920_regulators_desc;
+       regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(dev, "Failed to allocate regmap!\n");
+               return PTR_ERR(regmap);
+       }
+
+       i2c_set_clientdata(client, info);
+       info->regmap = regmap;
+       if (client->dev.of_node)
+               mpq7920_parse_dt(&client->dev, info);
+
+       config.dev = dev;
+       config.regmap = regmap;
+       config.driver_data = info;
+
+       for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) {
+               rdev = devm_regulator_register(dev,
+                                              &mpq7920_regulators_desc[i],
+                                              &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(dev, "Failed to register regulator!\n");
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct of_device_id mpq7920_of_match[] = {
+       { .compatible = "mps,mpq7920"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, mpq7920_of_match);
+
+static const struct i2c_device_id mpq7920_id[] = {
+       { "mpq7920", },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, mpq7920_id);
+
+static struct i2c_driver mpq7920_regulator_driver = {
+       .driver = {
+               .name = "mpq7920",
+               .of_match_table = of_match_ptr(mpq7920_of_match),
+       },
+       .probe_new = mpq7920_i2c_probe,
+       .id_table = mpq7920_id,
+};
+module_i2c_driver(mpq7920_regulator_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mpq7920.h b/drivers/regulator/mpq7920.h
new file mode 100644 (file)
index 0000000..4899246
--- /dev/null
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * mpq7920.h - Regulator definitions for mpq7920
+ *
+ * Copyright 2019 Monolithic Power Systems, Inc
+ */
+
+#ifndef __MPQ7920_H__
+#define __MPQ7920_H__
+
+#define MPQ7920_REG_CTL0               0x00
+#define MPQ7920_REG_CTL1               0x01
+#define MPQ7920_REG_CTL2               0x02
+#define MPQ7920_BUCK1_REG_A            0x03
+#define MPQ7920_BUCK1_REG_B            0x04
+#define MPQ7920_BUCK1_REG_C            0x05
+#define MPQ7920_BUCK1_REG_D            0x06
+#define MPQ7920_BUCK2_REG_A            0x07
+#define MPQ7920_BUCK2_REG_B            0x08
+#define MPQ7920_BUCK2_REG_C            0x09
+#define MPQ7920_BUCK2_REG_D            0x0a
+#define MPQ7920_BUCK3_REG_A            0x0b
+#define MPQ7920_BUCK3_REG_B            0x0c
+#define MPQ7920_BUCK3_REG_C            0x0d
+#define MPQ7920_BUCK3_REG_D            0x0e
+#define MPQ7920_BUCK4_REG_A            0x0f
+#define MPQ7920_BUCK4_REG_B            0x10
+#define MPQ7920_BUCK4_REG_C            0x11
+#define MPQ7920_BUCK4_REG_D            0x12
+#define MPQ7920_LDO1_REG_A             0x13
+#define MPQ7920_LDO1_REG_B             0x0
+#define MPQ7920_LDO2_REG_A             0x14
+#define MPQ7920_LDO2_REG_B             0x15
+#define MPQ7920_LDO2_REG_C             0x16
+#define MPQ7920_LDO3_REG_A             0x17
+#define MPQ7920_LDO3_REG_B             0x18
+#define MPQ7920_LDO3_REG_C             0x19
+#define MPQ7920_LDO4_REG_A             0x1a
+#define MPQ7920_LDO4_REG_B             0x1b
+#define MPQ7920_LDO4_REG_C             0x1c
+#define MPQ7920_LDO5_REG_A             0x1d
+#define MPQ7920_LDO5_REG_B             0x1e
+#define MPQ7920_LDO5_REG_C             0x1f
+#define MPQ7920_REG_MODE               0x20
+#define MPQ7920_REG_REGULATOR_EN       0x22
+
+#define MPQ7920_MASK_VREF              0x7f
+#define MPQ7920_MASK_BUCK_ILIM         0xc0
+#define MPQ7920_MASK_LDO_ILIM          BIT(6)
+#define MPQ7920_MASK_DISCHARGE         BIT(5)
+#define MPQ7920_MASK_MODE              0xc0
+#define MPQ7920_MASK_SOFTSTART         0x0c
+#define MPQ7920_MASK_SWITCH_FREQ       0x30
+#define MPQ7920_MASK_BUCK_PHASE_DEALY  0x30
+#define MPQ7920_MASK_DVS_SLEWRATE      0xc0
+#define MPQ7920_MASK_OVP               0x40
+#define MPQ7920_OVP_DISABLE            ~(0x40)
+#define MPQ7920_DISCHARGE_ON           BIT(5)
+
+#define MPQ7920_REGULATOR_EN_OFFSET    7
+
+/* values in uV */
+#define MPQ7920_BUCK_VOLT_MIN          400000
+#define MPQ7920_LDO_VOLT_MIN           650000
+#define MPQ7920_VOLT_MAX               3587500
+#define MPQ7920_VOLT_STEP              12500
+
+#endif /* __MPQ7920_H__ */
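
Plugging the constants above into MPQ7920_BUCK_VOLT_RANGE shows how the driver arrives at its selector counts; this is only a worked check of the macro, not extra code:

	/* (MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN) / MPQ7920_VOLT_STEP + 1
	 *   = (3587500 - 400000) / 12500 + 1
	 *   = 255 + 1 = 256 buck selector values
	 * The LDO variant with its 650000 uV floor gives 235 + 1 = 236.
	 */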
index af95449d3590c81cd2175be940af336f180a707e..69e6af3cd505f46eab87710065c78b7ea4dd89ec 100644 (file)
@@ -85,8 +85,7 @@ static const struct regulator_desc mt6311_regulators[] = {
 /*
  * I2C driver interface functions
  */
-static int mt6311_i2c_probe(struct i2c_client *i2c,
-               const struct i2c_device_id *id)
+static int mt6311_i2c_probe(struct i2c_client *i2c)
 {
        struct regulator_config config = { };
        struct regulator_dev *rdev;
@@ -154,7 +153,7 @@ static struct i2c_driver mt6311_regulator_driver = {
                .name = "mt6311",
                .of_match_table = of_match_ptr(mt6311_dt_ids),
        },
-       .probe = mt6311_i2c_probe,
+       .probe_new = mt6311_i2c_probe,
        .id_table = mt6311_i2c_id,
 };
 
index 3d3415839ba242aba2407173e81a332ae85b2466..787ced91837296f8ec6afcb745a39ad242a08206 100644 (file)
@@ -279,8 +279,7 @@ error_i2c:
 /*
  * I2C driver interface functions
  */
-static int pv88060_i2c_probe(struct i2c_client *i2c,
-               const struct i2c_device_id *id)
+static int pv88060_i2c_probe(struct i2c_client *i2c)
 {
        struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
        struct pv88060 *chip;
@@ -385,7 +384,7 @@ static struct i2c_driver pv88060_regulator_driver = {
                .name = "pv88060",
                .of_match_table = of_match_ptr(pv88060_dt_ids),
        },
-       .probe = pv88060_i2c_probe,
+       .probe_new = pv88060_i2c_probe,
        .id_table = pv88060_i2c_id,
 };
 
index b1d0d97ae935684c5f2640c63e9d9ad2134d963b..784729ec21828ffd45c4b8130c2773592bcfa0ed 100644 (file)
@@ -272,8 +272,7 @@ error_i2c:
 /*
  * I2C driver interface functions
  */
-static int pv88090_i2c_probe(struct i2c_client *i2c,
-               const struct i2c_device_id *id)
+static int pv88090_i2c_probe(struct i2c_client *i2c)
 {
        struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
        struct pv88090 *chip;
@@ -406,7 +405,7 @@ static struct i2c_driver pv88090_regulator_driver = {
                .name = "pv88090",
                .of_match_table = of_match_ptr(pv88090_dt_ids),
        },
-       .probe = pv88090_i2c_probe,
+       .probe_new = pv88090_i2c_probe,
        .id_table = pv88090_i2c_id,
 };
 
index 5b40032264846a5433767105b23845586b8995b6..31f79fda3238b5a6b476a0d24bd83179aa946ffe 100644 (file)
@@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
                }
 
                if (!pdata->dvs_gpio[i]) {
-                       dev_warn(dev, "there is no dvs%d gpio\n", i);
+                       dev_info(dev, "there is no dvs%d gpio\n", i);
                        continue;
                }
 
index 4a91be0ad5aec0536f8016ea5c14187fb95b9944..5c12d57be0405dcf6da00f8d0e941d2dfc997293 100644 (file)
@@ -148,6 +148,7 @@ static struct platform_driver rn5t618_regulator_driver = {
 
 module_platform_driver(rn5t618_regulator_driver);
 
+MODULE_ALIAS("platform:rn5t618-regulator");
 MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
 MODULE_DESCRIPTION("RN5T618 regulator driver");
 MODULE_LICENSE("GPL v2");
index 51f7e8b74d8cb3a8aa535760a161b86d596b2a5d..115f59530852f1e808804ac6e226f9434467e23b 100644 (file)
@@ -390,5 +390,5 @@ module_platform_driver(s2mpa01_pmic_driver);
 /* Module information */
 MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
 MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver");
 MODULE_LICENSE("GPL");
index 4f2dc5ebffdc49773e4ba53d3c1a826c8e98e1c2..23d288278957563d43f3491f557ee890c58a25e6 100644 (file)
@@ -1265,5 +1265,5 @@ module_platform_driver(s2mps11_pmic_driver);
 
 /* Module information */
 MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
 MODULE_LICENSE("GPL");
index bdc07739e9a2240b058525eca66a21d1108cd110..4abd3ed31f606d8c4956e6050bb405678704dc39 100644 (file)
@@ -588,7 +588,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
                if (of_property_read_u32(reg_np, "op_mode",
                                &rmode->mode)) {
                        dev_warn(iodev->dev,
-                               "no op_mode property property at %pOF\n",
+                               "no op_mode property at %pOF\n",
                                reg_np);
 
                        rmode->mode = S5M8767_OPMODE_NORMAL_MODE;
@@ -1015,5 +1015,5 @@ module_exit(s5m8767_pmic_exit);
 
 /* Module information */
 MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver");
 MODULE_LICENSE("GPL");
index bf1a3508ebc4038caa8a15f816261e4bd8ed02dd..44e4cecbf6dec0600cc2501487a74a3b2bc9fac2 100644 (file)
@@ -439,8 +439,7 @@ static void slg51000_clear_fault_log(struct slg51000 *chip)
                dev_dbg(chip->dev, "Fault log: FLT_POR\n");
 }
 
-static int slg51000_i2c_probe(struct i2c_client *client,
-                             const struct i2c_device_id *id)
+static int slg51000_i2c_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
        struct slg51000 *chip;
@@ -509,7 +508,7 @@ static struct i2c_driver slg51000_regulator_driver = {
        .driver = {
                .name = "slg51000-regulator",
        },
-       .probe = slg51000_i2c_probe,
+       .probe_new = slg51000_i2c_probe,
        .id_table = slg51000_i2c_id,
 };
 
index 42e03b2c10a026ba3b175cce8de4f4e9d00b023b..2222e739e62bfe0c078d3f9f1d0c9378547e463c 100644 (file)
@@ -61,8 +61,7 @@ static const struct regulator_desc sy8106a_reg = {
 /*
  * I2C driver interface functions
  */
-static int sy8106a_i2c_probe(struct i2c_client *i2c,
-                           const struct i2c_device_id *id)
+static int sy8106a_i2c_probe(struct i2c_client *i2c)
 {
        struct device *dev = &i2c->dev;
        struct regulator_dev *rdev;
@@ -141,7 +140,7 @@ static struct i2c_driver sy8106a_regulator_driver = {
                .name = "sy8106a",
                .of_match_table = of_match_ptr(sy8106a_i2c_of_match),
        },
-       .probe = sy8106a_i2c_probe,
+       .probe_new = sy8106a_i2c_probe,
        .id_table = sy8106a_i2c_id,
 };
 
index 92adb4f3ee19dbf9dd935afd66563ef90a1585f1..62d243f3b9040692aa0bd61d9968bc3cbdbc3dc9 100644 (file)
@@ -112,8 +112,7 @@ static const struct regmap_config sy8824_regmap_config = {
        .val_bits = 8,
 };
 
-static int sy8824_i2c_probe(struct i2c_client *client,
-                           const struct i2c_device_id *id)
+static int sy8824_i2c_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
        struct device_node *np = dev->of_node;
@@ -222,7 +221,7 @@ static struct i2c_driver sy8824_regulator_driver = {
                .name = "sy8824-regulator",
                .of_match_table = of_match_ptr(sy8824_dt_ids),
        },
-       .probe = sy8824_i2c_probe,
+       .probe_new = sy8824_i2c_probe,
        .id_table = sy8824_id,
 };
 module_i2c_driver(sy8824_regulator_driver);
index 89b9314d64c9dbe5fe152b06c252b1567600857c..af9abcd9c16652dca56237e41d95954aa4892e90 100644 (file)
@@ -748,7 +748,7 @@ static int ti_abb_probe(struct platform_device *pdev)
         * We may have shared interrupt register offsets which are
         * write-1-to-clear between domains ensuring exclusivity.
         */
-       abb->int_base = devm_ioremap_nocache(dev, res->start,
+       abb->int_base = devm_ioremap(dev, res->start,
                                             resource_size(res));
        if (!abb->int_base) {
                dev_err(dev, "Unable to map '%s'\n", pname);
@@ -768,7 +768,7 @@ static int ti_abb_probe(struct platform_device *pdev)
         * We may have shared efuse register offsets which are read-only
         * between domains
         */
-       abb->efuse_base = devm_ioremap_nocache(dev, res->start,
+       abb->efuse_base = devm_ioremap(dev, res->start,
                                               resource_size(res));
        if (!abb->efuse_base) {
                dev_err(dev, "Unable to map '%s'\n", pname);
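
devm_ioremap_nocache() was by this point an alias for devm_ioremap() on the architectures this driver supports, so the tree-wide conversion here is a mechanical rename with identical mapping semantics; a before/after sketch:

	/* before */
	abb->int_base = devm_ioremap_nocache(dev, res->start, resource_size(res));
	/* after */
	abb->int_base = devm_ioremap(dev, res->start, resource_size(res));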
index 7b0e38f8d62752b8c9d925f1548d66c86469dc8f..0edc83089ba2b524a70d4eabf9a2e84b820899c7 100644 (file)
@@ -220,8 +220,7 @@ static const struct regmap_config tps65132_regmap_config = {
        .wr_table       = &tps65132_no_reg_table,
 };
 
-static int tps65132_probe(struct i2c_client *client,
-                         const struct i2c_device_id *client_id)
+static int tps65132_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
        struct tps65132_regulator *tps;
@@ -272,7 +271,7 @@ static struct i2c_driver tps65132_i2c_driver = {
        .driver = {
                .name = "tps65132",
        },
-       .probe = tps65132_probe,
+       .probe_new = tps65132_probe,
        .id_table = tps65132_id,
 };
 
index 9a9ee8188109828090cbdd10a8b679ea9a01a9b3..cbadb1c9967906e99aa34157374aae8e432ebb53 100644 (file)
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/regulator/coupler.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/sort.h>
 
+#include "internal.h"
+
 struct vctrl_voltage_range {
        int min_uV;
        int max_uV;
@@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
 static int vctrl_get_voltage(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+       int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
 
        return vctrl_calc_output_voltage(vctrl, ctrl_uV);
 }
@@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
        struct regulator *ctrl_reg = vctrl->ctrl_reg;
-       int orig_ctrl_uV = regulator_get_voltage(ctrl_reg);
+       int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
        int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
        int ret;
 
        if (req_min_uV >= uV || !vctrl->ovp_threshold)
                /* voltage rising or no OVP */
-               return regulator_set_voltage(
-                       ctrl_reg,
+               return regulator_set_voltage_rdev(ctrl_reg->rdev,
                        vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
-                       vctrl_calc_ctrl_voltage(vctrl, req_max_uV));
+                       vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
+                       PM_SUSPEND_ON);
 
        while (uV > req_min_uV) {
                int max_drop_uV = (uV * vctrl->ovp_threshold) / 100;
@@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
                next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
                next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
 
-               ret = regulator_set_voltage(ctrl_reg,
+               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+                                           next_ctrl_uV,
                                            next_ctrl_uV,
-                                           next_ctrl_uV);
+                                           PM_SUSPEND_ON);
                if (ret)
                        goto err;
 
@@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
 
 err:
        /* Try to go back to original voltage */
-       regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV);
+       regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
+                                  PM_SUSPEND_ON);
 
        return ret;
 }
@@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
 
        if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
                /* voltage rising or no OVP */
-               ret = regulator_set_voltage(ctrl_reg,
+               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+                                           vctrl->vtable[selector].ctrl,
                                            vctrl->vtable[selector].ctrl,
-                                           vctrl->vtable[selector].ctrl);
+                                           PM_SUSPEND_ON);
                if (!ret)
                        vctrl->sel = selector;
 
@@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
                else
                        next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
 
-               ret = regulator_set_voltage(ctrl_reg,
+               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
                                            vctrl->vtable[next_sel].ctrl,
-                                           vctrl->vtable[next_sel].ctrl);
+                                           vctrl->vtable[next_sel].ctrl,
+                                           PM_SUSPEND_ON);
                if (ret) {
                        dev_err(&rdev->dev,
                                "failed to set control voltage to %duV\n",
@@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
 err:
        if (vctrl->sel != orig_sel) {
                /* Try to go back to original voltage */
-               if (!regulator_set_voltage(ctrl_reg,
+               if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
+                                          vctrl->vtable[orig_sel].ctrl,
                                           vctrl->vtable[orig_sel].ctrl,
-                                          vctrl->vtable[orig_sel].ctrl))
+                                          PM_SUSPEND_ON))
                        vctrl->sel = orig_sel;
                else
                        dev_warn(&rdev->dev,
@@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev)
                if (ret)
                        return ret;
 
-               ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+               ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
                if (ctrl_uV < 0) {
                        dev_err(&pdev->dev, "failed to get control voltage\n");
                        return ctrl_uV;
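
vctrl holds a consumer handle on its control regulator, but calling the consumer-level API from inside a regulator driver would re-enter the core's locking; the rdev-level calls used above operate on the already-locked regulator_dev directly. A sketch of the translation applied throughout this file, with min_uV/max_uV standing in for the computed control voltages:

	/* consumer level (old): acquires the core locks itself */
	ret = regulator_set_voltage(ctrl_reg, min_uV, max_uV);

	/* rdev level (new): the extra state argument selects the normal,
	 * non-suspend voltage configuration.
	 */
	ret = regulator_set_voltage_rdev(ctrl_reg->rdev, min_uV, max_uV,
					 PM_SUSPEND_ON);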
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
new file mode 100644 (file)
index 0000000..6d5ae25
--- /dev/null
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com>
+// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr>
+//
+// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+static const unsigned int ipq4019_vmmc_voltages[] = {
+       1500000, 1800000, 2500000, 3000000,
+};
+
+static const struct regulator_ops ipq4019_regulator_voltage_ops = {
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_ascend,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc vmmc_regulator = {
+       .name           = "vmmcq",
+       .ops            = &ipq4019_regulator_voltage_ops,
+       .type           = REGULATOR_VOLTAGE,
+       .owner          = THIS_MODULE,
+       .volt_table     = ipq4019_vmmc_voltages,
+       .n_voltages     = ARRAY_SIZE(ipq4019_vmmc_voltages),
+       .vsel_reg       = 0,
+       .vsel_mask      = 0x3,
+};
+
+static const struct regmap_config ipq4019_vmmcq_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+};
+
+static int ipq4019_regulator_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct regulator_init_data *init_data;
+       struct regulator_config cfg = {};
+       struct regulator_dev *rdev;
+       struct resource *res;
+       struct regmap *rmap;
+       void __iomem *base;
+
+       init_data = of_get_regulator_init_data(dev, dev->of_node,
+                                              &vmmc_regulator);
+       if (!init_data)
+               return -EINVAL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config);
+       if (IS_ERR(rmap))
+               return PTR_ERR(rmap);
+
+       cfg.dev = dev;
+       cfg.init_data = init_data;
+       cfg.of_node = dev->of_node;
+       cfg.regmap = rmap;
+
+       rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg);
+       if (IS_ERR(rdev)) {
+               dev_err(dev, "Failed to register regulator: %ld\n",
+                       PTR_ERR(rdev));
+               return PTR_ERR(rdev);
+       }
+       platform_set_drvdata(pdev, rdev);
+
+       return 0;
+}
+
+static const struct of_device_id regulator_ipq4019_of_match[] = {
+       { .compatible = "qcom,vqmmc-ipq4019-regulator", },
+       {},
+};
+
+static struct platform_driver ipq4019_regulator_driver = {
+       .probe = ipq4019_regulator_probe,
+       .driver = {
+               .name = "vqmmc-ipq4019-regulator",
+               .of_match_table = of_match_ptr(regulator_ipq4019_of_match),
+       },
+};
+module_platform_driver(ipq4019_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>");
+MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator");
index ca1d49146f61143537d55dd07511866a673e4ad4..7597c70e04d5b42b54d18a8c7de022bc7586b99a 100644 (file)
@@ -787,7 +787,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
                return ERR_PTR(-ENOMEM);
 
        rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
-       if (!IS_ERR(rstc)) {
+       if (!IS_ERR_OR_NULL(rstc)) {
                *ptr = rstc;
                devres_add(dev, ptr);
        } else {
@@ -861,8 +861,7 @@ static int of_reset_control_get_count(struct device_node *node)
  * @acquired: only one reset control may be acquired for a given controller
  *            and ID
  *
- * Returns pointer to allocated reset_control_array on success or
- * error on failure
+ * Returns pointer to allocated reset_control on success or error on failure
  */
 struct reset_control *
 of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
@@ -915,8 +914,7 @@ EXPORT_SYMBOL_GPL(of_reset_control_array_get);
  * that just have to be asserted or deasserted, without any
  * requirements on the order.
  *
- * Returns pointer to allocated reset_control_array on success or
- * error on failure
+ * Returns pointer to allocated reset_control on success or error on failure
  */
 struct reset_control *
 devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
@@ -930,7 +928,7 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
                return ERR_PTR(-ENOMEM);
 
        rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
-       if (IS_ERR(rstc)) {
+       if (IS_ERR_OR_NULL(rstc)) {
                devres_free(devres);
                return rstc;
        }
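
An optional reset lookup may legitimately return NULL rather than an ERR_PTR, and a NULL control needs no devres bookkeeping, which is what the IS_ERR_OR_NULL() checks above encode. The NULL case stays transparent to consumers, since the reset_control_*() operations accept NULL as a no-op; a hedged sketch:

	struct reset_control *rstc;

	rstc = devm_reset_control_array_get(dev, false, true);	/* optional */
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);		/* a real error */
	reset_control_deassert(rstc);		/* no-op when rstc is NULL */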
index a608f445dad6f8cd6a2f9bafe00af917b86c2130..f213264c8567bcac5bec4a9f631c46de4190f8a0 100644 (file)
@@ -91,12 +91,6 @@ static int brcmstb_reset_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) ||
-           !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) {
-               dev_err(kdev, "incorrect register range\n");
-               return -EINVAL;
-       }
-
        priv->base = devm_ioremap_resource(kdev, res);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);
index df2829dd55ad6a3fe5e6934cd1614c20b790198b..2ecd8752b088b6cfe1bdb97fd57511d72778c562 100644 (file)
@@ -172,20 +172,7 @@ int mc146818_set_time(struct rtc_time *time)
        save_control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-
-#ifdef CONFIG_X86
-       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-            boot_cpu_data.x86 == 0x17) ||
-            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
-               CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
-                       RTC_FREQ_SELECT);
-               save_freq_select &= ~RTC_DIV_RESET2;
-       } else
-               CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
-                       RTC_FREQ_SELECT);
-#else
-       CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
-#endif
+       CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 
 #ifdef CONFIG_MACH_DECSTATION
        CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
index 5249fc99fd5fb7b2eadbdf9a14c3ef2c7b94c96f..9135e21017523959d257484f272b493232bb7b28 100644 (file)
@@ -47,7 +47,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data)
                irqen = irqsta & ~RTC_IRQ_EN_AL;
                mutex_lock(&rtc->lock);
                if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
-                                irqen) < 0)
+                                irqen) == 0)
                        mtk_rtc_write_trigger(rtc);
                mutex_unlock(&rtc->lock);
 
@@ -169,12 +169,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
        alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
        mutex_unlock(&rtc->lock);
 
-       tm->tm_sec = data[RTC_OFFSET_SEC];
-       tm->tm_min = data[RTC_OFFSET_MIN];
-       tm->tm_hour = data[RTC_OFFSET_HOUR];
-       tm->tm_mday = data[RTC_OFFSET_DOM];
-       tm->tm_mon = data[RTC_OFFSET_MTH];
-       tm->tm_year = data[RTC_OFFSET_YEAR];
+       tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
+       tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
+       tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
+       tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
+       tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
+       tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
 
        tm->tm_year += RTC_MIN_YEAR_OFFSET;
        tm->tm_mon--;
@@ -195,14 +195,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
        tm->tm_year -= RTC_MIN_YEAR_OFFSET;
        tm->tm_mon++;
 
-       data[RTC_OFFSET_SEC] = tm->tm_sec;
-       data[RTC_OFFSET_MIN] = tm->tm_min;
-       data[RTC_OFFSET_HOUR] = tm->tm_hour;
-       data[RTC_OFFSET_DOM] = tm->tm_mday;
-       data[RTC_OFFSET_MTH] = tm->tm_mon;
-       data[RTC_OFFSET_YEAR] = tm->tm_year;
-
        mutex_lock(&rtc->lock);
+       ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
+                              data, RTC_OFFSET_COUNT);
+       if (ret < 0)
+               goto exit;
+
+       data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
+                               (tm->tm_sec & RTC_AL_SEC_MASK));
+       data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
+                               (tm->tm_min & RTC_AL_MIN_MASK));
+       data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
+                               (tm->tm_hour & RTC_AL_HOU_MASK));
+       data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
+                               (tm->tm_mday & RTC_AL_DOM_MASK));
+       data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
+                               (tm->tm_mon & RTC_AL_MTH_MASK));
+       data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
+                               (tm->tm_year & RTC_AL_YEA_MASK));
+
        if (alm->enabled) {
                ret = regmap_bulk_write(rtc->regmap,
                                        rtc->addr_base + RTC_AL_SEC,
index 579b3ff5c644fd9df948ed2773bd7a99694e27ca..feb1f8e52c0084054003b131e17b73a737a1af13 100644 (file)
@@ -504,7 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
        if (unlikely(!rtc->res))
                return -EBUSY;
 
-       rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start,
+       rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start,
                                        rtc->regsize);
        if (unlikely(!rtc->regbase))
                return -EINVAL;
index 8dcd20b34dde3d15601596d230dc0030c71b6c98..852f5f3b359283a0e57c0f6f7f4494caadb41c57 100644 (file)
@@ -379,6 +379,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node)
 CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
                      sun50i_h6_rtc_clk_init);
 
+/*
+ * The R40 user manual is self-conflicting on whether the prescaler is
+ * fixed or configurable. The clock diagram shows it as fixed, but there
+ * is also a configurable divider in the RTC block.
+ */
+static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = {
+       .rc_osc_rate = 16000000,
+       .fixed_prescaler = 512,
+};
+static void __init sun8i_r40_rtc_clk_init(struct device_node *node)
+{
+       sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data);
+}
+CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
+                     sun8i_r40_rtc_clk_init);
+
 static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
        .rc_osc_rate = 32000,
        .has_out_clk = 1,
index c94184d080f84d5bc4c0211159996fe43fbbbc68..a28b9ff823780de364ba7e79d871a92c464bce49 100644 (file)
@@ -1128,7 +1128,8 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 {
        struct dasd_eckd_private *private = device->private;
        int fcx_in_css, fcx_in_gneq, fcx_in_features;
-       int tpm, mdc;
+       unsigned int mdc;
+       int tpm;
 
        if (dasd_nofcx)
                return 0;
@@ -1142,7 +1143,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
                return 0;
 
        mdc = ccw_device_get_mdc(device->cdev, 0);
-       if (mdc < 0) {
+       if (mdc == 0) {
                dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
                return 0;
        } else {
@@ -1153,12 +1154,12 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
 {
        struct dasd_eckd_private *private = device->private;
-       int mdc;
+       unsigned int mdc;
        u32 fcx_max_data;
 
        if (private->fcx_max_data) {
                mdc = ccw_device_get_mdc(device->cdev, lpm);
-               if ((mdc < 0)) {
+               if (mdc == 0) {
                        dev_warn(&device->cdev->dev,
                                 "Detecting the maximum data size for zHPF "
                                 "requests failed (rc=%d) for a new path %x\n",
@@ -2073,7 +2074,7 @@ out_err2:
        dasd_free_block(device->block);
        device->block = NULL;
 out_err1:
-       kfree(private->conf_data);
+       dasd_eckd_clear_conf_data(device);
        kfree(device->private);
        device->private = NULL;
        return rc;
@@ -2082,7 +2083,6 @@ out_err1:
 static void dasd_eckd_uncheck_device(struct dasd_device *device)
 {
        struct dasd_eckd_private *private = device->private;
-       int i;
 
        if (!private)
                return;
@@ -2092,21 +2092,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
        private->sneq = NULL;
        private->vdsneq = NULL;
        private->gneq = NULL;
-       private->conf_len = 0;
-       for (i = 0; i < 8; i++) {
-               kfree(device->path[i].conf_data);
-               if ((__u8 *)device->path[i].conf_data ==
-                   private->conf_data) {
-                       private->conf_data = NULL;
-                       private->conf_len = 0;
-               }
-               device->path[i].conf_data = NULL;
-               device->path[i].cssid = 0;
-               device->path[i].ssid = 0;
-               device->path[i].chpid = 0;
-       }
-       kfree(private->conf_data);
-       private->conf_data = NULL;
+       dasd_eckd_clear_conf_data(device);
 }
 
 static struct dasd_ccw_req *
index 8f75df06e893cd1b09ceb31e44cbb5442401547b..45ddabec40176921e65b58ee3033d5ba9cf23093 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2000
+ * Copyright IBM Corp. 1999, 2000
  *
  */
 
index 1770b99f607e6638570fdc39b118a8bf97c41f2b..8d4d69ea5bafd2c4f968c27378adf827591e42e5 100644 (file)
@@ -5,7 +5,7 @@
  *                 Carsten Otte <Cotte@de.ibm.com>
  *                 Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2002
+ * Copyright IBM Corp. 1999, 2002
  *
  * /proc interface for the dasd driver.
  *
index 65841af15748016d4ca0eff464b219931e30b538..ccecf6b9504e8d2305fe75ce92d9056a78beaa9f 100644 (file)
@@ -635,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
  * @mask: mask of paths to use
  *
  * Return the number of 64K-bytes blocks all paths at least support
- * for a transport command. Return values <= 0 indicate failures.
+ * for a transport command. Return value 0 indicates failure.
  */
 int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
 {
index a1915061932eb0325d95a644c6e8d6249bd09d47..5256e3ce84e56a5144368d9017c17da5bb336c12 100644 (file)
@@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev)
                drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
                if (!!devres != !!drvres)
                        return -ENODEV;
-               /* (re-)init queue's state machine */
-               ap_queue_reinit_state(to_ap_queue(dev));
        }
 
        /* Add queue/card to list of active queues/cards */
index 433b7b64368d6710523c468eaac5f3270cc5701f..bb35ba4a8d243955545596908170fbdd10620400 100644 (file)
@@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
-void ap_queue_reinit_state(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
 
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
                               int comp_device_type, unsigned int functions);
index dad2be333d826fd5d49601e6b9c11f11b118b755..37c3bdc3642dc6756dbcf10a6a34f2d9974fb930 100644 (file)
@@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
        aq->ap_dev.device.type = &ap_queue_type;
        aq->ap_dev.device_type = device_type;
        aq->qid = qid;
-       aq->state = AP_STATE_RESET_START;
+       aq->state = AP_STATE_UNBOUND;
        aq->interrupt = AP_INTR_DISABLED;
        spin_lock_init(&aq->lock);
        INIT_LIST_HEAD(&aq->list);
@@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq)
        spin_unlock_bh(&aq->lock);
 }
 
-void ap_queue_reinit_state(struct ap_queue *aq)
+void ap_queue_init_state(struct ap_queue *aq)
 {
        spin_lock_bh(&aq->lock);
        aq->state = AP_STATE_RESET_START;
        ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
        spin_unlock_bh(&aq->lock);
 }
+EXPORT_SYMBOL(ap_queue_init_state);
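
Queues are now created in AP_STATE_UNBOUND and only enter the reset state machine once a driver binds them, so each zcrypt probe function calls the renamed, exported helper, as the hunks below show. A hedged sketch of the probe-side ordering (allocation and error handling elided):

	static int sample_queue_probe(struct ap_device *ap_dev)
	{
		struct ap_queue *aq = to_ap_queue(&ap_dev->device);

		/* ... allocate and populate struct zcrypt_queue *zq ... */

		ap_queue_init_state(aq);	/* UNBOUND -> RESET_START */
		/* ap_queue_init_reply(aq, &zq->reply) etc. follow as before */
		return 0;
	}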
index c1db64a2db21b92e4f44b475a828a8357a097206..110fe9d0cb91090b4879021a81c24e86f4a8fa59 100644 (file)
@@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
        prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
 
        /* do some plausibility checks on the key block */
-       if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
-           prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+       if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+           prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
                DEBUG_ERR("%s reply with invalid or unknown key block\n",
                          __func__);
                rc = -EIO;
index c50f3e86cc7487377e09211264a4d4cc43ec4b61..7cbb384ec5352d618d15f0015f7e60f87c9e4a09 100644 (file)
@@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
        zq->queue = aq;
        zq->online = 1;
        atomic_set(&zq->load, 0);
+       ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2A_CLEANUP_TIME;
        aq->private = zq;
index 35c7c6672713b70b33959b48322de5c0254c4a29..c78c0d119806f17784a0a38dac53f6a48b616b6d 100644 (file)
@@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
        zq->queue = aq;
        zq->online = 1;
        atomic_set(&zq->load, 0);
+       ap_rapq(aq->qid);
        rc = zcrypt_cex2c_rng_supported(aq);
        if (rc < 0) {
                zcrypt_queue_free(zq);
@@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
        else
                zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
                                         MSGTYPE06_VARIANT_NORNG);
+       ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2C_CLEANUP_TIME;
        aq->private = zq;
index 442e3d6162f761645d5f1df2849d28c3b486c855..6fabc906114c0e3b9de976bc6a5670923662de20 100644 (file)
@@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
        zq->queue = aq;
        zq->online = 1;
        atomic_set(&zq->load, 0);
+       ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX4_CLEANUP_TIME;
        aq->private = zq;
index b9a2349e4b909b5de8a15cc728178bec9a314224..29facb9136715638719bb271f86888ec120616f6 100644 (file)
@@ -655,17 +655,17 @@ static int qeth_check_idx_response(struct qeth_card *card,
        unsigned char *buffer)
 {
        QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
-       if ((buffer[2] & 0xc0) == 0xc0) {
+       if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
                QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
                                 buffer[4]);
                QETH_CARD_TEXT(card, 2, "ckidxres");
                QETH_CARD_TEXT(card, 2, " idxterm");
-               QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
-               if (buffer[4] == 0xf6) {
+               QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
+               if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
+                   buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
                        dev_err(&card->gdev->dev,
-                       "The qeth device is not configured "
-                       "for the OSI layer required by z/VM\n");
-                       return -EPERM;
+                               "The device does not support the configured transport mode\n");
+                       return -EPROTONOSUPPORT;
                }
                return -EIO;
        }
@@ -742,10 +742,10 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
        case 0:
                break;
        case -EIO:
-               qeth_clear_ipacmd_list(card);
                qeth_schedule_recovery(card);
                /* fall through */
        default:
+               qeth_clear_ipacmd_list(card);
                goto out;
        }
 
@@ -2482,50 +2482,46 @@ static int qeth_mpc_initialize(struct qeth_card *card)
        rc = qeth_cm_enable(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "2err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_cm_setup(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "3err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_ulp_enable(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "4err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_ulp_setup(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_alloc_qdio_queues(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_qdio_establish(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "6err%d", rc);
                qeth_free_qdio_queues(card);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_qdio_activate(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "7err%d", rc);
-               goto out_qdio;
+               return rc;
        }
        rc = qeth_dm_act(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "8err%d", rc);
-               goto out_qdio;
+               return rc;
        }
 
        return 0;
-out_qdio:
-       qeth_qdio_clear_card(card, !IS_IQD(card));
-       qdio_free(CARD_DDEV(card));
-       return rc;
 }
 
 void qeth_print_status_message(struct qeth_card *card)
@@ -3429,11 +3425,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
                        goto out;
                }
 
-               if (card->state != CARD_STATE_DOWN) {
-                       rc = -1;
-                       goto out;
-               }
-
                qeth_free_qdio_queues(card);
                card->options.cq = cq;
                rc = 0;
@@ -4779,7 +4770,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
 
        QETH_CARD_TEXT(card, 2, "qdioest");
 
-       qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
+       qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
        if (!qib_param_field) {
                rc =  -ENOMEM;
                goto out_free_nothing;
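The FIELD_SIZEOF() -> sizeof_field() conversions in this hunk and in the driver hunks below are mechanical; both macros report the size of a struct member without needing an instance of the struct. A standalone sketch of the idiom, with a hypothetical struct in place of struct qib:

#include <stdio.h>

/* same trick the kernel helper uses: evaluate the member through a null
 * pointer inside sizeof(), which never actually dereferences it */
#define sizeof_field(TYPE, MEMBER) sizeof(((TYPE *)0)->MEMBER)

struct demo_qib {		/* hypothetical stand-in for struct qib */
	unsigned char parm[128];
};

int main(void)
{
	printf("%zu\n", sizeof_field(struct demo_qib, parm));	/* prints 128 */
	return 0;
}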
@@ -5035,10 +5026,8 @@ retriable:
        }
        if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
                rc = qeth_query_setdiagass(card);
-               if (rc < 0) {
+               if (rc)
                        QETH_CARD_TEXT_(card, 2, "8err%d", rc);
-                       goto out;
-               }
        }
 
        if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
index 88f4dc140751c686c958519f0655e9b8e1f02e68..458db34239a7ce3c55b7cb4882920fb62ba27f38 100644 (file)
@@ -421,7 +421,7 @@ struct qeth_ipacmd_setassparms {
        } data;
 } __attribute__ ((packed));
 
-#define SETASS_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setassparms,\
+#define SETASS_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setassparms,\
                                               data.field)
 
 /* SETRTG IPA Command:    ****************************************************/
@@ -535,7 +535,7 @@ struct qeth_ipacmd_setadpparms {
        } data;
 } __attribute__ ((packed));
 
-#define SETADP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setadpparms,\
+#define SETADP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setadpparms,\
                                               data.field)
 
 /* CREATE_ADDR IPA Command:    ***********************************************/
@@ -648,7 +648,7 @@ struct qeth_ipacmd_vnicc {
        } data;
 };
 
-#define VNICC_DATA_SIZEOF(field)       FIELD_SIZEOF(struct qeth_ipacmd_vnicc,\
+#define VNICC_DATA_SIZEOF(field)       sizeof_field(struct qeth_ipacmd_vnicc,\
                                                     data.field)
 
 /* SETBRIDGEPORT IPA Command:   *********************************************/
@@ -729,7 +729,7 @@ struct qeth_ipacmd_setbridgeport {
        } data;
 } __packed;
 
-#define SBP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setbridgeport,\
+#define SBP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setbridgeport,\
                                             data.field)
 
 /* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
@@ -790,7 +790,7 @@ struct qeth_ipa_cmd {
        } data;
 } __attribute__ ((packed));
 
-#define IPA_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipa_cmd, data.field)
+#define IPA_DATA_SIZEOF(field) sizeof_field(struct qeth_ipa_cmd, data.field)
 
 /*
  * special command for ARP processing.
@@ -899,6 +899,11 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
 #define QETH_IDX_ACT_ERR_AUTH          0x1E
 #define QETH_IDX_ACT_ERR_AUTH_USER     0x20
 
+#define QETH_IDX_TERMINATE             0xc0
+#define QETH_IDX_TERMINATE_MASK                0xc0
+#define QETH_IDX_TERM_BAD_TRANSPORT    0x41
+#define QETH_IDX_TERM_BAD_TRANSPORT_VM 0xf6
+
 #define PDU_ENCAPSULATION(buffer) \
        (buffer + *(buffer + (*(buffer + 0x0b)) + \
         *(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
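The new QETH_IDX_* constants name the magic numbers that qeth_check_idx_response() used to hard-code, and the rewritten check treats a second terminate cause code (0x41) the same way as the z/VM-specific 0xf6, returning -EPROTONOSUPPORT for both. A standalone sketch of the resulting classification; the constants and buffer offsets come straight from the hunks above, the rest is scaffolding:

#include <errno.h>
#include <stdio.h>

#define QETH_IDX_TERMINATE		0xc0
#define QETH_IDX_TERMINATE_MASK		0xc0
#define QETH_IDX_TERM_BAD_TRANSPORT	0x41
#define QETH_IDX_TERM_BAD_TRANSPORT_VM	0xf6

static int check_idx_response(const unsigned char *buffer)
{
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) != QETH_IDX_TERMINATE)
		return 0;				/* not a terminate */
	if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
	    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM)
		return -EPROTONOSUPPORT;		/* wrong transport mode */
	return -EIO;
}

int main(void)
{
	const unsigned char term_vm[] = { 0, 0, 0xc0, 0, 0xf6 };

	printf("%d\n", check_idx_response(term_vm));	/* -EPROTONOSUPPORT */
	return 0;
}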
index e81170ab6d9aa4847adade22f826a183ab28df05..7bd86027f5595d53d44aec78f88059bf204d0fe3 100644 (file)
@@ -207,7 +207,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
                card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
        } else if (sysfs_streq(buf, "prio_queueing_vlan")) {
                if (IS_LAYER3(card)) {
-                       rc = -ENOTSUPP;
+                       rc = -EOPNOTSUPP;
                        goto out;
                }
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
index 9086bc04fa6bd6eb5ccf1e9b62229b6716f7a888..47d37e75dda68530800fdf09c3a9e2617ea2a936 100644 (file)
@@ -287,14 +287,15 @@ static void qeth_l2_stop_card(struct qeth_card *card)
                card->state = CARD_STATE_HARDSETUP;
        }
        if (card->state == CARD_STATE_HARDSETUP) {
-               qeth_qdio_clear_card(card, 0);
                qeth_drain_output_queues(card);
                qeth_clear_working_pool_list(card);
                card->state = CARD_STATE_DOWN;
        }
 
+       qeth_qdio_clear_card(card, 0);
        flush_workqueue(card->event_wq);
        card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+       card->info.promisc_mode = 0;
 }
 
 static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
@@ -1951,8 +1952,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
 /* check if VNICC is currently enabled */
 bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
 {
-       /* if everything is turned off, VNICC is not active */
-       if (!card->options.vnicc.cur_chars)
+       if (!card->options.vnicc.sup_chars)
                return false;
        /* default values are only OK if rx_bcast was not enabled by user
         * or the card is offline.
@@ -2039,8 +2039,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
        /* enforce assumed default values and recover settings, if changed  */
        error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
                                               timeout);
-       chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
-       chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
+       /* Change chars, if necessary */
+       chars_tmp = card->options.vnicc.wanted_chars ^
+                   card->options.vnicc.cur_chars;
        chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
        for_each_set_bit(i, &chars_tmp, chars_len) {
                vnicc = BIT(i);
index f70c7aac2dcc3aeda624fc8851a70cbb525c6029..7fa325cf6f8d36323c43f6dacb72445830848a17 100644 (file)
@@ -262,7 +262,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
                return;
 
        mutex_lock(&card->sbp_lock);
-       if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
+       if (!card->options.sbp.reflect_promisc &&
+           card->options.sbp.role != QETH_SBP_ROLE_NONE) {
                /* Conditional to avoid spurious error messages */
                qeth_bridgeport_setrole(card, card->options.sbp.role);
                /* Let the callback function refresh the stored role value. */
index 27126330a4b003bd4e83e01e6ac654d191df04b0..5508ab89b518dd306a7a63c399d69ff7dba64b9f 100644 (file)
@@ -1307,13 +1307,14 @@ static void qeth_l3_stop_card(struct qeth_card *card)
                card->state = CARD_STATE_HARDSETUP;
        }
        if (card->state == CARD_STATE_HARDSETUP) {
-               qeth_qdio_clear_card(card, 0);
                qeth_drain_output_queues(card);
                qeth_clear_working_pool_list(card);
                card->state = CARD_STATE_DOWN;
        }
 
+       qeth_qdio_clear_card(card, 0);
        flush_workqueue(card->event_wq);
+       card->info.promisc_mode = 0;
 }
 
 static void qeth_l3_set_promisc_mode(struct qeth_card *card)
index f9067ed6c7d32168997b33d529128a1f953ff7b5..e8c848f72c6d2ccd3c6c278b6f4fe4d3c18b1b1e 100644 (file)
@@ -242,21 +242,33 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       int rc = 0;
        char *tmp;
-       int rc;
 
        if (!IS_IQD(card))
                return -EPERM;
-       if (card->state != CARD_STATE_DOWN)
-               return -EPERM;
-       if (card->options.sniffer)
-               return -EPERM;
-       if (card->options.cq == QETH_CQ_NOTAVAILABLE)
-               return -EPERM;
+
+       mutex_lock(&card->conf_mutex);
+       if (card->state != CARD_STATE_DOWN) {
+               rc = -EPERM;
+               goto out;
+       }
+
+       if (card->options.sniffer) {
+               rc = -EPERM;
+               goto out;
+       }
+
+       if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+               rc = -EPERM;
+               goto out;
+       }
 
        tmp = strsep((char **)&buf, "\n");
-       if (strlen(tmp) > 8)
-               return -EINVAL;
+       if (strlen(tmp) > 8) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        if (card->options.hsuid[0])
                /* delete old ip address */
@@ -267,11 +279,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                card->options.hsuid[0] = '\0';
                memcpy(card->dev->perm_addr, card->options.hsuid, 9);
                qeth_configure_cq(card, QETH_CQ_DISABLED);
-               return count;
+               goto out;
        }
 
-       if (qeth_configure_cq(card, QETH_CQ_ENABLED))
-               return -EPERM;
+       if (qeth_configure_cq(card, QETH_CQ_ENABLED)) {
+               rc = -EPERM;
+               goto out;
+       }
 
        snprintf(card->options.hsuid, sizeof(card->options.hsuid),
                 "%-8s", tmp);
@@ -280,6 +294,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 
        rc = qeth_l3_modify_hsuid(card, true);
 
+out:
+       mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
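Taking conf_mutex across the whole store function forces the early returns into the rc/goto-out shape above, so the lock is released on every exit path. A userspace sketch of that single-exit pattern, with a pthread mutex standing in for card->conf_mutex and the checks reduced to two:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;

static int hsuid_store(const char *buf, size_t count, int card_is_down)
{
	int rc = 0;

	pthread_mutex_lock(&conf_mutex);
	if (!card_is_down) {		/* stands in for the card->state check */
		rc = -EPERM;
		goto out;
	}
	if (strlen(buf) > 8) {
		rc = -EINVAL;
		goto out;
	}
	/* ... apply the new hsuid here ... */
out:
	pthread_mutex_unlock(&conf_mutex);
	return rc ? rc : (int)count;
}

int main(void)
{
	printf("%d\n", hsuid_store("user1", 5, 1));	/* 5: count on success */
	printf("%d\n", hsuid_store("user1", 5, 0));	/* -EPERM */
	return 0;
}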
 
index 90cf4691b8c3592c3f7d6dcda993b96da639ca81..a7881f8eb05ee4ffb9571da0d2a9d21b62abb92a 100644 (file)
@@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
 config BLK_DEV_SD
        tristate "SCSI disk support"
        depends on SCSI
+       select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
        ---help---
          If you want to use SCSI hard disks, Fibre Channel disks,
          Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
index e36608ce937ab67a2481e6a3616031eb28d6e671..33dbc051bff97f3525f5e9e736f9e2ac7bc3639e 100644 (file)
@@ -535,7 +535,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
        if ((le32_to_cpu(get_name_reply->status) == CT_OK)
         && (get_name_reply->data[0] != '\0')) {
                char *sp = get_name_reply->data;
-               int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+               int data_size = sizeof_field(struct aac_get_name_resp, data);
 
                sp[data_size - 1] = '\0';
                while (*sp == ' ')
@@ -574,7 +574,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 
        dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
-       data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+       data_size = sizeof_field(struct aac_get_name_resp, data);
 
        cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
 
index 8466aa784ec1aab828af2399fa80b3792e0d6290..8b891a05d9e76bc0e1b73857b6dc405df69204b2 100644 (file)
@@ -293,7 +293,7 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
                if (!request_mem_region(start, 0x1000, "aic79xx"))
                        error = ENOMEM;
                if (!error) {
-                       *maddr = ioremap_nocache(base_page, base_offset + 512);
+                       *maddr = ioremap(base_page, base_offset + 512);
                        if (*maddr == NULL) {
                                error = ENOMEM;
                                release_mem_region(start, 0x1000);
index 717d8d1082ce18ae9899870e43238c1443fdb36f..9b293b1f0b716a7dff86d45f178e5eeebef9ccf9 100644 (file)
@@ -372,7 +372,7 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
                if (!request_mem_region(start, 0x1000, "aic7xxx"))
                        error = ENOMEM;
                if (error == 0) {
-                       *maddr = ioremap_nocache(start, 256);
+                       *maddr = ioremap(start, 256);
                        if (*maddr == NULL) {
                                error = ENOMEM;
                                release_mem_region(start, 0x1000);
index db687ef8a99ec5590e86d2248cbd87a823a59098..40dc8eac0e3a3982b29958a7d918b5bfb62ed6ef 100644 (file)
@@ -270,7 +270,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
                break;
        }
        case ACB_ADAPTER_TYPE_C:{
-               acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+               acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
                if (!acb->pmuC) {
                        printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
                        return false;
index 063dccc18f70a04ff362a276087637cbcb0c5c76..5f9f0b18ddf372365c0df1b2916d408f8755e836 100644 (file)
@@ -1300,7 +1300,7 @@ struct be_cmd_get_port_name {
 
 /* Returns the number of items in the field array. */
 #define BE_NUMBER_OF_FIELD(_type_, _field_)    \
-       (FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
+       (sizeof_field(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
 
 /**
  * Different types of iSCSI completions to host driver for both initiator
index 0760d0bd8a10b297837acd3d1042a914fe281104..9b81cfbbc5c532d7e2a164805ed40efbdccb7ff9 100644 (file)
@@ -453,14 +453,14 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
        u8 __iomem *addr;
        int pcicfg_reg;
 
-       addr = ioremap_nocache(pci_resource_start(pcidev, 2),
+       addr = ioremap(pci_resource_start(pcidev, 2),
                               pci_resource_len(pcidev, 2));
        if (addr == NULL)
                return -ENOMEM;
        phba->ctrl.csr = addr;
        phba->csr_va = addr;
 
-       addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
+       addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
        if (addr == NULL)
                goto pci_map_err;
        phba->ctrl.db = addr;
@@ -471,7 +471,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
        else
                pcicfg_reg = 0;
 
-       addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
+       addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
                               pci_resource_len(pcidev, pcicfg_reg));
 
        if (addr == NULL)
index f069e09beb10846244fd0e846a244455a07fc6a5..6f8335ddb1f274222dfe1b1fec004e4fdfc33ce2 100644 (file)
@@ -1414,7 +1414,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
        reg_base = pci_resource_start(hba->pcidev,
                                        BNX2X_DOORBELL_PCI_BAR);
        reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
-       tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+       tgt->ctx_base = ioremap(reg_base + reg_off, 4);
        if (!tgt->ctx_base)
                return -ENOMEM;
        return 0;
index 12666313b937972495f128ff902296b0035d8ede..e53ebc5eff85e3f7d211862cdecbd7ed3f5820ed 100644 (file)
@@ -2715,7 +2715,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                reg_base = pci_resource_start(ep->hba->pcidev,
                                              BNX2X_DOORBELL_PCI_BAR);
                reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
-               ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+               ep->qp.ctx_base = ioremap(reg_base + reg_off, 4);
                if (!ep->qp.ctx_base)
                        return -ENOMEM;
                goto arm_cq;
@@ -2736,7 +2736,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
                /* 5709 device in normal node and 5706/5708 devices */
                reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
 
-       ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
+       ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off,
                                          MB_KERNEL_CTX_SIZE);
        if (!ep->qp.ctx_base)
                return -ENOMEM;
index 2e8a3ac575cb50a6d4eedbf71b723a7fef446179..8dea7d53788a990017934ec5518d3fc6d6ee7bdb 100644 (file)
@@ -529,7 +529,7 @@ static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
                goto err_free_hw;
 
        /* Get the start address of registers from BAR 0 */
-       hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+       hw->regstart = ioremap(pci_resource_start(pdev, 0),
                                       pci_resource_len(pdev, 0));
        if (!hw->regstart) {
                csio_err(hw, "Could not map BAR 0, regstart = %p\n",
index 0d044c1659609e6b39abfdb2fb9958428317f414..4bc794d2f51c96dd5da708d8c5ef10dcbd14978d 100644 (file)
@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
                "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
        cxgbi_hbas_remove(cdev);
        cxgbi_device_portmap_cleanup(cdev);
-       cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+       if (cdev->cdev2ppm)
+               cxgbi_ppm_release(cdev->cdev2ppm(cdev));
        if (cdev->pmap.max_connect)
                cxgbi_free_big_mem(cdev->pmap.port_csk);
        kfree(cdev);
@@ -2746,7 +2747,7 @@ static int __init libcxgbi_init_module(void)
 {
        pr_info("%s", version);
 
-       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+       BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
                     sizeof(struct cxgbi_skb_cb));
        return 0;
 }
index 8ef150dfb6f7d8d560c968f3f05213458aecde98..b60795893994ca10c723a592798dc51d4995e9c3 100644 (file)
@@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
                return SCSI_MLQUEUE_HOST_BUSY;
 
+       if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+               return SCSI_MLQUEUE_HOST_BUSY;
+
        rport = starget_to_rport(scsi_target(sc->device));
        if (!rport) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
index 1f55b9e4e74ab63262c0a1ce9e4598243e533cb2..1b88a3b53eee2e4f06c0e41aad1de7f75db6bad4 100644 (file)
@@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
 
 int vnic_dev_hang_notify(struct vnic_dev *vdev)
 {
-       u64 a0, a1;
+       u64 a0 = 0, a1 = 0;
        int wait = 1000;
        return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
 }
 
 int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
 {
-       u64 a0, a1;
+       u64 a[2] = {};
        int wait = 1000;
        int err, i;
 
        for (i = 0; i < ETH_ALEN; i++)
                mac_addr[i] = 0;
 
-       err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+       err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
        if (err)
                return err;
 
        for (i = 0; i < ETH_ALEN; i++)
-               mac_addr[i] = ((u8 *)&a0)[i];
+               mac_addr[i] = ((u8 *)&a)[i];
 
        return 0;
 }
@@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
 
 void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
 {
-       u64 a0 = 0, a1 = 0;
+       u64 a[2] = {};
        int wait = 1000;
        int err;
        int i;
 
        for (i = 0; i < ETH_ALEN; i++)
-               ((u8 *)&a0)[i] = addr[i];
+               ((u8 *)&a)[i] = addr[i];
 
-       err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+       err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
        if (err)
                pr_err("Can't add addr [%pM], %d\n", addr, err);
 }
 
 void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
 {
-       u64 a0 = 0, a1 = 0;
+       u64 a[2] = {};
        int wait = 1000;
        int err;
        int i;
 
        for (i = 0; i < ETH_ALEN; i++)
-               ((u8 *)&a0)[i] = addr[i];
+               ((u8 *)&a)[i] = addr[i];
 
-       err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+       err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
        if (err)
                pr_err("Can't del addr [%pM], %d\n", addr, err);
 }
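Besides giving vnic_dev_hang_notify() initialized scratch words (a0/a1 were previously passed to the firmware call uninitialized), the MAC helpers now reach both words through one two-element array, so the six address bytes are written through a single contiguous object instead of through &a0 alone. A userspace sketch of that byte-packing pattern, with an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	uint64_t a[2] = { 0 };	/* mirrors the zero-initialized u64 a[2] = {} */
	const uint8_t addr[ETH_ALEN] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
	int i;

	/* pack the MAC into the low bytes, as vnic_dev_add_addr() now does */
	for (i = 0; i < ETH_ALEN; i++)
		((uint8_t *)a)[i] = addr[i];

	printf("a[0] = %#llx\n", (unsigned long long)a[0]);
	return 0;
}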
index 216e557f703e6c0b466fc29dddc05364b3127bf2..1a4ddfacb45808d96682b4bcd659aaacd0422c69 100644 (file)
@@ -6876,7 +6876,7 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
 {
        ulong page_base = ((ulong) base) & PAGE_MASK;
        ulong page_offs = ((ulong) base) - page_base;
-       void __iomem *page_remapped = ioremap_nocache(page_base,
+       void __iomem *page_remapped = ioremap(page_base,
                page_offs + size);
 
        return page_remapped ? (page_remapped + page_offs) : NULL;
index abac2f350aeedf579c0f34bdc11f388c49743b67..c48a73a0f517966ac2986e0c9e1897b717ec7eb8 100644 (file)
@@ -98,7 +98,7 @@ lasi700_probe(struct parisc_device *dev)
 
        hostdata->dev = &dev->dev;
        dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
-       hostdata->base = ioremap_nocache(base, 0x100);
+       hostdata->base = ioremap(base, 0x100);
        hostdata->differential = 0;
 
        if (dev->id.sversion == LASI_700_SVERSION) {
index ebd47c0cf9e93fd6a9b21a05e3bde6e82f6b8d73..70b99c0e2e678c4767af956704504ef39fb2276e 100644 (file)
@@ -1945,7 +1945,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 
        ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
 
-       spin_lock(&session->frwd_lock);
+       spin_lock_bh(&session->frwd_lock);
        task = (struct iscsi_task *)sc->SCp.ptr;
        if (!task) {
                /*
@@ -2072,7 +2072,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 done:
        if (task)
                task->last_timeout = jiffies;
-       spin_unlock(&session->frwd_lock);
+       spin_unlock_bh(&session->frwd_lock);
        ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
                     "timer reset" : "shutdown or nh");
        return rc;
index f47b4b281b14ab5ed05973faf00c2aa09ec5b7c0..d7302c2052f916f74c909a3f35941e5e79b3c68e 100644 (file)
@@ -81,12 +81,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
                else
                        dev->dev_type = SAS_SATA_DEV;
                dev->tproto = SAS_PROTOCOL_SATA;
-       } else {
+       } else if (port->oob_mode == SAS_OOB_MODE) {
                struct sas_identify_frame *id =
                        (struct sas_identify_frame *) dev->frame_rcvd;
                dev->dev_type = id->dev_type;
                dev->iproto = id->initiator_bits;
                dev->tproto = id->target_bits;
+       } else {
+               /* If the oob mode is OOB_NOT_CONNECTED, the port is
+                * disconnected due to a race with PHY down. We cannot
+                * continue to discover this port.
+                */
+               sas_put_device(dev);
+               pr_warn("Port %016llx is disconnected when discovering\n",
+                       SAS_ADDR(port->attached_sas_addr));
+               return -ENODEV;
        }
 
        sas_init_dev(dev);
index d4e1b120cc9ece1a6e4dcf265cd6ca56f812dd1d..0ea03ae93d91d7bcedb7870be83d711217b9a082 100644 (file)
@@ -4489,12 +4489,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
        phba->mbox_ext_buf_ctx.seqNum++;
        nemb_tp = phba->mbox_ext_buf_ctx.nembType;
 
-       dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
-       if (!dd_data) {
-               rc = -ENOMEM;
-               goto job_error;
-       }
-
        pbuf = (uint8_t *)dmabuf->virt;
        size = job->request_payload.payload_len;
        sg_copy_to_buffer(job->request_payload.sg_list,
@@ -4531,6 +4525,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
                                "2968 SLI_CONFIG ext-buffer wr all %d "
                                "ebuffers received\n",
                                phba->mbox_ext_buf_ctx.numBuf);
+
+               dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+               if (!dd_data) {
+                       rc = -ENOMEM;
+                       goto job_error;
+               }
+
                /* mailbox command structure for base driver */
                pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!pmboxq) {
@@ -4579,6 +4580,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
        return SLI_CONFIG_HANDLED;
 
 job_error:
+       if (pmboxq)
+               mempool_free(pmboxq, phba->mbox_mem_pool);
        lpfc_bsg_dma_page_free(phba, dmabuf);
        kfree(dd_data);
 
index 2e6a68d9ea4feaef041a2ed1851d1a2fcb6676af..a5ecbce4eda238a4376ef2230f966ae7eb2c5cb2 100644 (file)
@@ -5385,7 +5385,6 @@ static const struct file_operations lpfc_debugfs_ras_log = {
        .read =         lpfc_debugfs_read,
        .release =      lpfc_debugfs_ras_log_release,
 };
-#endif
 
 #undef lpfc_debugfs_op_dumpHBASlim
 static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
@@ -5557,7 +5556,7 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
        .write =        lpfc_idiag_extacc_write,
        .release =      lpfc_idiag_cmd_release,
 };
-
+#endif
 
 /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
  * @phba: Pointer to HBA context object.
index 6298b17290989d1f04458864756b0952aa131572..6a04fdb3fbf219a073d1ac65a97dd0d33eb7ecf9 100644 (file)
@@ -5883,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
                        break;
                default:
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                                       "1804 Invalid asynchrous event code: "
+                                       "1804 Invalid asynchronous event code: "
                                        "x%x\n", bf_get(lpfc_trailer_code,
                                        &cq_event->cqe.mcqe_cmpl));
                        break;
index db4a04a207ecee98a15e702f18cc20d59d52bcfd..f6c8963c915d4aeda456299cddc62d4f7b03c811 100644 (file)
@@ -1985,6 +1985,8 @@ out_unlock:
 
 /* Declare and initialization an instance of the FC NVME template. */
 static struct nvme_fc_port_template lpfc_nvme_template = {
+       .module = THIS_MODULE,
+
        /* initiator-based functions */
        .localport_delete  = lpfc_nvme_localport_delete,
        .remoteport_delete = lpfc_nvme_remoteport_delete,
index c82b5792da98ce5ca09627ab984338d58189a824..625c046ac4efae6778dc665e80395d4a886d7827 100644 (file)
@@ -8555,7 +8555,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
        psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
        spin_unlock_irq(&phba->hbalock);
 
-       /* wake up worker thread to post asynchronlous mailbox command */
+       /* wake up worker thread to post asynchronous mailbox command */
        lpfc_worker_wake_up(phba);
 }
 
@@ -8823,7 +8823,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
                return rc;
        }
 
-       /* Now, interrupt mode asynchrous mailbox command */
+       /* Now, interrupt mode asynchronous mailbox command */
        rc = lpfc_mbox_cmd_check(phba, mboxq);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -13112,11 +13112,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
 }
 
 /**
- * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
  * @phba: Pointer to HBA context object.
  * @cqe: Pointer to mailbox completion queue entry.
  *
- * This routine process a mailbox completion queue entry with asynchrous
+ * This routine processes a mailbox completion queue entry with asynchronous
  * event.
  *
  * Return: true if work posted to worker thread, otherwise false.
@@ -13270,7 +13270,7 @@ out_no_mqe_complete:
  * @cqe: Pointer to mailbox completion queue entry.
  *
  * This routine processes a mailbox completion queue entry; it invokes the
- * proper mailbox complete handling or asynchrous event handling routine
+ * proper mailbox complete handling or asynchronous event handling routine
  * according to the MCQE's async bit.
  *
  * Return: true if work posted to worker thread, otherwise false.
index f6ac819e6e969a4282b2ea75c5e37aae663ff90b..8443f2f35be243419ca0f2951bdf747879d2ac91 100644 (file)
@@ -731,7 +731,7 @@ megaraid_init_mbox(adapter_t *adapter)
                goto out_free_raid_dev;
        }
 
-       raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
+       raid_dev->baseaddr = ioremap(raid_dev->baseport, 128);
 
        if (!raid_dev->baseaddr) {
 
index a4bc8147928497caaac17f6c516943c263623c74..c60cd9fc4240b2c49ee97acb9e6f429665177b7c 100644 (file)
@@ -5875,7 +5875,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
        }
 
        base_addr = pci_resource_start(instance->pdev, instance->bar);
-       instance->reg_set = ioremap_nocache(base_addr, 8192);
+       instance->reg_set = ioremap(base_addr, 8192);
 
        if (!instance->reg_set) {
                dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
index 848fbec7bda6a27f56ccdce67434fef98c04137a..45fd8dfb7c4056fba801fe880e0647ef1d40f09a 100644 (file)
@@ -5248,7 +5248,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                                        &ct->chain_buffer_dma);
                        if (!ct->chain_buffer) {
                                ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
-                               _base_release_memory_pools(ioc);
                                goto out;
                        }
                }
index 539ac8ce4fcd749b07c21ad1651fcef26591dbd5..d4bd31a75b9dbf8accf12d05c1c6349f9c823163 100644 (file)
@@ -3531,7 +3531,7 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
        spin_lock_init(&cb->queue_lock);
        if (mmio_size < PAGE_SIZE)
                mmio_size = PAGE_SIZE;
-       cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
+       cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
        if (cb->mmio_base == NULL) {
                dev_err(&pdev->dev,
                        "Unable to map Controller Register Window\n");
index eb0dd566330abdab242b3953b7641f60a0342ce3..5c5666491c2ee87f895363298abd34f2b8d07fbe 100644 (file)
@@ -2311,7 +2311,7 @@ static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
        /* Map the Controller Register Window. */
        if (mmio_size < PAGE_SIZE)
                mmio_size = PAGE_SIZE;
-       cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+       cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
        if (cs->mmio_base == NULL) {
                dev_err(&pdev->dev,
                        "Unable to map Controller Register Window\n");
index 93616f9fd6d76ce5d3ac7a14a4321b52af3a6c45..d79ce97a04bd77478d8a9dd1ad0d985044b11d3d 100644 (file)
@@ -1560,7 +1560,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
                        goto next_entry;
 
                data->MmioAddress = (unsigned long)
-                       ioremap_nocache(p_dev->resource[2]->start,
+                       ioremap(p_dev->resource[2]->start,
                                        resource_size(p_dev->resource[2]));
                data->MmioLength  = resource_size(p_dev->resource[2]);
        }
index ae97e2f310a36bbb0802160b7db8af9f1b4b3b5b..d7e7043f9eab283c69ed417da79a28ff5e5f50c0 100644 (file)
@@ -178,6 +178,7 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
 
        faddr = ha->flt_region_nvram;
        if (IS_QLA28XX(ha)) {
+               qla28xx_get_aux_images(vha, &active_regions);
                if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
                        faddr = ha->flt_region_nvram_sec;
        }
index 99f0a1a08143e0864e0ba69718945fe538ffd447..cbaf178fc9796a976b1a008afe6c0a5f1c73062e 100644 (file)
@@ -2399,7 +2399,7 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
        struct qla_active_regions regions = { };
        struct active_regions active_regions = { };
 
-       qla28xx_get_aux_images(vha, &active_regions);
+       qla27xx_get_active_image(vha, &active_regions);
        regions.global_image = active_regions.global;
 
        if (IS_QLA28XX(ha)) {
index 460f443f64716852cff956f7fca3d87431ea2d4a..2edd9f7b30742e990a879d1a6e6705a1e7eecabf 100644 (file)
@@ -2401,6 +2401,7 @@ typedef struct fc_port {
        unsigned int id_changed:1;
        unsigned int scan_needed:1;
        unsigned int n2n_flag:1;
+       unsigned int explicit_logout:1;
 
        struct completion nvme_del_done;
        uint32_t nvme_prli_service_param;
index 59f6903e5abe3188aadc0f1eaed1d64ac3cd41e7..9dc09c1174169b6b5f5666dbaee9b62f76fcf627 100644 (file)
@@ -1523,6 +1523,10 @@ struct qla_flt_header {
 #define FLT_REG_NVRAM_SEC_28XX_1       0x10F
 #define FLT_REG_NVRAM_SEC_28XX_2       0x111
 #define FLT_REG_NVRAM_SEC_28XX_3       0x113
+#define FLT_REG_MPI_PRI_28XX           0xD3
+#define FLT_REG_MPI_SEC_28XX           0xF0
+#define FLT_REG_PEP_PRI_28XX           0xD1
+#define FLT_REG_PEP_SEC_28XX           0xF1
 
 struct qla_flt_region {
        uint16_t code;
index 6c28f38f8021a7f0ef8adde275866155e1e30738..aa5204163becadc2e88133779eefaafde4e79103 100644 (file)
@@ -533,6 +533,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 
        e->u.fcport.fcport = fcport;
        fcport->flags |= FCF_ASYNC_ACTIVE;
+       fcport->disc_state = DSC_LOGIN_PEND;
        return qla2x00_post_work(vha, e);
 }
 
@@ -1526,8 +1527,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
                }
        }
 
-       /* for pure Target Mode. Login will not be initiated */
-       if (vha->host->active_mode == MODE_TARGET)
+       /* Target won't initiate port login if fabric is present */
+       if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
                return 0;
 
        if (fcport->flags & FCF_ASYNC_SENT) {
@@ -1719,6 +1720,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
                                      struct event_arg *ea)
 {
+       /* for pure Target Mode, PRLI will not be initiated */
+       if (vha->host->active_mode == MODE_TARGET)
+               return;
+
        ql_dbg(ql_dbg_disc, vha, 0x2118,
            "%s %d %8phC post PRLI\n",
            __func__, __LINE__, ea->fcport->port_name);
@@ -4852,6 +4857,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
        }
 
        INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+       INIT_WORK(&fcport->free_work, qlt_free_session_done);
        INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
        INIT_LIST_HEAD(&fcport->gnl_entry);
        INIT_LIST_HEAD(&fcport->list);
@@ -4930,14 +4936,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
                set_bit(RSCN_UPDATE, &flags);
                clear_bit(LOCAL_LOOP_UPDATE, &flags);
 
-       } else if (ha->current_topology == ISP_CFG_N) {
-               clear_bit(RSCN_UPDATE, &flags);
-               if (qla_tgt_mode_enabled(vha)) {
-                       /* allow the other side to start the login */
-                       clear_bit(LOCAL_LOOP_UPDATE, &flags);
-                       set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-               }
-       } else if (ha->current_topology == ISP_CFG_NL) {
+       } else if (ha->current_topology == ISP_CFG_NL ||
+                  ha->current_topology == ISP_CFG_N) {
                clear_bit(RSCN_UPDATE, &flags);
                set_bit(LOCAL_LOOP_UPDATE, &flags);
        } else if (!vha->flags.online ||
@@ -5054,7 +5054,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
                                memcpy(&ha->plogi_els_payld.data,
                                    (void *)ha->init_cb,
                                    sizeof(ha->plogi_els_payld.data));
-                               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                        } else {
                                ql_dbg(ql_dbg_init, vha, 0x00d1,
                                    "PLOGI ELS param read fail.\n");
index b25f87ff8cdee65e9739a65c5e7f4a26d2526049..8b050f0b43330543f365a4f8e83eb2ae67342813 100644 (file)
@@ -2405,11 +2405,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
 static void
 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
 {
+       u16 control_flags = LCF_COMMAND_LOGO;
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
-       logio->control_flags =
-           cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
-       if (!sp->fcport->keep_nport_handle)
-               logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
+
+       if (sp->fcport->explicit_logout) {
+               control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
+       } else {
+               control_flags |= LCF_IMPL_LOGO;
+
+               if (!sp->fcport->keep_nport_handle)
+                       control_flags |= LCF_FREE_NPORT;
+       }
+
+       logio->control_flags = cpu_to_le16(control_flags);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
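Together with the new explicit_logout bit in struct fc_port (set in tcm_qla2xxx_close_session() and cleared again in qlt_free_session_done(), per the hunks further down), this lets a target-side session teardown send an explicit LOGO that always frees the N_Port handle, while all other callers keep the old implicit-logout behaviour. A sketch of the flag selection alone, with made-up values standing in for the LCF_* constants:

#include <stdint.h>
#include <stdio.h>

/* hypothetical values; only the combinations matter here */
#define LCF_COMMAND_LOGO	0x0001
#define LCF_FREE_NPORT		0x0002
#define LCF_IMPL_LOGO		0x0004
#define LCF_EXPL_LOGO		0x0008

static uint16_t logout_flags(int explicit_logout, int keep_nport_handle)
{
	uint16_t control_flags = LCF_COMMAND_LOGO;

	if (explicit_logout) {
		control_flags |= LCF_EXPL_LOGO | LCF_FREE_NPORT;
	} else {
		control_flags |= LCF_IMPL_LOGO;
		if (!keep_nport_handle)
			control_flags |= LCF_FREE_NPORT;
	}
	return control_flags;
}

int main(void)
{
	printf("explicit: %#x\n", logout_flags(1, 0));	/* LOGO|EXPL|FREE */
	printf("implicit: %#x\n", logout_flags(0, 1));	/* LOGO|IMPL */
	return 0;
}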
@@ -2617,6 +2625,10 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
 
        memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
            sizeof(struct els_logo_payload));
+       ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
+       ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
+                      elsio->u.els_logo.els_logo_pyld,
+                      sizeof(*elsio->u.els_logo.els_logo_pyld));
 
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
@@ -2676,7 +2688,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
                    "PLOGI ELS IOCB:\n");
                ql_dump_buffer(ql_log_info, vha, 0x0109,
-                   (uint8_t *)els_iocb, 0x70);
+                   (uint8_t *)els_iocb,
+                   sizeof(*els_iocb));
        } else {
                els_iocb->control_flags = 1 << 13;
                els_iocb->tx_byte_count =
@@ -2688,6 +2701,11 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
                els_iocb->rx_byte_count = 0;
                els_iocb->rx_address = 0;
                els_iocb->rx_len = 0;
+               ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
+                      "LOGO ELS IOCB:");
+               ql_dump_buffer(ql_log_info, vha, 0x010b,
+                              els_iocb,
+                              sizeof(*els_iocb));
        }
 
        sp->vha->qla_stats.control_requests++;
@@ -2934,7 +2952,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
 
        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
-           (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
+           (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
+           sizeof(*elsio->u.els_plogi.els_plogi_pyld));
 
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
index 2601d7673c37dd9070d5b94018ab9a14054ac94c..7b8a6bfcf08d97c1cc1421cd67eb53dfbd8ea9c9 100644 (file)
@@ -1061,8 +1061,6 @@ global_port_update:
                        ql_dbg(ql_dbg_async, vha, 0x5011,
                            "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                            mb[1], mb[2], mb[3]);
-
-                       qlt_async_event(mb[0], vha, mb);
                        break;
                }
 
@@ -1079,8 +1077,6 @@ global_port_update:
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                set_bit(VP_CONFIG_OK, &vha->vp_flags);
-
-               qlt_async_event(mb[0], vha, mb);
                break;
 
        case MBA_RSCN_UPDATE:           /* State Change Registration */
index 0cf94f05f0080623ec396ace7932fdc27efee40e..b7c1108c48e208f2e634309acd84d46b182bd7e3 100644 (file)
@@ -3921,6 +3921,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                        vha->d_id.b24 = 0;
                                        vha->d_id.b.al_pa = 1;
                                        ha->flags.n2n_bigger = 1;
+                                       ha->flags.n2n_ae = 0;
 
                                        id.b.al_pa = 2;
                                        ql_dbg(ql_dbg_async, vha, 0x5075,
@@ -3931,6 +3932,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                            "Format 1: Remote login - Waiting for WWPN %8phC.\n",
                                            rptid_entry->u.f1.port_name);
                                        ha->flags.n2n_bigger = 0;
+                                       ha->flags.n2n_ae = 1;
                                }
                                qla24xx_post_newsess_work(vha, &id,
                                    rptid_entry->u.f1.port_name,
@@ -3942,7 +3944,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        /* if our portname is higher then initiate N2N login */
 
                        set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
-                       ha->flags.n2n_ae = 1;
                        return;
                        break;
                case TOPO_FL:
index 605b59c76c9010d8c82bbbfa464a02db077f7673..a3a44d4ace1e74386e2a363a894b4ad56fa906c8 100644 (file)
@@ -789,7 +789,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha)
        }
 
        ha->cregbase =
-           ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+           ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
        if (!ha->cregbase) {
                ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
                    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -810,7 +810,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha)
        }
 
        ha->iobase =
-           ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+           ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
        if (!ha->iobase) {
                ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
                    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
index 941aa53363f564a23a15cc306abff534283659ee..bfcd02fdf2b8915df121562303e0b9358b836f89 100644 (file)
@@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 }
 
 static struct nvme_fc_port_template qla_nvme_fc_transport = {
+       .module = THIS_MODULE,
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
index f2d5115b2d8d19c47aa41b23aee0fd491e5b81c0..bbe90354f49b09488fa2a3b126f1941694fbe7b1 100644 (file)
@@ -847,15 +847,15 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                                ha->flt_region_img_status_pri = start;
                        break;
                case FLT_REG_IMG_SEC_27XX:
-                       if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
                                ha->flt_region_img_status_sec = start;
                        break;
                case FLT_REG_FW_SEC_27XX:
-                       if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
                                ha->flt_region_fw_sec = start;
                        break;
                case FLT_REG_BOOTLOAD_SEC_27XX:
-                       if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
                                ha->flt_region_boot_sec = start;
                        break;
                case FLT_REG_AUX_IMG_PRI_28XX:
@@ -2725,8 +2725,11 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
                ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
                    "Region %x is secure\n", region.code);
 
-               if (region.code == FLT_REG_FW ||
-                   region.code == FLT_REG_FW_SEC_27XX) {
+               switch (region.code) {
+               case FLT_REG_FW:
+               case FLT_REG_FW_SEC_27XX:
+               case FLT_REG_MPI_PRI_28XX:
+               case FLT_REG_MPI_SEC_28XX:
                        fw_array = dwptr;
 
                        /* 1st fw array */
@@ -2757,9 +2760,23 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
                                buf_size_without_sfub += risc_size;
                                fw_array += risc_size;
                        }
-               } else {
-                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
-                           "Secure region %x not supported\n",
+                       break;
+
+               case FLT_REG_PEP_PRI_28XX:
+               case FLT_REG_PEP_SEC_28XX:
+                       fw_array = dwptr;
+
+                       /* 1st fw array */
+                       risc_size = be32_to_cpu(fw_array[3]);
+                       risc_attr = be32_to_cpu(fw_array[9]);
+
+                       buf_size_without_sfub = risc_size;
+                       fw_array += risc_size;
+                       break;
+
+               default:
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha,
+                           0xffff, "Secure region %x not supported\n",
                            region.code);
                        rval = QLA_COMMAND_ERROR;
                        goto done;
@@ -2880,7 +2897,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
                            "Sending Secure Flash MB Cmd\n");
                        rval = qla28xx_secure_flash_update(vha, 0, region.code,
                                buf_size_without_sfub, sfub_dma,
-                               sizeof(struct secure_flash_update_block));
+                               sizeof(struct secure_flash_update_block) >> 2);
                        if (rval != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0xffff,
                                    "Secure Flash MB Cmd failed %x.", rval);
index 51b275a575a52b37ecd0659c88e763bfaf0de2c9..68c14143e50e4884f4057ad6071b662d9bd38378 100644 (file)
@@ -1104,6 +1104,7 @@ void qlt_free_session_done(struct work_struct *work)
                }
        }
 
+       sess->explicit_logout = 0;
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        sess->free_pending = 0;
 
@@ -1160,7 +1161,6 @@ void qlt_unreg_sess(struct fc_port *sess)
        sess->last_rscn_gen = sess->rscn_gen;
        sess->last_login_gen = sess->login_gen;
 
-       INIT_WORK(&sess->free_work, qlt_free_session_done);
        queue_work(sess->vha->hw->wq, &sess->free_work);
 }
 EXPORT_SYMBOL(qlt_unreg_sess);
@@ -1265,7 +1265,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
            "Scheduling sess %p for deletion %8phC\n",
            sess, sess->port_name);
 
-       INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
        WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
 }
 
@@ -4804,6 +4803,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
 
        switch (sess->disc_state) {
        case DSC_DELETED:
+       case DSC_LOGIN_PEND:
                qlt_plogi_ack_unref(vha, pla);
                break;
 
index 042a24314edcfd476f11183cf0cfe05ebd8712c3..abe7f79bb78954b2eb5005e20821ebf3e0efe880 100644 (file)
@@ -246,6 +246,8 @@ static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
  */
 static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
 {
+       if (!mcmd)
+               return;
        INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
        queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
 }
@@ -348,6 +350,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
        target_sess_cmd_list_set_waiting(se_sess);
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
+       sess->explicit_logout = 1;
        tcm_qla2xxx_put_sess(sess);
 }
 
index 8c674eca09f1368ef70618e12b1a4f9001d2602b..2323432a0edbcd07345235263a9bf8a610817b97 100644 (file)
@@ -4275,7 +4275,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
        return QLA_SUCCESS;
 
 mem_alloc_error_exit:
-       qla4xxx_mem_free(ha);
        return QLA_ERROR;
 }
 
index 417b868d8735eab8cf1a6dabb669cf0a0fc875a9..ed8d9709b9b96ef4c70feb530bc346c50052bef6 100644 (file)
@@ -24,6 +24,8 @@
 
 #define ISCSI_TRANSPORT_VERSION "2.0-870"
 
+#define ISCSI_SEND_MAX_ALLOWED  10
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/iscsi.h>
 
@@ -3682,6 +3684,7 @@ iscsi_if_rx(struct sk_buff *skb)
                struct nlmsghdr *nlh;
                struct iscsi_uevent *ev;
                uint32_t group;
+               int retries = ISCSI_SEND_MAX_ALLOWED;
 
                nlh = nlmsg_hdr(skb);
                if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
@@ -3712,6 +3715,10 @@ iscsi_if_rx(struct sk_buff *skb)
                                break;
                        err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
                                                  ev, sizeof(*ev));
+                       if (err == -EAGAIN && --retries < 0) {
+                               printk(KERN_WARNING "Send reply failed, error %d\n", err);
+                               break;
+                       }
                } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
                skb_pull(skb, rlen);
        }
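The new cap keeps iscsi_if_rx() from spinning forever when iscsi_if_send_reply() keeps returning -EAGAIN: after ISCSI_SEND_MAX_ALLOWED failed attempts the reply is dropped with a warning. A standalone sketch of the same bounded-retry loop, with send_reply() as a hypothetical stand-in that fails more often than the cap allows:

#include <errno.h>
#include <stdio.h>

#define ISCSI_SEND_MAX_ALLOWED	10

static int send_reply(int attempt)
{
	return attempt < 12 ? -EAGAIN : 0;	/* always exhausts the cap */
}

int main(void)
{
	int retries = ISCSI_SEND_MAX_ALLOWED;
	int attempt = 0;
	int err;

	do {
		err = send_reply(attempt++);
		if (err == -EAGAIN && --retries < 0) {
			fprintf(stderr, "Send reply failed, error %d\n", err);
			break;
		}
	} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);

	return 0;
}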
index cea625906440ab6f50f16006e4056196b7cb4f99..902b649fc8efc29a034f3f808e0ae1f38a86c698 100644 (file)
@@ -2211,8 +2211,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
        u8 type;
        int ret = 0;
 
-       if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+       if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
+               sdkp->protection_type = 0;
                return ret;
+       }
 
        type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
 
@@ -2956,15 +2958,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
                q->limits.zoned = BLK_ZONED_HM;
        } else {
                sdkp->zoned = (buffer[8] >> 4) & 3;
-               if (sdkp->zoned == 1)
+               if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) {
                        /* Host-aware */
                        q->limits.zoned = BLK_ZONED_HA;
-               else
+               } else {
                        /*
-                        * Treat drive-managed devices as
-                        * regular block devices.
+                        * Treat drive-managed devices and host-aware devices
+                        * with partitions as regular block devices.
                         */
                        q->limits.zoned = BLK_ZONED_NONE;
+               }
        }
        if (blk_queue_is_zoned(q) && sdkp->first_scan)
                sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
index 7b7ef3acb504c06c95849212c5879a7931ffecee..b7492568e02f9d05b0c64ef3ddb22e35c01a06ed 100644 (file)
@@ -7457,7 +7457,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
                goto disable_device;
        }
 
-       ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+       ctrl_info->iomem_base = ioremap(pci_resource_start(
                ctrl_info->pci_dev, 0),
                sizeof(struct pqi_ctrl_registers));
        if (!ctrl_info->iomem_base) {
@@ -8689,11 +8689,11 @@ static void __attribute__((unused)) verify_structures(void)
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
                data.delete_operational_queue.queue_id) != 12);
        BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
-       BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+       BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
                data.create_operational_iq) != 64 - 11);
-       BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+       BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
                data.create_operational_oq) != 64 - 11);
-       BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+       BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
                data.delete_operational_queue) != 64 - 11);
 
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
index a85d52b5dc32094d04219b548619c2b7ea1de23f..f8397978f8ab57c20335d9740146db2ce2eeb155 100644 (file)
@@ -71,7 +71,7 @@ static int snirm710_probe(struct platform_device *dev)
 
        hostdata->dev = &dev->dev;
        dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
-       hostdata->base = ioremap_nocache(base, 0x100);
+       hostdata->base = ioremap(base, 0x100);
        hostdata->differential = 0;
 
        hostdata->clock = SNIRM710_CLOCK;
index f8faf8b3d9652feca0867387045d433e84a45cb0..fb41636519ee80a6dd92723311783b84346e7c63 100644 (file)
@@ -1842,9 +1842,11 @@ static int storvsc_probe(struct hv_device *device,
         */
        host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
        /*
+        * For non-IDE disks, the host supports multiple channels.
         * Set the number of HW queues we are supporting.
         */
-       host->nr_hw_queues = num_present_cpus();
+       if (!dev_is_ide)
+               host->nr_hw_queues = num_present_cpus();
 
        /*
         * Set the error handler work queue.
index 440a73eae64766dcfac5faac48735da51639eb4a..f37df79e37e1b6b735ddc715eb25d6e5d8490878 100644 (file)
@@ -190,7 +190,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
        if (!res || !res->start)
                goto fail_unlink;
 
-       esp->regs = ioremap_nocache(res->start, 0x20);
+       esp->regs = ioremap(res->start, 0x20);
        if (!esp->regs)
                goto fail_unmap_regs;
 
@@ -198,7 +198,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
        if (!res || !res->start)
                goto fail_unmap_regs;
 
-       esp->dma_regs = ioremap_nocache(res->start, 0x10);
+       esp->dma_regs = ioremap(res->start, 0x10);
 
        esp->command_block = dma_alloc_coherent(esp->dev, 16,
                                                &esp->command_block_dma,
index b2af04c57a39b3f7675b83de97ea54196dde0fab..6feeb0faf123af11818f9a84207102d67d12e2bb 100644 (file)
@@ -99,6 +99,12 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
         */
        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
 
+       /*
+        * Disable the Autohibern8 feature in the Cadence UFS controller
+        * to mask unexpected interrupt triggers.
+        */
+       hba->ahit = 0;
+
        return 0;
 }
 
index baeecee35d1e1229646ae0199d3d37fc023ae1b6..53dd87628cbe4a26997ceaf725ad90dff93418f9 100644 (file)
@@ -203,7 +203,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
        bsg_dev->parent = get_device(parent);
        bsg_dev->release = ufs_bsg_node_release;
 
-       dev_set_name(bsg_dev, "ufs-bsg");
+       dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
 
        ret = device_add(bsg_dev);
        if (ret)
index 77bce208210eb3dafcedff5e1e95ec31b0053e87..7eac76cccc4c6466f06af1ace337cb7324dc8f92 100644 (file)
@@ -89,7 +89,7 @@ zalon_probe(struct parisc_device *dev)
        struct gsc_irq gsc_irq;
        u32 zalon_vers;
        int error = -ENODEV;
-       void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096);
+       void __iomem *zalon = ioremap(dev->hpa.start, 4096);
        void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET;
        static int unit = 0;
        struct Scsi_Host *host;
index a23a8e5794f5c56233d1fe453f796e5e6f251920..bdd82e497d5fcb7c683fd3711cdc938d99509103 100644 (file)
@@ -801,7 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
        /* additional setup required for Fastlane */
        if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
                /* map full address space up to ESP base for DMA */
-               zep->board_base = ioremap_nocache(board,
+               zep->board_base = ioremap(board,
                                                FASTLANE_ESP_ADDR-1);
                if (!zep->board_base) {
                        pr_err("Cannot allocate board address space\n");
@@ -816,7 +816,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
        esp->ops = zdd->esp_ops;
 
        if (ioaddr > 0xffffff)
-               esp->regs = ioremap_nocache(ioaddr, 0x20);
+               esp->regs = ioremap(ioaddr, 0x20);
        else
                /* ZorroII address space remapped nocache by early startup */
                esp->regs = ZTWO_VADDR(ioaddr);
@@ -842,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
                 * Only Fastlane Z3 for now - add switch for correct struct
                 * dma_registers size if adding any more
                 */
-               esp->dma_regs = ioremap_nocache(dmaaddr,
+               esp->dma_regs = ioremap(dmaaddr,
                                sizeof(struct fastlane_dma_registers));
        } else
                /* ZorroII address space remapped nocache by early startup */
index 9475353f49d6c55455ead3ebcec759d55ce4a922..d996782a710642cd6da617457e0ed7ac682d642f 100644 (file)
@@ -368,7 +368,7 @@ static int clk_establish_mapping(struct clk *clk)
        if (!mapping->base && mapping->phys) {
                kref_init(&mapping->ref);
 
-               mapping->base = ioremap_nocache(mapping->phys, mapping->len);
+               mapping->base = ioremap(mapping->phys, mapping->len);
                if (unlikely(!mapping->base))
                        return -ENXIO;
        } else if (mapping->base) {
index 8485e812d9b2a7eb74efd2ee106626b399862ae1..f8e070d67fa3266d8c921c58dfb92c74373a3f26 100644 (file)
@@ -213,7 +213,7 @@ int __init register_intc_controller(struct intc_desc *desc)
                        WARN_ON(resource_type(res) != IORESOURCE_MEM);
                        d->window[k].phys = res->start;
                        d->window[k].size = resource_size(res);
-                       d->window[k].virt = ioremap_nocache(res->start,
+                       d->window[k].virt = ioremap(res->start,
                                                         resource_size(res));
                        if (!d->window[k].virt)
                                goto err2;
index 87d69e7471f97e789c23174c4f3ff1245b037b2f..f9f043a3d90a234134697519430c0fe1fa532660 100644 (file)
@@ -73,7 +73,7 @@ int register_intc_userimask(unsigned long addr)
        if (unlikely(uimask))
                return -EBUSY;
 
-       uimask = ioremap_nocache(addr, SZ_4K);
+       uimask = ioremap(addr, SZ_4K);
        if (unlikely(!uimask))
                return -ENOMEM;
 
index 833e04a7835c56b80f77af30db86af5f38d0e4c7..1778f8c62861b4eb4fd7056168fbb1e4e68e2c4f 100644 (file)
@@ -14,6 +14,7 @@ source "drivers/soc/qcom/Kconfig"
 source "drivers/soc/renesas/Kconfig"
 source "drivers/soc/rockchip/Kconfig"
 source "drivers/soc/samsung/Kconfig"
+source "drivers/soc/sifive/Kconfig"
 source "drivers/soc/sunxi/Kconfig"
 source "drivers/soc/tegra/Kconfig"
 source "drivers/soc/ti/Kconfig"
index 2ec3550035243791054161097517da45366c3a15..8b49d782a1ab7cd9fcb1e8ed121ee4b712f82081 100644 (file)
@@ -20,6 +20,7 @@ obj-y                         += qcom/
 obj-y                          += renesas/
 obj-$(CONFIG_ARCH_ROCKCHIP)    += rockchip/
 obj-$(CONFIG_SOC_SAMSUNG)      += samsung/
+obj-$(CONFIG_SOC_SIFIVE)       += sifive/
 obj-y                          += sunxi/
 obj-$(CONFIG_ARCH_TEGRA)       += tegra/
 obj-y                          += ti/
index 5823f5b67d1619988451ab1d097f2a46881ac7a9..3f0261d53ad9528b6d2d012a6b4eca49c422b33f 100644 (file)
@@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
                                     struct meson_ee_pwrc *pwrc,
                                     struct meson_ee_pwrc_domain *dom)
 {
+       int ret;
+
        dom->pwrc = pwrc;
        dom->num_rstc = dom->desc.reset_names_count;
        dom->num_clks = dom->desc.clk_names_count;
@@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
          * prepare/enable counters won't be in sync.
          */
        if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) {
-               int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
+               ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
                if (ret)
                        return ret;
 
-               pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false);
-       } else
-               pm_genpd_init(&dom->base, NULL,
-                             (dom->desc.get_power ?
-                              dom->desc.get_power(dom) : true));
+               ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov,
+                                   false);
+               if (ret)
+                       return ret;
+       } else {
+               ret = pm_genpd_init(&dom->base, NULL,
+                                   (dom->desc.get_power ?
+                                    dom->desc.get_power(dom) : true));
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
@@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
                pwrc->xlate.domains[i] = &dom->base;
        }
 
-       of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
-
-       return 0;
+       return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
 }
 
 static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
new file mode 100644 (file)
index 0000000..58cf8c4
--- /dev/null
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if SOC_SIFIVE
+
+config SIFIVE_L2
+       bool "Sifive L2 Cache controller"
+       help
+         Support for the L2 cache controller on SiFive platforms.
+
+endif
diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile
new file mode 100644 (file)
index 0000000..b5caff7
--- /dev/null
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SIFIVE_L2)        += sifive_l2_cache.o
similarity index 99%
rename from arch/riscv/mm/sifive_l2_cache.c
rename to drivers/soc/sifive/sifive_l2_cache.c
index a9ffff3277c77fa7b1847e45b890689aed909d7c..a5069394cd617771681cc7372167f87e2ac9a9de 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
 
 #define SIFIVE_L2_DIRECCFIX_LOW 0x100
 #define SIFIVE_L2_DIRECCFIX_HIGH 0x104
index eb96a3086d6d3a565fb1260cc6281548c6d83e01..5db919d96aba5811f53cb94e532d57693e8374ec 100644 (file)
@@ -219,7 +219,7 @@ static int __init tegra_flowctrl_init(void)
                return 0;
        }
 
-       tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res));
+       tegra_flowctrl_base = ioremap(res.start, resource_size(&res));
        if (!tegra_flowctrl_base)
                return -ENXIO;
 
index 4d719d4b8d5a04f87a43879a214a79b809827f57..606abbe55bbaf717e9f5db1a6339ba707bed8990 100644 (file)
@@ -408,7 +408,7 @@ static int __init tegra_init_fuse(void)
                }
        }
 
-       fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+       fuse->base = ioremap(regs.start, resource_size(&regs));
        if (!fuse->base) {
                pr_err("failed to map FUSE registers\n");
                return -ENXIO;
index df76778af601e66be1ce58695c25035573cf4b6e..a2fd6ccd48f92447d8eb27f27304365e79e500ae 100644 (file)
@@ -159,11 +159,11 @@ void __init tegra_init_apbmisc(void)
                }
        }
 
-       apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
+       apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
        if (!apbmisc_base)
                pr_err("failed to map APBMISC registers\n");
 
-       strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
+       strapping_base = ioremap(straps.start, resource_size(&straps));
        if (!strapping_base)
                pr_err("failed to map strapping options registers\n");
 
index ea0e11a09c120cdde96c28a315b9ded1f8a88ec3..1699dda6b393a664f5b74dadfa315c244a7a80c4 100644 (file)
@@ -2826,7 +2826,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
 
        of_address_to_resource(np, index, &regs);
 
-       wake = ioremap_nocache(regs.start, resource_size(&regs));
+       wake = ioremap(regs.start, resource_size(&regs));
        if (!wake) {
                dev_err(pmc->dev, "failed to map PMC wake registers\n");
                return;
@@ -3097,7 +3097,7 @@ static int __init tegra_pmc_early_init(void)
                }
        }
 
-       pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
+       pmc->base = ioremap(regs.start, resource_size(&regs));
        if (!pmc->base) {
                pr_err("failed to map PMC registers\n");
                of_node_put(np);
index cf545f428d03b3b7649dda60417179f0fa88b69e..4486e055794c13233bd7c7adc5859ba6495747d8 100644 (file)
@@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS
          called ti_sci_pm_domains. Note this is needed early in boot before
          rootfs may be available.
 
+config TI_K3_RINGACC
+       bool "K3 Ring accelerator Sub System"
+       depends on ARCH_K3 || COMPILE_TEST
+       depends on TI_SCI_INTA_IRQCHIP
+       help
+         Say y here to support the K3 Ring accelerator module.
+         The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+         to enable straightforward passing of work between a producer
+         and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+         If unsure, say N.
+
 endif # SOC_TI
 
 config TI_SCI_INTA_MSI_DOMAIN
index 788b5cd1e18008d15610b6119cb19e4a76e09e2d..bec827937a5f3b3d1022b8d46ecae46a4d3e2140 100644 (file)
@@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS)          += omap_prm.o
 obj-$(CONFIG_WKUP_M3_IPC)              += wkup_m3_ipc.o
 obj-$(CONFIG_TI_SCI_PM_DOMAINS)                += ti_sci_pm_domains.o
 obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN)   += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC)            += k3-ringacc.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
new file mode 100644 (file)
index 0000000..5fb2ee2
--- /dev/null
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK            GENMASK(19, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+       u32     resv_16[4];
+       u32     db;
+       u32     resv_4[1];
+       u32     occ;
+       u32     indx;
+       u32     hwocc;
+       u32     hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP        0x1000
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+       u32     head_data[128];
+       u32     tail_data[128];
+       u32     peek_head_data[128];
+       u32     peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+       u32     revision;
+       u32     config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK              GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+       u32     control;
+       u32     status;
+       u8      resv_512[504];
+       u32     data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP   0x1000
+#define K3_RINGACC_PROXY_NOT_USED      (-1)
+
+enum k3_ringacc_proxy_access_mode {
+       PROXY_ACCESS_MODE_HEAD = 0,
+       PROXY_ACCESS_MODE_TAIL = 1,
+       PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+       PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES  (512U)
+#define K3_RINGACC_FIFO_REGS_STEP      0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT    (127U)
+
+struct k3_ring_ops {
+       int (*push_tail)(struct k3_ring *ring, void *elm);
+       int (*push_head)(struct k3_ring *ring, void *elm);
+       int (*pop_tail)(struct k3_ring *ring, void *elm);
+       int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @free: Number of free elements
+ * @occ: Ring occupancy
+ * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
+ * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
+ * @ring_id: Ring Id
+ * @parent: Pointer to the parent struct k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ */
+struct k3_ring {
+       struct k3_ring_rt_regs __iomem *rt;
+       struct k3_ring_fifo_regs __iomem *fifos;
+       struct k3_ringacc_proxy_target_regs  __iomem *proxy;
+       dma_addr_t      ring_mem_dma;
+       void            *ring_mem_virt;
+       struct k3_ring_ops *ops;
+       u32             size;
+       enum k3_ring_size elm_size;
+       enum k3_ring_mode mode;
+       u32             flags;
+#define K3_RING_FLAG_BUSY      BIT(1)
+#define K3_RING_FLAG_SHARED    BIT(2)
+       u32             free;
+       u32             occ;
+       u32             windex;
+       u32             rindex;
+       u32             ring_id;
+       struct k3_ringacc       *parent;
+       u32             use_count;
+       int             proxy_id;
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer to the RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of rings in the RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA ring reset workaround enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of ring descriptors (struct k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protect rings allocation
+ * @tisci: pointer to the ti-sci handle
+ * @tisci_ring_ops: ti-sci rings ops
+ * @tisci_dev_id: ti-sci device id
+ */
+struct k3_ringacc {
+       struct device *dev;
+       struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+       void __iomem *proxy_target_base;
+       u32 num_rings; /* number of rings in Ringacc module */
+       unsigned long *rings_inuse;
+       struct ti_sci_resource *rm_gp_range;
+
+       bool dma_ring_reset_quirk;
+       u32 num_proxies;
+       unsigned long *proxy_inuse;
+
+       struct k3_ring *rings;
+       struct list_head list;
+       struct mutex req_lock; /* protect rings allocation */
+
+       const struct ti_sci_handle *tisci;
+       const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+       u32 tisci_dev_id;
+};
+
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+       return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+              (4 << ring->elm_size);
+}
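+
+/*
+ * Note on k3_ringacc_ring_get_fifo_pos(): accesses must target the tail of
+ * the 512-byte FIFO window, so with 8-byte elements (4 << elm_size == 8)
+ * the returned position is 512 - 8 = 504. (Worked example for illustration.)
+ */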
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+       return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
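+
+/*
+ * Elements are laid out back to back in ring memory: for illustration,
+ * idx 3 with 8-byte elements (4 << elm_size == 8) resolves to
+ * ring_mem_virt + 24.
+ */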
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+               .push_tail = k3_ringacc_ring_push_mem,
+               .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+               .push_tail = k3_ringacc_ring_push_io,
+               .push_head = k3_ringacc_ring_push_head_io,
+               .pop_tail = k3_ringacc_ring_pop_tail_io,
+               .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+               .push_tail = k3_ringacc_ring_push_tail_proxy,
+               .push_head = k3_ringacc_ring_push_head_proxy,
+               .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+               .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+       struct device *dev = ring->parent->dev;
+
+       dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+       dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+               &ring->ring_mem_dma);
+       dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+               ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+
+       dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+       dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+       dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+       dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+       dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+       if (ring->ring_mem_virt)
+               print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+                                    16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+                                       int id, u32 flags)
+{
+       int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+       mutex_lock(&ringacc->req_lock);
+
+       if (id == K3_RINGACC_RING_ID_ANY) {
+               /* Request for any general purpose ring */
+               struct ti_sci_resource_desc *gp_rings =
+                                               &ringacc->rm_gp_range->desc[0];
+               unsigned long size;
+
+               size = gp_rings->start + gp_rings->num;
+               id = find_next_zero_bit(ringacc->rings_inuse, size,
+                                       gp_rings->start);
+               if (id == size)
+                       goto error;
+       } else if (id < 0) {
+               goto error;
+       }
+
+       if (test_bit(id, ringacc->rings_inuse) &&
+           !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+               goto error;
+       else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+               goto out;
+
+       if (flags & K3_RINGACC_RING_USE_PROXY) {
+               proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
+                                             ringacc->num_proxies, 0);
+               if (proxy_id == ringacc->num_proxies)
+                       goto error;
+       }
+
+       if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+               set_bit(proxy_id, ringacc->proxy_inuse);
+               ringacc->rings[id].proxy_id = proxy_id;
+               dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+                       proxy_id);
+       } else {
+               dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+       }
+
+       set_bit(id, ringacc->rings_inuse);
+out:
+       ringacc->rings[id].use_count++;
+       mutex_unlock(&ringacc->req_lock);
+       return &ringacc->rings[id];
+
+error:
+       mutex_unlock(&ringacc->req_lock);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       int ret;
+
+       ret = ringacc->tisci_ring_ops->config(
+                       ringacc->tisci,
+                       TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
+                       ringacc->tisci_dev_id,
+                       ring->ring_id,
+                       0,
+                       0,
+                       ring->size,
+                       0,
+                       0,
+                       0);
+       if (ret)
+               dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+                       ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return;
+
+       ring->occ = 0;
+       ring->free = 0;
+       ring->rindex = 0;
+       ring->windex = 0;
+
+       k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+                                              enum k3_ring_mode mode)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       int ret;
+
+       ret = ringacc->tisci_ring_ops->config(
+                       ringacc->tisci,
+                       TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
+                       ringacc->tisci_dev_id,
+                       ring->ring_id,
+                       0,
+                       0,
+                       0,
+                       mode,
+                       0,
+                       0);
+       if (ret)
+               dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+                       ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return;
+
+       if (!ring->parent->dma_ring_reset_quirk)
+               goto reset;
+
+       if (!occ)
+               occ = readl(&ring->rt->occ);
+
+       if (occ) {
+               u32 db_ring_cnt, db_ring_cnt_cur;
+
+               dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+                       ring->ring_id, occ);
+               /* TI-SCI ring reset */
+               k3_ringacc_ring_reset_sci(ring);
+
+               /*
+                * Setup the ring in ring/doorbell mode (if not already in this
+                * mode)
+                */
+               if (ring->mode != K3_RINGACC_RING_MODE_RING)
+                       k3_ringacc_ring_reconfig_qmode_sci(
+                                       ring, K3_RINGACC_RING_MODE_RING);
+               /*
+                * Ring the doorbell 2**22 - ringOcc times.
+                * This will wrap the internal UDMAP ring state occupancy
+                * counter (which is 21 bits wide) to 0.
+                */
+               db_ring_cnt = (1U << 22) - occ;
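+               /*
+                * Worked example (illustrative): occ = 5 gives
+                * db_ring_cnt = 4194304 - 5 = 4194299, which drains as
+                * 33025 doorbell writes of 127 plus a final write of 124.
+                */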
+
+               while (db_ring_cnt != 0) {
+                       /*
+                        * Ring the doorbell with the maximum count each
+                        * iteration if possible to minimize the total
+                        * number of writes.
+                        */
+                       if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+                               db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+                       else
+                               db_ring_cnt_cur = db_ring_cnt;
+
+                       writel(db_ring_cnt_cur, &ring->rt->db);
+                       db_ring_cnt -= db_ring_cnt_cur;
+               }
+
+               /* Restore the original ring mode (if not ring mode) */
+               if (ring->mode != K3_RINGACC_RING_MODE_RING)
+                       k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+       }
+
+reset:
+       /* Reset the ring */
+       k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
+
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       int ret;
+
+       ret = ringacc->tisci_ring_ops->config(
+                       ringacc->tisci,
+                       TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+                       ringacc->tisci_dev_id,
+                       ring->ring_id,
+                       0,
+                       0,
+                       0,
+                       0,
+                       0,
+                       0);
+       if (ret)
+               dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+                       ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+       struct k3_ringacc *ringacc;
+
+       if (!ring)
+               return -EINVAL;
+
+       ringacc = ring->parent;
+
+       dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+       if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+               return -EINVAL;
+
+       mutex_lock(&ringacc->req_lock);
+
+       if (--ring->use_count)
+               goto out;
+
+       if (!(ring->flags & K3_RING_FLAG_BUSY))
+               goto no_init;
+
+       k3_ringacc_ring_free_sci(ring);
+
+       dma_free_coherent(ringacc->dev,
+                         ring->size * (4 << ring->elm_size),
+                         ring->ring_mem_virt, ring->ring_mem_dma);
+       ring->flags = 0;
+       ring->ops = NULL;
+       if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+               clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+               ring->proxy = NULL;
+               ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+       }
+
+no_init:
+       clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+out:
+       mutex_unlock(&ringacc->req_lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+       if (!ring)
+               return -EINVAL;
+
+       return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+       if (!ring)
+               return -EINVAL;
+
+       return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+       int irq_num;
+
+       if (!ring)
+               return -EINVAL;
+
+       irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+       if (irq_num <= 0)
+               irq_num = -EINVAL;
+       return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       u32 ring_idx;
+       int ret;
+
+       if (!ringacc->tisci)
+               return -EINVAL;
+
+       ring_idx = ring->ring_id;
+       ret = ringacc->tisci_ring_ops->config(
+                       ringacc->tisci,
+                       TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+                       ringacc->tisci_dev_id,
+                       ring_idx,
+                       lower_32_bits(ring->ring_mem_dma),
+                       upper_32_bits(ring->ring_mem_dma),
+                       ring->size,
+                       ring->mode,
+                       ring->elm_size,
+                       0);
+       if (ret)
+               dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+                       ret, ring_idx);
+
+       return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+       struct k3_ringacc *ringacc = ring->parent;
+       int ret = 0;
+
+       if (!ring || !cfg)
+               return -EINVAL;
+       if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+           cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+           cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+           !test_bit(ring->ring_id, ringacc->rings_inuse))
+               return -EINVAL;
+
+       if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+           ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+           cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+               dev_err(ringacc->dev,
+                       "Message mode must use proxy for %u element size\n",
+                       4 << ring->elm_size);
+               return -EINVAL;
+       }
+
+       /*
+        * In the case of a shared ring, only the first (master) user can
+        * configure the ring. The sequence to be followed by the client is:
+        * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+        * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+        * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+        * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+        */
+       if (ring->use_count != 1)
+               return 0;
+
+       ring->size = cfg->size;
+       ring->elm_size = cfg->elm_size;
+       ring->mode = cfg->mode;
+       ring->occ = 0;
+       ring->free = 0;
+       ring->rindex = 0;
+       ring->windex = 0;
+
+       if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+               ring->proxy = ringacc->proxy_target_base +
+                             ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+       switch (ring->mode) {
+       case K3_RINGACC_RING_MODE_RING:
+               ring->ops = &k3_ring_mode_ring_ops;
+               break;
+       case K3_RINGACC_RING_MODE_MESSAGE:
+               if (ring->proxy)
+                       ring->ops = &k3_ring_mode_proxy_ops;
+               else
+                       ring->ops = &k3_ring_mode_msg_ops;
+               break;
+       default:
+               ring->ops = NULL;
+               ret = -EINVAL;
+               goto err_free_proxy;
+       }
+
+       ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
+                                       ring->size * (4 << ring->elm_size),
+                                       &ring->ring_mem_dma, GFP_KERNEL);
+       if (!ring->ring_mem_virt) {
+               dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+               ret = -ENOMEM;
+               goto err_free_ops;
+       }
+
+       ret = k3_ringacc_ring_cfg_sci(ring);
+
+       if (ret)
+               goto err_free_mem;
+
+       ring->flags |= K3_RING_FLAG_BUSY;
+       ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+                       K3_RING_FLAG_SHARED : 0;
+
+       k3_ringacc_ring_dump(ring);
+
+       return 0;
+
+err_free_mem:
+       dma_free_coherent(ringacc->dev,
+                         ring->size * (4 << ring->elm_size),
+                         ring->ring_mem_virt,
+                         ring->ring_mem_dma);
+err_free_ops:
+       ring->ops = NULL;
+err_free_proxy:
+       ring->proxy = NULL;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
+
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       if (!ring->free)
+               ring->free = ring->size - readl(&ring->rt->occ);
+
+       return ring->free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       return readl(&ring->rt->occ);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+       return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+       K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+       K3_RINGACC_ACCESS_MODE_POP_HEAD,
+       K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+       K3_RINGACC_ACCESS_MODE_POP_TAIL,
+       K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+       K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x)       (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x)     (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+                                    enum k3_ringacc_proxy_access_mode mode)
+{
+       u32 val;
+
+       val = ring->ring_id;
+       val |= K3_RINGACC_PROXY_MODE(mode);
+       val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+       writel(val, &ring->proxy->control);
+       return 0;
+}
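+
+/*
+ * The control word written above packs the ring id into the low bits, the
+ * access mode into bits [17:16] and the element size into bits [26:24];
+ * e.g. ring 5 in PROXY_ACCESS_MODE_TAIL with 8-byte elements (elm_size 1)
+ * encodes as 0x01010005 (worked example for illustration).
+ */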
+
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+                                       enum k3_ringacc_access_mode access_mode)
+{
+       void __iomem *ptr;
+
+       ptr = (void __iomem *)&ring->proxy->data;
+
+       switch (access_mode) {
+       case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+       case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+               k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+               break;
+       case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+       case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+               k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+       switch (access_mode) {
+       case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+       case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+               dev_dbg(ring->parent->dev,
+                       "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+                       access_mode);
+               memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+               ring->occ--;
+               break;
+       case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+       case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+               dev_dbg(ring->parent->dev,
+                       "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+                       access_mode);
+               memcpy_toio(ptr, elem, (4 << ring->elm_size));
+               ring->free--;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
+               ring->occ);
+       return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_proxy(ring, elem,
+                                           K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_proxy(ring, elem,
+                                           K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_proxy(ring, elem,
+                                           K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_proxy(ring, elem,
+                                           K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+                                    enum k3_ringacc_access_mode access_mode)
+{
+       void __iomem *ptr;
+
+       switch (access_mode) {
+       case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+       case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+               ptr = (void __iomem *)&ring->fifos->head_data;
+               break;
+       case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+       case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+               ptr = (void __iomem *)&ring->fifos->tail_data;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+       switch (access_mode) {
+       case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+       case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+               dev_dbg(ring->parent->dev,
+                       "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+                       access_mode);
+               memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+               ring->occ--;
+               break;
+       case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+       case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+               dev_dbg(ring->parent->dev,
+                       "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+                       access_mode);
+               memcpy_toio(ptr, elem, (4 << ring->elm_size));
+               ring->free--;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
+               ring->windex, ring->occ, ring->rindex);
+       return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_io(ring, elem,
+                                        K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_io(ring, elem,
+                                        K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_io(ring, elem,
+                                        K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+       return k3_ringacc_ring_access_io(ring, elem,
+                                        K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+       void *elem_ptr;
+
+       elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);
+
+       memcpy(elem_ptr, elem, (4 << ring->elm_size));
+
+       ring->windex = (ring->windex + 1) % ring->size;
+       ring->free--;
+       writel(1, &ring->rt->db);
+
+       dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+               ring->free, ring->windex);
+
+       return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+       void *elem_ptr;
+
+       elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);
+
+       memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+       ring->rindex = (ring->rindex + 1) % ring->size;
+       ring->occ--;
+       writel(-1, &ring->rt->db);
+
+       dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+               ring->occ, ring->rindex, elem_ptr);
+       return 0;
+}
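+
+/*
+ * In RING mode both indices are software-maintained: a push advances windex
+ * and writes +1 to the doorbell, a pop advances rindex and writes -1, so the
+ * hardware occupancy counter stays in step with the memory-mapped ring.
+ */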
+
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
+               ring->windex);
+
+       if (k3_ringacc_ring_is_full(ring))
+               return -ENOMEM;
+
+       if (ring->ops && ring->ops->push_tail)
+               ret = ring->ops->push_tail(ring, elem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+               ring->free, ring->windex);
+
+       if (k3_ringacc_ring_is_full(ring))
+               return -ENOMEM;
+
+       if (ring->ops && ring->ops->push_head)
+               ret = ring->ops->push_head(ring, elem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       if (!ring->occ)
+               ring->occ = k3_ringacc_ring_get_occ(ring);
+
+       dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
+               ring->rindex);
+
+       if (!ring->occ)
+               return -ENODATA;
+
+       if (ring->ops && ring->ops->pop_head)
+               ret = ring->ops->pop_head(ring, elem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+               return -EINVAL;
+
+       if (!ring->occ)
+               ring->occ = k3_ringacc_ring_get_occ(ring);
+
+       dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
+               ring->rindex);
+
+       if (!ring->occ)
+               return -ENODATA;
+
+       if (ring->ops && ring->ops->pop_tail)
+               ret = ring->ops->pop_tail(ring, elem);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
+
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+                                               const char *property)
+{
+       struct device_node *ringacc_np;
+       struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+       struct k3_ringacc *entry;
+
+       ringacc_np = of_parse_phandle(np, property, 0);
+       if (!ringacc_np)
+               return ERR_PTR(-ENODEV);
+
+       mutex_lock(&k3_ringacc_list_lock);
+       list_for_each_entry(entry, &k3_ringacc_list, list)
+               if (entry->dev->of_node == ringacc_np) {
+                       ringacc = entry;
+                       break;
+               }
+       mutex_unlock(&k3_ringacc_list_lock);
+       of_node_put(ringacc_np);
+
+       return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+       struct device_node *node = ringacc->dev->of_node;
+       struct device *dev = ringacc->dev;
+       struct platform_device *pdev = to_platform_device(dev);
+       int ret;
+
+       if (!node) {
+               dev_err(dev, "device tree info unavailable\n");
+               return -ENODEV;
+       }
+
+       ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+       if (ret) {
+               dev_err(dev, "ti,num-rings read failure %d\n", ret);
+               return ret;
+       }
+
+       ringacc->dma_ring_reset_quirk =
+                       of_property_read_bool(node, "ti,dma-ring-reset-quirk");
+
+       ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+       if (IS_ERR(ringacc->tisci)) {
+               ret = PTR_ERR(ringacc->tisci);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "ti,sci read fail %d\n", ret);
+               ringacc->tisci = NULL;
+               return ret;
+       }
+
+       ret = of_property_read_u32(node, "ti,sci-dev-id",
+                                  &ringacc->tisci_dev_id);
+       if (ret) {
+               dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+               return ret;
+       }
+
+       pdev->id = ringacc->tisci_dev_id;
+
+       ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+                                               ringacc->tisci_dev_id,
+                                               "ti,sci-rm-range-gp-rings");
+       if (IS_ERR(ringacc->rm_gp_range)) {
+               dev_err(dev, "Failed to allocate MSI interrupts\n");
+               return PTR_ERR(ringacc->rm_gp_range);
+       }
+
+       return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+                                                ringacc->rm_gp_range);
+}
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+       struct k3_ringacc *ringacc;
+       void __iomem *base_fifo, *base_rt;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int ret, i;
+
+       ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+       if (!ringacc)
+               return -ENOMEM;
+
+       ringacc->dev = dev;
+       mutex_init(&ringacc->req_lock);
+
+       dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+                                           DOMAIN_BUS_TI_SCI_INTA_MSI);
+       if (!dev->msi_domain) {
+               dev_err(dev, "Failed to get MSI domain\n");
+               return -EPROBE_DEFER;
+       }
+
+       ret = k3_ringacc_probe_dt(ringacc);
+       if (ret)
+               return ret;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+       base_rt = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base_rt))
+               return PTR_ERR(base_rt);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
+       base_fifo = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base_fifo))
+               return PTR_ERR(base_fifo);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
+       ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ringacc->proxy_gcfg))
+               return PTR_ERR(ringacc->proxy_gcfg);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                          "proxy_target");
+       ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ringacc->proxy_target_base))
+               return PTR_ERR(ringacc->proxy_target_base);
+
+       ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+                                    K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+       ringacc->rings = devm_kzalloc(dev,
+                                     sizeof(*ringacc->rings) *
+                                     ringacc->num_rings,
+                                     GFP_KERNEL);
+       ringacc->rings_inuse = devm_kcalloc(dev,
+                                           BITS_TO_LONGS(ringacc->num_rings),
+                                           sizeof(unsigned long), GFP_KERNEL);
+       ringacc->proxy_inuse = devm_kcalloc(dev,
+                                           BITS_TO_LONGS(ringacc->num_proxies),
+                                           sizeof(unsigned long), GFP_KERNEL);
+
+       if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+               return -ENOMEM;
+
+       for (i = 0; i < ringacc->num_rings; i++) {
+               ringacc->rings[i].rt = base_rt +
+                                      K3_RINGACC_RT_REGS_STEP * i;
+               ringacc->rings[i].fifos = base_fifo +
+                                         K3_RINGACC_FIFO_REGS_STEP * i;
+               ringacc->rings[i].parent = ringacc;
+               ringacc->rings[i].ring_id = i;
+               ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+       }
+       dev_set_drvdata(dev, ringacc);
+
+       ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+       mutex_lock(&k3_ringacc_list_lock);
+       list_add_tail(&ringacc->list, &k3_ringacc_list);
+       mutex_unlock(&k3_ringacc_list_lock);
+
+       dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+                ringacc->num_rings,
+                ringacc->rm_gp_range->desc[0].start,
+                ringacc->rm_gp_range->desc[0].num,
+                ringacc->tisci_dev_id);
+       dev_info(dev, "dma-ring-reset-quirk: %s\n",
+                ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+       dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+                readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+       return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+       { .compatible = "ti,am654-navss-ringacc", },
+       {},
+};
+
+static struct platform_driver k3_ringacc_driver = {
+       .probe          = k3_ringacc_probe,
+       .driver         = {
+               .name   = "k3-ringacc",
+               .of_match_table = k3_ringacc_of_match,
+               .suppress_bind_attrs = true,
+       },
+};
+builtin_platform_driver(k3_ringacc_driver);
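A consumer-side sketch of the API the new driver exports (illustrative only:
the "ti,ringacc" phandle property name and the cfg values are assumptions,
and error handling is abbreviated):

	struct k3_ringacc *ra = of_k3_ringacc_get_by_phandle(np, "ti,ringacc");
	struct k3_ring_cfg cfg = {
		.size = 64,				/* elements */
		.elm_size = K3_RINGACC_RING_ELSIZE_8,	/* 8-byte elements */
		.mode = K3_RINGACC_RING_MODE_RING,
	};
	struct k3_ring *ring;
	u64 elem = 0;

	if (IS_ERR(ra))
		return PTR_ERR(ra);
	ring = k3_ringacc_request_ring(ra, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENOSPC;
	if (k3_ringacc_ring_cfg(ring, &cfg))
		goto free_ring;
	k3_ringacc_ring_push(ring, &elem);	/* tail push */
	k3_ringacc_ring_pop(ring, &elem);	/* head pop */
free_ring:
	k3_ringacc_ring_free(ring);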
index 378369d9364ae4d6c4f24cdf40f5e4aa5b4fcc04..e9ece45d7a3334e7612cc4f61c616873472e312c 100644 (file)
@@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
        ret = rproc_boot(m3_ipc->rproc);
        if (ret)
                dev_err(dev, "rproc_boot failed\n");
+       else
+               m3_ipc_state = m3_ipc;
 
        do_exit(0);
 }
@@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
                goto err_put_rproc;
        }
 
-       m3_ipc_state = m3_ipc;
-
        return 0;
 
 err_put_rproc:
index a840c027213552f5d992d9ff76821aec6ae0fd4a..a3aa40996f13c71407d338bd289d2a7d90d6addd 100644 (file)
@@ -511,7 +511,7 @@ static int xvcu_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+       xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
                                                 resource_size(res));
        if (!xvcu->vcu_slcr_ba) {
                dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
@@ -524,7 +524,7 @@ static int xvcu_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+       xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
                                                     resource_size(res));
        if (!xvcu->logicore_reg_ba) {
                dev_err(&pdev->dev, "logicore register mapping failed.\n");
index 870f7797b56b9cf92cc0df6749dd2c0b61b0c4f9..d6ed0c355954f072e968b846ccec3e1f7278b777 100644 (file)
@@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI
          This controller does not support generic SPI messages. It only
          supports the high-level SPI memory interface.
 
+config SPI_HISI_SFC_V3XX
+       tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
+       depends on (ARM64 && ACPI) || COMPILE_TEST
+       depends on HAS_IOMEM
+       select MTD_SPI_NOR
+       help
+         This enables support for HiSilicon v3xx SPI-NOR flash controller
+         found in Hi16XX chipsets.
+
 config SPI_NXP_FLEXSPI
        tristate "NXP Flex SPI controller"
        depends on ARCH_LAYERSCAPE || HAS_IOMEM
index bb49c9e6d0a0c00cddf2275a84cb74fdc08ddf74..9b65ec5afc5e66507f9f694f356597613f6b3d12 100644 (file)
@@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI)           += spi-fsl-lpspi.o
 obj-$(CONFIG_SPI_FSL_QUADSPI)          += spi-fsl-qspi.o
 obj-$(CONFIG_SPI_FSL_SPI)              += spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)                 += spi-gpio.o
+obj-$(CONFIG_SPI_HISI_SFC_V3XX)                += spi-hisi-sfc-v3xx.o
 obj-$(CONFIG_SPI_IMG_SPFI)             += spi-img-spfi.o
 obj-$(CONFIG_SPI_IMX)                  += spi-imx.o
 obj-$(CONFIG_SPI_LANTIQ_SSC)           += spi-lantiq-ssc.o
index 56f0ca361deba481ff7807a6edf91eef58cc98e5..013458cabe3c6f05e2b47fa4384d87295ab4206b 100644 (file)
@@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master,
        master->dma_tx = dma_request_chan(dev, "tx");
        if (IS_ERR(master->dma_tx)) {
                err = PTR_ERR(master->dma_tx);
-               if (err == -EPROBE_DEFER) {
-                       dev_warn(dev, "no DMA channel available at the moment\n");
-                       goto error_clear;
-               }
-               dev_err(dev,
-                       "DMA TX channel not available, SPI unable to use DMA\n");
-               err = -EBUSY;
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev, "No TX DMA channel, DMA is disabled\n");
                goto error_clear;
        }
 
-       /*
-        * No reason to check EPROBE_DEFER here since we have already requested
-        * tx channel. If it fails here, it's for another reason.
-        */
-       master->dma_rx = dma_request_slave_channel(dev, "rx");
-
-       if (!master->dma_rx) {
-               dev_err(dev,
-                       "DMA RX channel not available, SPI unable to use DMA\n");
-               err = -EBUSY;
+       master->dma_rx = dma_request_chan(dev, "rx");
+       if (IS_ERR(master->dma_rx)) {
+               err = PTR_ERR(master->dma_rx);
+               /*
+                * No reason to check EPROBE_DEFER here since we have already
+                * requested the TX channel.
+                */
+               dev_err(dev, "No RX DMA channel, DMA is disabled\n");
                goto error;
        }
 
@@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master,
 
        return 0;
 error:
-       if (master->dma_rx)
+       if (!IS_ERR(master->dma_rx))
                dma_release_channel(master->dma_rx);
        if (!IS_ERR(master->dma_tx))
                dma_release_channel(master->dma_tx);
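
The atmel hunk above swaps dma_request_slave_channel(), which returns NULL on any failure, for dma_request_chan(), which returns an ERR_PTR() and therefore lets -EPROBE_DEFER propagate back to the SPI core. A minimal sketch of the resulting idiom (the foo_request_tx() helper name is illustrative, not part of the patch):

    /* Sketch: request a channel and stay quiet on probe deferral. */
    static int foo_request_tx(struct device *dev, struct dma_chan **chan)
    {
            *chan = dma_request_chan(dev, "tx");
            if (IS_ERR(*chan)) {
                    int err = PTR_ERR(*chan);

                    *chan = NULL;
                    if (err != -EPROBE_DEFER)
                            dev_err(dev, "no TX DMA channel: %d\n", err);
                    return err;
            }
            return 0;
    }
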
index 85bad70f59e3e0c844683eac8adabb6337eb7575..23d295f36c80dad3daa1ea3a67e24a8ca5f1d2d6 100644 (file)
@@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
                name = qspi_irq_tab[val].irq_name;
                if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
                        /* get the l2 interrupts */
-                       irq = platform_get_irq_byname(pdev, name);
+                       irq = platform_get_irq_byname_optional(pdev, name);
                } else if (!num_ints && soc_intc) {
                        /* all mspi, bspi intrs muxed to one L1 intr */
                        irq = platform_get_irq(pdev, 0);
index fb61a620effc54d49afbfcd09b3a59c7e8fedd2e..11c235879bb73d80f2656c98523e900009ef84b9 100644 (file)
@@ -68,7 +68,7 @@
 #define BCM2835_SPI_FIFO_SIZE          64
 #define BCM2835_SPI_FIFO_SIZE_3_4      48
 #define BCM2835_SPI_DMA_MIN_LENGTH     96
-#define BCM2835_SPI_NUM_CS             3   /* raise as necessary */
+#define BCM2835_SPI_NUM_CS             4   /* raise as necessary */
 #define BCM2835_SPI_MODE_BITS  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
                                | SPI_NO_CS | SPI_3WIRE)
 
@@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
        }
 }
 
-static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
-                            struct bcm2835_spi *bs)
+static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+                           struct bcm2835_spi *bs)
 {
        struct dma_slave_config slave_config;
        const __be32 *addr;
@@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
        addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
        if (!addr) {
                dev_err(dev, "could not get DMA-register address - not using dma mode\n");
-               goto err;
+               /* Fall back to interrupt mode */
+               return 0;
        }
        dma_reg_base = be32_to_cpup(addr);
 
        /* get tx/rx dma */
-       ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
-       if (!ctlr->dma_tx) {
+       ctlr->dma_tx = dma_request_chan(dev, "tx");
+       if (IS_ERR(ctlr->dma_tx)) {
                dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+               ret = PTR_ERR(ctlr->dma_tx);
+               ctlr->dma_tx = NULL;
                goto err;
        }
-       ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
-       if (!ctlr->dma_rx) {
+       ctlr->dma_rx = dma_request_chan(dev, "rx");
+       if (IS_ERR(ctlr->dma_rx)) {
                dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+               ret = PTR_ERR(ctlr->dma_rx);
+               ctlr->dma_rx = NULL;
                goto err_release;
        }
 
@@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
        /* all went well, so set can_dma */
        ctlr->can_dma = bcm2835_spi_can_dma;
 
-       return;
+       return 0;
 
 err_config:
        dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
@@ -1005,7 +1010,14 @@ err_config:
 err_release:
        bcm2835_dma_release(ctlr, bs);
 err:
-       return;
+       /*
+        * Only report error for deferred probing, otherwise fall back to
+        * interrupt mode
+        */
+       if (ret != -EPROBE_DEFER)
+               ret = 0;
+
+       return ret;
 }
 
 static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
@@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
        bs->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(bs->clk)) {
                err = PTR_ERR(bs->clk);
-               dev_err(&pdev->dev, "could not get clk: %d\n", err);
+               if (err == -EPROBE_DEFER)
+                       dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
+               else
+                       dev_err(&pdev->dev, "could not get clk: %d\n", err);
                goto out_controller_put;
        }
 
@@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 
        clk_prepare_enable(bs->clk);
 
-       bcm2835_dma_init(ctlr, &pdev->dev, bs);
+       err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
+       if (err)
+               goto out_clk_disable;
 
        /* initialise the hardware with the default polarities */
        bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
                               dev_name(&pdev->dev), ctlr);
        if (err) {
                dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
-               goto out_clk_disable;
+               goto out_dma_release;
        }
 
        err = devm_spi_register_controller(&pdev->dev, ctlr);
        if (err) {
                dev_err(&pdev->dev, "could not register SPI controller: %d\n",
                        err);
-               goto out_clk_disable;
+               goto out_dma_release;
        }
 
        bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
 
        return 0;
 
+out_dma_release:
+       bcm2835_dma_release(ctlr, bs);
 out_clk_disable:
        clk_disable_unprepare(bs->clk);
 out_controller_put:
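
bcm2835_dma_init() now reports failure only when probing must be deferred; any other DMA setup problem makes it return 0 so the driver falls back to interrupt mode, and the probe path gains an out_dma_release label so channels held at the time of a later failure are released before the clock is disabled. A hedged sketch of that optional-resource pattern (foo_* names are illustrative):

    /* Sketch: DMA is a nice-to-have; only -EPROBE_DEFER aborts the probe. */
    static int foo_dma_init(struct device *dev, struct foo_priv *priv)
    {
            priv->tx_ch = dma_request_chan(dev, "tx");
            if (IS_ERR(priv->tx_ch)) {
                    int ret = PTR_ERR(priv->tx_ch);

                    priv->tx_ch = NULL;
                    if (ret == -EPROBE_DEFER)
                            return ret;     /* retry the whole probe later */
                    dev_warn(dev, "no DMA, using interrupt mode\n");
                    return 0;               /* PIO/IRQ fallback */
            }
            return 0;
    }
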
index d84e22dd6f9f7d79384ae55617efbd75e84018e5..68491a8bf7b5b1a6ef3d33e9692c65758dfce19b 100644 (file)
@@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable)
 int spi_bitbang_init(struct spi_bitbang *bitbang)
 {
        struct spi_master *master = bitbang->master;
+       bool custom_cs;
 
-       if (!master || !bitbang->chipselect)
+       if (!master)
+               return -EINVAL;
+       /*
+        * We only need the chipselect callback if we are actually using it.
+        * If we just use GPIO descriptors, it is superfluous. If the
+        * SPI_MASTER_GPIO_SS flag is set, we always need to call the
+        * driver-specific chipselect routine.
+        */
+       custom_cs = (!master->use_gpio_descriptors ||
+                    (master->flags & SPI_MASTER_GPIO_SS));
+
+       if (custom_cs && !bitbang->chipselect)
                return -EINVAL;
 
        mutex_init(&bitbang->lock);
@@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
        master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
        master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
        master->transfer_one = spi_bitbang_transfer_one;
-       master->set_cs = spi_bitbang_set_cs;
+       /*
+        * When using GPIO descriptors, the ->set_cs() callback doesn't even
+        * get called unless SPI_MASTER_GPIO_SS is set.
+        */
+       if (custom_cs)
+               master->set_cs = spi_bitbang_set_cs;
 
        if (!bitbang->txrx_bufs) {
                bitbang->use_dma = 0;
index c36587b42e951eab806d4e9a12715db175619a8a..82a0ee09cbe148cf2407db213bae2cfec55312c1 100644 (file)
@@ -168,16 +168,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
 /**
  * cdns_spi_chipselect - Select or deselect the chip select line
  * @spi:       Pointer to the spi_device structure
- * @enable:    Select (1) or deselect (0) the chip select line
+ * @is_high:   Select (0) or deselect (1) the chip select line
  */
-static void cdns_spi_chipselect(struct spi_device *spi, bool enable)
+static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
 {
        struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
        u32 ctrl_reg;
 
        ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
 
-       if (!enable) {
+       if (is_high) {
                /* Deselect the slave */
                ctrl_reg |= CDNS_SPI_CR_SSCTRL;
        } else {
index d12e149f1a419ed72ef65dd788d409eb68c00cbf..fd6b9caffaf0c41ca36434a7ae90538b319560bd 100644 (file)
@@ -82,6 +82,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
 
 error:
        clk_disable_unprepare(p->clk);
+       pci_release_regions(pdev);
        spi_master_put(master);
        return ret;
 }
@@ -96,6 +97,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
                return;
 
        clk_disable_unprepare(p->clk);
+       pci_release_regions(pdev);
        /* Put everything in a known state. */
        writeq(0, p->register_base + OCTEON_SPI_CFG(p));
 }
index 2663bb12d9ce63d03d3370c6c50f388d1b413ccf..0d86c37e0aeb5b93b5b792169a8b748435cb44d9 100644 (file)
@@ -301,7 +301,7 @@ int dw_spi_mid_init(struct dw_spi *dws)
        void __iomem *clk_reg;
        u32 clk_cdiv;
 
-       clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
+       clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
        if (!clk_reg)
                return -ENOMEM;
 
index a92aa5cd4fbe8b1fd4433652e14ac9cef446af92..31e3f866d11a78be9786803548b5f936bfb1e057 100644 (file)
@@ -129,10 +129,11 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
        struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
        struct chip_data *chip = spi_get_ctldata(spi);
 
+       /* Chip select logic is inverted from spi_set_cs() */
        if (chip && chip->cs_control)
-               chip->cs_control(enable);
+               chip->cs_control(!enable);
 
-       if (enable)
+       if (!enable)
                dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
        else if (dws->cs_override)
                dw_writel(dws, DW_SPI_SER, 0);
@@ -171,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws)
 
 static void dw_writer(struct dw_spi *dws)
 {
-       u32 max = tx_max(dws);
+       u32 max;
        u16 txw = 0;
 
+       spin_lock(&dws->buf_lock);
+       max = tx_max(dws);
        while (max--) {
                /* Set the tx word if the transfer's original "tx" is not null */
                if (dws->tx_end - dws->len) {
@@ -185,13 +188,16 @@ static void dw_writer(struct dw_spi *dws)
                dw_write_io_reg(dws, DW_SPI_DR, txw);
                dws->tx += dws->n_bytes;
        }
+       spin_unlock(&dws->buf_lock);
 }
 
 static void dw_reader(struct dw_spi *dws)
 {
-       u32 max = rx_max(dws);
+       u32 max;
        u16 rxw;
 
+       spin_lock(&dws->buf_lock);
+       max = rx_max(dws);
        while (max--) {
                rxw = dw_read_io_reg(dws, DW_SPI_DR);
                /* Care rx only if the transfer's original "rx" is not null */
@@ -203,6 +209,7 @@ static void dw_reader(struct dw_spi *dws)
                }
                dws->rx += dws->n_bytes;
        }
+       spin_unlock(&dws->buf_lock);
 }
 
 static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -275,18 +282,23 @@ static int dw_spi_transfer_one(struct spi_controller *master,
 {
        struct dw_spi *dws = spi_controller_get_devdata(master);
        struct chip_data *chip = spi_get_ctldata(spi);
+       unsigned long flags;
        u8 imask = 0;
        u16 txlevel = 0;
        u32 cr0;
        int ret;
 
        dws->dma_mapped = 0;
-
+       spin_lock_irqsave(&dws->buf_lock, flags);
        dws->tx = (void *)transfer->tx_buf;
        dws->tx_end = dws->tx + transfer->len;
        dws->rx = transfer->rx_buf;
        dws->rx_end = dws->rx + transfer->len;
        dws->len = transfer->len;
+       spin_unlock_irqrestore(&dws->buf_lock, flags);
+
+       /* Ensure dws->rx and dws->rx_end are visible */
+       smp_mb();
 
        spi_enable_chip(dws, 0);
 
@@ -460,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        struct spi_controller *master;
        int ret;
 
-       BUG_ON(dws == NULL);
+       if (!dws)
+               return -EINVAL;
 
        master = spi_alloc_master(dev, 0);
        if (!master)
@@ -470,6 +483,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        dws->type = SSI_MOTO_SPI;
        dws->dma_inited = 0;
        dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+       spin_lock_init(&dws->buf_lock);
 
        spi_controller_set_devdata(master, dws);
 
index 38c7de1f0aa948c0fff3af98d596f53c3bfddd18..1bf5713e047d35c6f44fd329860fa0db490a3e61 100644 (file)
@@ -119,6 +119,7 @@ struct dw_spi {
        size_t                  len;
        void                    *tx;
        void                    *tx_end;
+       spinlock_t              buf_lock;
        void                    *rx;
        void                    *rx_end;
        int                     dma_mapped;
index 442cff71a0d2e95afdc378b0b1ad9d2d1b71653d..6ec2dcb8c57a6e969b59a5b5bd14d9dfe03f6f70 100644 (file)
@@ -185,6 +185,7 @@ struct fsl_dspi {
        struct spi_transfer                     *cur_transfer;
        struct spi_message                      *cur_msg;
        struct chip_data                        *cur_chip;
+       size_t                                  progress;
        size_t                                  len;
        const void                              *tx;
        void                                    *rx;
@@ -395,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
        if (!dma)
                return -ENOMEM;
 
-       dma->chan_rx = dma_request_slave_channel(dev, "rx");
-       if (!dma->chan_rx) {
+       dma->chan_rx = dma_request_chan(dev, "rx");
+       if (IS_ERR(dma->chan_rx)) {
                dev_err(dev, "rx dma channel not available\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(dma->chan_rx);
                return ret;
        }
 
-       dma->chan_tx = dma_request_slave_channel(dev, "tx");
-       if (!dma->chan_tx) {
+       dma->chan_tx = dma_request_chan(dev, "tx");
+       if (IS_ERR(dma->chan_tx)) {
                dev_err(dev, "tx dma channel not available\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(dma->chan_tx);
                goto err_tx_channel;
        }
 
@@ -586,21 +587,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi)
        dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
 
        if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
-               /* Write two TX FIFO entries first, and then the corresponding
-                * CMD FIFO entry.
+               /* Write the CMD FIFO entry first, and then the two
+                * corresponding TX FIFO entries.
                 */
                u32 data = dspi_pop_tx(dspi);
 
-               if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
-                       /* LSB */
-                       tx_fifo_write(dspi, data & 0xFFFF);
-                       tx_fifo_write(dspi, data >> 16);
-               } else {
-                       /* MSB */
-                       tx_fifo_write(dspi, data >> 16);
-                       tx_fifo_write(dspi, data & 0xFFFF);
-               }
                cmd_fifo_write(dspi);
+               tx_fifo_write(dspi, data & 0xFFFF);
+               tx_fifo_write(dspi, data >> 16);
        } else {
                /* Write one entry to both TX FIFO and CMD FIFO
                 * simultaneously.
@@ -658,7 +652,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
        u32 spi_tcr;
 
        spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
-                               dspi->tx - dspi->bytes_per_word, !dspi->irq);
+                               dspi->progress, !dspi->irq);
 
        /* Get transfer counter (in number of SPI transfers). It was
         * reset to 0 when transfer(s) were started.
@@ -667,6 +661,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
        spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
        /* Update total number of bytes that were transferred */
        msg->actual_length += spi_tcnt * dspi->bytes_per_word;
+       dspi->progress += spi_tcnt;
 
        trans_mode = dspi->devtype_data->trans_mode;
        if (trans_mode == DSPI_EOQ_MODE)
@@ -679,7 +674,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi)
                return 0;
 
        spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
-                              dspi->tx, !dspi->irq);
+                              dspi->progress, !dspi->irq);
 
        if (trans_mode == DSPI_EOQ_MODE)
                dspi_eoq_write(dspi);
@@ -768,6 +763,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
                dspi->rx = transfer->rx_buf;
                dspi->rx_end = dspi->rx + transfer->len;
                dspi->len = transfer->len;
+               dspi->progress = 0;
                /* Validated transfer specific frame size (defaults applied) */
                dspi->bits_per_word = transfer->bits_per_word;
                if (transfer->bits_per_word <= 8)
@@ -789,7 +785,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
                                     SPI_CTARE_DTCP(1));
 
                spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
-                                      dspi->tx, !dspi->irq);
+                                      dspi->progress, !dspi->irq);
 
                trans_mode = dspi->devtype_data->trans_mode;
                switch (trans_mode) {
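
In the fsl-dspi hunks above, dspi->progress replaces pointer arithmetic on dspi->tx as the word count handed to the SPI core's PTP timestamping hooks, and it advances by the hardware transfer counter (SPI_TCR_GET_TCNT) rather than by buffer position. A sketch of how the hooks bracket a FIFO push under the progress-based signatures this series converts to (dspi_fifo_push() is a hypothetical stand-in for the EOQ/TCFQ write paths):

    /* Snapshot a timestamp window around the push of word 'progress'. */
    spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
                           dspi->progress, !dspi->irq);
    dspi_fifo_push(dspi);                   /* hypothetical */
    spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
                            dspi->progress, !dspi->irq);
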
index 2cc0ddb4a9889e4826d2c31be6009713fb2fce59..d0b8cc741a24c1c9570a15281c3e8f2fa71ccf72 100644 (file)
@@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
                fsl_lpspi->watermark = fsl_lpspi->txfifosize;
 
        if (fsl_lpspi_can_dma(controller, spi, t))
-               fsl_lpspi->usedma = 1;
+               fsl_lpspi->usedma = true;
        else
-               fsl_lpspi->usedma = 0;
+               fsl_lpspi->usedma = false;
 
        return fsl_lpspi_config(fsl_lpspi);
 }
@@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
        fsl_lpspi->dev = &pdev->dev;
        fsl_lpspi->is_slave = is_slave;
 
+       controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+       controller->transfer_one = fsl_lpspi_transfer_one;
+       controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+       controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+       controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+       controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+       controller->dev.of_node = pdev->dev.of_node;
+       controller->bus_num = pdev->id;
+       controller->slave_abort = fsl_lpspi_slave_abort;
+
+       ret = devm_spi_register_controller(&pdev->dev, controller);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "spi_register_controller error.\n");
+               goto out_controller_put;
+       }
+
        if (!fsl_lpspi->is_slave) {
                for (i = 0; i < controller->num_chipselect; i++) {
                        int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
@@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
                controller->prepare_message = fsl_lpspi_prepare_message;
        }
 
-       controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
-       controller->transfer_one = fsl_lpspi_transfer_one;
-       controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
-       controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
-       controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-       controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
-       controller->dev.of_node = pdev->dev.of_node;
-       controller->bus_num = pdev->id;
-       controller->slave_abort = fsl_lpspi_slave_abort;
-
        init_completion(&fsl_lpspi->xfer_done);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
        if (ret < 0)
                dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
 
-       ret = devm_spi_register_controller(&pdev->dev, controller);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "spi_register_controller error.\n");
-               goto out_controller_put;
-       }
-
        return 0;
 
 out_controller_put:
index 79b1558b74b8a43254c6a57ec356632c122a0b42..e8a499cd1f135c75cffbbda2597d1cb59786f7f8 100644 (file)
@@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem,
            op->data.nbytes > q->devtype_data->txfifo)
                return false;
 
-       return true;
+       return spi_mem_default_supports_op(mem, op);
 }
 
 static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
index 114801a32371c8f734362d3b550890d288943027..3b81772fea0d3e9a11696f20cf7a4ad42a6b5f21 100644 (file)
@@ -611,6 +611,7 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
        master->setup = fsl_spi_setup;
        master->cleanup = fsl_spi_cleanup;
        master->transfer_one_message = fsl_spi_do_one_msg;
+       master->use_gpio_descriptors = true;
 
        mpc8xxx_spi = spi_master_get_devdata(master);
        mpc8xxx_spi->max_bits_per_word = 32;
@@ -705,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
        struct device_node *np = ofdev->dev.of_node;
        struct spi_master *master;
        struct resource mem;
-       int irq = 0, type;
-       int ret = -ENOMEM;
+       int irq, type;
+       int ret;
 
        ret = of_mpc8xxx_spi_probe(ofdev);
        if (ret)
@@ -721,37 +722,35 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
 
                if (spisel_boot) {
                        pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
-                       if (!pinfo->immr_spi_cs) {
-                               ret = -ENOMEM;
-                               goto err;
-                       }
+                       if (!pinfo->immr_spi_cs)
+                               return -ENOMEM;
                }
 #endif
-
-               pdata->cs_control = fsl_spi_cs_control;
+               /*
+                * Handle the case where we have one hardwired (always selected)
+                * device on the first "chipselect". Otherwise we let the core code
+                * handle any GPIOs or native chip selects and assign the
+                * appropriate callback for dealing with the CS lines. This isn't
+                * supported on the GRLIB variant.
+                */
+               ret = gpiod_count(dev, "cs");
+               if (ret <= 0)
+                       pdata->max_chipselect = 1;
+               else
+                       pdata->cs_control = fsl_spi_cs_control;
        }
 
        ret = of_address_to_resource(np, 0, &mem);
        if (ret)
-               goto err;
+               return ret;
 
-       irq = irq_of_parse_and_map(np, 0);
-       if (!irq) {
-               ret = -EINVAL;
-               goto err;
-       }
+       irq = platform_get_irq(ofdev, 0);
+       if (irq < 0)
+               return irq;
 
        master = fsl_spi_probe(dev, &mem, irq);
-       if (IS_ERR(master)) {
-               ret = PTR_ERR(master);
-               goto err;
-       }
-
-       return 0;
 
-err:
-       irq_dispose_mapping(irq);
-       return ret;
+       return PTR_ERR_OR_ZERO(master);
 }
 
 static int of_fsl_spi_remove(struct platform_device *ofdev)
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
new file mode 100644 (file)
index 0000000..4cf8fc8
--- /dev/null
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
+//
+// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
+// Author: John Garry <john.garry@huawei.com>
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HISI_SFC_V3XX_VERSION (0x1f8)
+
+#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
+#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
+#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
+#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
+#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
+#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
+#define HISI_SFC_V3XX_CMD_INS (0x308)
+#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
+#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
+
+struct hisi_sfc_v3xx_host {
+       struct device *dev;
+       void __iomem *regbase;
+       int max_cmd_dword;
+};
+
+#define HISI_SFC_V3XX_WAIT_TIMEOUT_US          1000000
+#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US    10
+
+static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
+{
+       u32 reg;
+
+       return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
+                                 !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
+                                 HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
+                                 HISI_SFC_V3XX_WAIT_TIMEOUT_US);
+}
+
+static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
+                                       struct spi_mem_op *op)
+{
+       struct spi_device *spi = mem->spi;
+       struct hisi_sfc_v3xx_host *host;
+       uintptr_t addr = (uintptr_t)op->data.buf.in;
+       int max_byte_count;
+
+       host = spi_controller_get_devdata(spi->master);
+
+       max_byte_count = host->max_cmd_dword * 4;
+
+       if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
+               op->data.nbytes = 4 - (addr % 4);
+       else if (op->data.nbytes > max_byte_count)
+               op->data.nbytes = max_byte_count;
+
+       return 0;
+}
+
+/*
+ * memcpy_{to,from}io doesn't guarantee 32-bit accesses - which we require
+ * for the DATABUF registers - so use __io{read,write}32_copy when possible.
+ * For trailing bytes, copy them byte-by-byte from the DATABUF register, as
+ * we can't clobber outside the source/dest buffer.
+ *
+ * For efficient data read/write, we try to put any leading 32-bit-unaligned
+ * data into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
+ */
+static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
+                                      u8 *to, unsigned int len)
+{
+       void __iomem *from;
+       int i;
+
+       from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+       if (IS_ALIGNED((uintptr_t)to, 4)) {
+               int words = len / 4;
+
+               __ioread32_copy(to, from, words);
+
+               len -= words * 4;
+               if (len) {
+                       u32 val;
+
+                       to += words * 4;
+                       from += words * 4;
+
+                       val = __raw_readl(from);
+
+                       for (i = 0; i < len; i++, val >>= 8, to++)
+                               *to = (u8)val;
+               }
+       } else {
+               for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
+                       u32 val = __raw_readl(from);
+                       int j;
+
+                       for (j = 0; j < 4 && (j + (i * 4) < len);
+                            to++, val >>= 8, j++)
+                               *to = (u8)val;
+               }
+       }
+}
+
+static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
+                                       const u8 *from, unsigned int len)
+{
+       void __iomem *to;
+       int i;
+
+       to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+       if (IS_ALIGNED((uintptr_t)from, 4)) {
+               int words = len / 4;
+
+               __iowrite32_copy(to, from, words);
+
+               len -= words * 4;
+               if (len) {
+                       u32 val = 0;
+
+                       to += words * 4;
+                       from += words * 4;
+
+                       for (i = 0; i < len; i++, from++)
+                               val |= *from << i * 8;
+                       __raw_writel(val, to);
+               }
+
+       } else {
+               for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
+                       u32 val = 0;
+                       int j;
+
+                       for (j = 0; j < 4 && (j + (i * 4) < len);
+                            from++, j++)
+                               val |= *from << j * 8;
+                       __raw_writel(val, to);
+               }
+       }
+}
+
+static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
+                                        const struct spi_mem_op *op,
+                                        u8 chip_select)
+{
+       int ret, len = op->data.nbytes;
+       u32 config = 0;
+
+       if (op->addr.nbytes)
+               config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+
+       if (op->data.dir != SPI_MEM_NO_DATA) {
+               config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
+               config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
+       }
+
+       if (op->data.dir == SPI_MEM_DATA_OUT)
+               hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len);
+       else if (op->data.dir == SPI_MEM_DATA_IN)
+               config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
+
+       config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
+                 chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
+                 HISI_SFC_V3XX_CMD_CFG_START_MSK;
+
+       writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
+       writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
+
+       writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
+
+       ret = hisi_sfc_v3xx_wait_cmd_idle(host);
+       if (ret)
+               return ret;
+
+       if (op->data.dir == SPI_MEM_DATA_IN)
+               hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
+
+       return 0;
+}
+
+static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
+                                const struct spi_mem_op *op)
+{
+       struct hisi_sfc_v3xx_host *host;
+       struct spi_device *spi = mem->spi;
+       u8 chip_select = spi->chip_select;
+
+       host = spi_controller_get_devdata(spi->master);
+
+       return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
+}
+
+static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+       .adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
+       .exec_op = hisi_sfc_v3xx_exec_op,
+};
+
+static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct hisi_sfc_v3xx_host *host;
+       struct spi_controller *ctlr;
+       u32 version;
+       int ret;
+
+       ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
+       if (!ctlr)
+               return -ENOMEM;
+
+       ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+                         SPI_TX_DUAL | SPI_TX_QUAD;
+
+       host = spi_controller_get_devdata(ctlr);
+       host->dev = dev;
+
+       platform_set_drvdata(pdev, host);
+
+       host->regbase = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(host->regbase)) {
+               ret = PTR_ERR(host->regbase);
+               goto err_put_master;
+       }
+
+       ctlr->bus_num = -1;
+       ctlr->num_chipselect = 1;
+       ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+
+       version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
+
+       switch (version) {
+       case 0x351:
+               host->max_cmd_dword = 64;
+               break;
+       default:
+               host->max_cmd_dword = 16;
+               break;
+       }
+
+       ret = devm_spi_register_controller(dev, ctlr);
+       if (ret)
+               goto err_put_master;
+
+       dev_info(&pdev->dev, "hw version 0x%x\n", version);
+
+       return 0;
+
+err_put_master:
+       spi_master_put(ctlr);
+       return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
+       {"HISI0341", 0},
+       {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
+#endif
+
+static struct platform_driver hisi_sfc_v3xx_spi_driver = {
+       .driver = {
+               .name   = "hisi-sfc-v3xx",
+               .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids),
+       },
+       .probe  = hisi_sfc_v3xx_probe,
+};
+
+module_platform_driver(hisi_sfc_v3xx_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
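
For context on the exec_op() path above: the spi-mem core hands the driver operations built from the SPI_MEM_OP() template. A hypothetical caller issuing a plain 0x03 read might construct something like the following (sketch, not part of the patch; 'mem' is an assumed struct spi_mem pointer):

    #include <linux/spi/spi-mem.h>

    /* 1 opcode byte, 3 address bytes, no dummy cycles, data in. */
    u8 buf[256];
    struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
                                      SPI_MEM_OP_ADDR(3, 0x1000, 1),
                                      SPI_MEM_OP_NO_DUMMY,
                                      SPI_MEM_OP_DATA_IN(sizeof(buf), buf, 1));

    int ret = spi_mem_exec_op(mem, &op);    /* lands in ->exec_op() above */
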
index f4a8f470aecc2f71ef7e98e71aa23bcc8de2bd43..8543f5ed1099fc31138394a243dcfb834d8d52bf 100644 (file)
@@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev)
        master->unprepare_message = img_spfi_unprepare;
        master->handle_err = img_spfi_handle_err;
 
-       spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
-       spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+       spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
+       if (IS_ERR(spfi->tx_ch)) {
+               ret = PTR_ERR(spfi->tx_ch);
+               spfi->tx_ch = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto disable_pm;
+       }
+
+       spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
+       if (IS_ERR(spfi->rx_ch)) {
+               ret = PTR_ERR(spfi->rx_ch);
+               spfi->rx_ch = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto disable_pm;
+       }
+
        if (!spfi->tx_ch || !spfi->rx_ch) {
                if (spfi->tx_ch)
                        dma_release_channel(spfi->tx_ch);
index 49f0099db0cb55ac31e3e2e530eab08a982c478d..f4f28a400a96832b2c11a548a5957b43066fa107 100644 (file)
@@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi,
        }
 
        if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
-               spi_imx->usedma = 1;
+               spi_imx->usedma = true;
        else
-               spi_imx->usedma = 0;
+               spi_imx->usedma = false;
 
        if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
                spi_imx->rx = mx53_ecspi_rx_slave;
index cc49fa41fbabec92372b4cfb9d5dc5f35cce11cb..bba10f030e33daa4b7e404582ff86ed3b18b01df 100644 (file)
@@ -170,7 +170,7 @@ static int jcore_spi_probe(struct platform_device *pdev)
        if (!devm_request_mem_region(&pdev->dev, res->start,
                                     resource_size(res), pdev->name))
                goto exit_busy;
-       hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+       hw->base = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));
        if (!hw->base)
                goto exit_busy;
index f3f10443f9e26f3fee9b67827d07758157f3a081..7f5680fe256820e1d161c8b767ef724b3904bad8 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/reset.h>
-#include <linux/gpio.h>
 
 /*
  * The Meson SPICC controller could support DMA based transfers, but is not
@@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
 
 static int meson_spicc_setup(struct spi_device *spi)
 {
-       int ret = 0;
-
        if (!spi->controller_state)
                spi->controller_state = spi_master_get_devdata(spi->master);
-       else if (gpio_is_valid(spi->cs_gpio))
-               goto out_gpio;
-       else if (spi->cs_gpio == -ENOENT)
-               return 0;
-
-       if (gpio_is_valid(spi->cs_gpio)) {
-               ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
-               if (ret) {
-                       dev_err(&spi->dev, "failed to request cs gpio\n");
-                       return ret;
-               }
-       }
-
-out_gpio:
-       ret = gpio_direction_output(spi->cs_gpio,
-                       !(spi->mode & SPI_CS_HIGH));
 
-       return ret;
+       return 0;
 }
 
 static void meson_spicc_cleanup(struct spi_device *spi)
 {
-       if (gpio_is_valid(spi->cs_gpio))
-               gpio_free(spi->cs_gpio);
-
        spi->controller_state = NULL;
 }
 
@@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
        master->prepare_message = meson_spicc_prepare_message;
        master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
        master->transfer_one = meson_spicc_transfer_one;
+       master->use_gpio_descriptors = true;
 
        /* Setup max rate according to the Meson GX datasheet */
        if ((rate >> 2) > SPICC_MAX_FREQ)
index 996c1c8a9c719e77372c0358025eaa7ed193d232..dce85ee07cd0023de15d3d240464a329e44e533f 100644 (file)
@@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev)
        if (ret)
                goto out_master_free;
 
-       ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
-       if (!ssp->dmach) {
+       ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+       if (IS_ERR(ssp->dmach)) {
                dev_err(ssp->dev, "Failed to request DMA\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(ssp->dmach);
                goto out_master_free;
        }
 
index cb52fd8008d0e1a303e9e456ca03127e6af86f9c..d25ee32862e0f1144309ee9ff5b449382f3ebe2c 100644 (file)
@@ -603,7 +603,7 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
 
        if (!chip->flash_region_mapped_ptr) {
                chip->flash_region_mapped_ptr =
-                       devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start +
+                       devm_ioremap(fiu->dev, (fiu->res_mem->start +
                                                        (fiu->info->max_map_size *
                                                    desc->mem->spi->chip_select)),
                                             (u32)desc->info.length);
index fe624731c74ce31a3ad022c65e5215e9189cc8b0..87cd0233c60b5f5ac6db9190c8e7800f7384176c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <linux/reset.h>
 
 #include <asm/unaligned.h>
 
@@ -20,7 +21,7 @@
 
 struct npcm_pspi {
        struct completion xfer_done;
-       struct regmap *rst_regmap;
+       struct reset_control *reset;
        struct spi_master *master;
        unsigned int tx_bytes;
        unsigned int rx_bytes;
@@ -59,12 +60,6 @@ struct npcm_pspi {
 #define NPCM_PSPI_MIN_CLK_DIVIDER      4
 #define NPCM_PSPI_DEFAULT_CLK          25000000
 
-/* reset register */
-#define NPCM7XX_IPSRST2_OFFSET 0x24
-
-#define NPCM7XX_PSPI1_RESET    BIT(22)
-#define NPCM7XX_PSPI2_RESET    BIT(23)
-
 static inline unsigned int bytes_per_word(unsigned int bits)
 {
        return bits <= 8 ? 1 : 2;
@@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
                priv->mode = spi->mode;
        }
 
+       /*
+        * If the transfer length is even and it uses 8 bits per word,
+        * promote it to a 16 bits-per-word transfer.
+        */
+       if (priv->bits_per_word == 8 && !(t->len & 0x1))
+               t->bits_per_word = 16;
+
        if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
                npcm_pspi_set_transfer_size(priv, t->bits_per_word);
                priv->bits_per_word = t->bits_per_word;
@@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
 static void npcm_pspi_send(struct npcm_pspi *priv)
 {
        int wsize;
+       u16 val;
 
        wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
        priv->tx_bytes -= wsize;
@@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv)
 
        switch (wsize) {
        case 1:
-               iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+               val = *priv->tx_buf++;
+               iowrite8(val, NPCM_PSPI_DATA + priv->base);
                break;
        case 2:
-               iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+               val = *priv->tx_buf++;
+               val = *priv->tx_buf++ | (val << 8);
+               iowrite16(val, NPCM_PSPI_DATA + priv->base);
                break;
        default:
                WARN_ON_ONCE(1);
                return;
        }
-
-       priv->tx_buf += wsize;
 }
 
 static void npcm_pspi_recv(struct npcm_pspi *priv)
@@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv)
 
        switch (rsize) {
        case 1:
-               val = ioread8(priv->base + NPCM_PSPI_DATA);
+               *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
                break;
        case 2:
                val = ioread16(priv->base + NPCM_PSPI_DATA);
+               *priv->rx_buf++ = (val >> 8);
+               *priv->rx_buf++ = val & 0xff;
                break;
        default:
                WARN_ON_ONCE(1);
                return;
        }
-
-       *priv->rx_buf = val;
-       priv->rx_buf += rsize;
 }
 
 static int npcm_pspi_transfer_one(struct spi_master *master,
@@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
 
 static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
 {
-       regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
-                    NPCM7XX_PSPI1_RESET << priv->id);
-       regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
+       reset_control_assert(priv->reset);
+       udelay(5);
+       reset_control_deassert(priv->reset);
 }
 
 static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
@@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        if (num_cs < 0)
                return num_cs;
 
-       pdev->id = of_alias_get_id(np, "spi");
-       if (pdev->id < 0)
-               pdev->id = 0;
-
        master = spi_alloc_master(&pdev->dev, sizeof(*priv));
        if (!master)
                return -ENOMEM;
@@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        priv = spi_master_get_devdata(master);
        priv->master = master;
        priv->is_save_param = false;
-       priv->id = pdev->id;
 
        priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base)) {
@@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev)
                goto out_disable_clk;
        }
 
-       priv->rst_regmap =
-               syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
-       if (IS_ERR(priv->rst_regmap)) {
-               dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
-               return PTR_ERR(priv->rst_regmap);
+       priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(priv->reset)) {
+               ret = PTR_ERR(priv->reset);
+               goto out_disable_clk;
        }
 
        /* reset SPI-HW block */
@@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
        master->mode_bits = SPI_CPHA | SPI_CPOL;
        master->dev.of_node = pdev->dev.of_node;
-       master->bus_num = pdev->id;
+       master->bus_num = -1;
        master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
        master->transfer_one = npcm_pspi_transfer_one;
        master->prepare_transfer_hardware =
@@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
        if (ret)
                goto out_disable_clk;
 
-       pr_info("NPCM Peripheral SPI %d probed\n", pdev->id);
+       pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
 
        return 0;
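
The nuvoton conversion above replaces direct syscon register writes with the reset controller framework, so the reset line comes from the platform description rather than a hard-coded NPCM7XX offset. The resulting pulse-reset pattern, sketched (the 5 µs settle time follows the patch; the required pulse width is hardware-specific):

    struct reset_control *reset;

    reset = devm_reset_control_get(&pdev->dev, NULL);
    if (IS_ERR(reset))
            return PTR_ERR(reset);

    /* Pulse the IP block reset: assert, settle, deassert. */
    reset_control_assert(reset);
    udelay(5);
    reset_control_deassert(reset);
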
 
index c36bb1bb464e60de23c466d700cff0c3ef1f02c6..8c5084a3a617b1a6839959a90f5691a3a2e45c41 100644 (file)
@@ -439,7 +439,7 @@ static bool nxp_fspi_supports_op(struct spi_mem *mem,
            op->data.nbytes > f->devtype_data->txfifo)
                return false;
 
-       return true;
+       return spi_mem_default_supports_op(mem, op);
 }
 
 /* Instead of busy looping invoke readl_poll_timeout functionality. */
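
Both the fsl-qspi hunk earlier and the nxp-fspi hunk above stop returning an unconditional true from ->supports_op(): delegating to spi_mem_default_supports_op() lets the core reject buswidths the attached device never advertised. The shape of the callback after the change (FOO_TXFIFO_SIZE is an illustrative limit, not a real symbol):

    static bool foo_supports_op(struct spi_mem *mem,
                                const struct spi_mem_op *op)
    {
            /* Controller-specific limits first... */
            if (op->data.nbytes > FOO_TXFIFO_SIZE)
                    return false;

            /* ...then the generic buswidth/mode validation. */
            return spi_mem_default_supports_op(mem, op);
    }
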
index e2331eb7b47a515ca12387782e417620461ab1af..9df7c5979c299efe2d8a4efc9709af7d2f714e64 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/spi/spi_bitbang.h>
 #include <linux/spi/spi_oc_tiny.h>
 #include <linux/io.h>
-#include <linux/gpio.h>
 #include <linux/of.h>
 
 #define DRV_NAME "spi_oc_tiny"
@@ -50,8 +49,6 @@ struct tiny_spi {
        unsigned int txc, rxc;
        const u8 *txp;
        u8 *rxp;
-       int gpio_cs_count;
-       int *gpio_cs;
 };
 
 static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
@@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
        return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
 }
 
-static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
-{
-       struct tiny_spi *hw = tiny_spi_to_hw(spi);
-
-       if (hw->gpio_cs_count > 0) {
-               gpio_set_value(hw->gpio_cs[spi->chip_select],
-                       (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
-       }
-}
-
 static int tiny_spi_setup_transfer(struct spi_device *spi,
                                   struct spi_transfer *t)
 {
@@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
 {
        struct tiny_spi *hw = platform_get_drvdata(pdev);
        struct device_node *np = pdev->dev.of_node;
-       unsigned int i;
        u32 val;
 
        if (!np)
                return 0;
-       hw->gpio_cs_count = of_gpio_count(np);
-       if (hw->gpio_cs_count > 0) {
-               hw->gpio_cs = devm_kcalloc(&pdev->dev,
-                               hw->gpio_cs_count, sizeof(unsigned int),
-                               GFP_KERNEL);
-               if (!hw->gpio_cs)
-                       return -ENOMEM;
-       }
-       for (i = 0; i < hw->gpio_cs_count; i++) {
-               hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
-               if (hw->gpio_cs[i] < 0)
-                       return -ENODEV;
-       }
        hw->bitbang.master->dev.of_node = pdev->dev.of_node;
        if (!of_property_read_u32(np, "clock-frequency", &val))
                hw->freq = val;
@@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
        struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
        struct tiny_spi *hw;
        struct spi_master *master;
-       unsigned int i;
        int err = -ENODEV;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
@@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev)
 
        /* setup the master state. */
        master->bus_num = pdev->id;
-       master->num_chipselect = 255;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        master->setup = tiny_spi_setup;
+       master->use_gpio_descriptors = true;
 
        hw = spi_master_get_devdata(master);
        platform_set_drvdata(pdev, hw);
@@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
        /* setup the state for the bitbang driver */
        hw->bitbang.master = master;
        hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
-       hw->bitbang.chipselect = tiny_spi_chipselect;
        hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
 
        /* find and map our resources */
@@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
        }
        /* find platform data */
        if (platp) {
-               hw->gpio_cs_count = platp->gpio_cs_count;
-               hw->gpio_cs = platp->gpio_cs;
-               if (platp->gpio_cs_count && !platp->gpio_cs) {
-                       err = -EBUSY;
-                       goto exit;
-               }
                hw->freq = platp->freq;
                hw->baudwidth = platp->baudwidth;
        } else {
@@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
                if (err)
                        goto exit;
        }
-       for (i = 0; i < hw->gpio_cs_count; i++) {
-               err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
-               if (err)
-                       goto exit_gpio;
-               gpio_direction_output(hw->gpio_cs[i], 1);
-       }
-       hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
 
        /* register our spi controller */
        err = spi_bitbang_start(&hw->bitbang);
@@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
 
        return 0;
 
-exit_gpio:
-       while (i-- > 0)
-               gpio_free(hw->gpio_cs[i]);
 exit:
        spi_master_put(master);
        return err;
@@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev)
 {
        struct tiny_spi *hw = platform_get_drvdata(pdev);
        struct spi_master *master = hw->bitbang.master;
-       unsigned int i;
 
        spi_bitbang_stop(&hw->bitbang);
-       for (i = 0; i < hw->gpio_cs_count; i++)
-               gpio_free(hw->gpio_cs[i]);
        spi_master_put(master);
        return 0;
 }
index 16b6b2ad4e7c0aec2c3d58f6aa9ddac6f6d9dd32..4c7a71f0fb3e3fc7a2d53f6bd076ce8ac364e171 100644 (file)
@@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
        return limit;
 }
 
+static void pxa2xx_spi_off(struct driver_data *drv_data)
+{
+       /* On MMP, disabling SSE seems to corrupt the rx fifo */
+       if (drv_data->ssp_type == MMP2_SSP)
+               return;
+
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+}
+
 static int null_writer(struct driver_data *drv_data)
 {
        u8 n_bytes = drv_data->n_bytes;
@@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
        if (!pxa25x_ssp_comp(drv_data))
                pxa2xx_spi_write(drv_data, SSTO, 0);
        pxa2xx_spi_flush(drv_data);
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
 
        dev_err(&drv_data->pdev->dev, "%s\n", msg);
 
@@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 
 static void handle_bad_msg(struct driver_data *drv_data)
 {
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
        pxa2xx_spi_write(drv_data, SSCR1,
                         pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
        if (!pxa25x_ssp_comp(drv_data))
@@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
            || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
            != (cr1 & change_mask)) {
                /* stop the SSP, and update the other bits */
-               pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
+               if (drv_data->ssp_type != MMP2_SSP)
+                       pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
                if (!pxa25x_ssp_comp(drv_data))
                        pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
                /* first set CR1 without interrupt and service enables */
@@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
        if (!pxa25x_ssp_comp(drv_data))
                pxa2xx_spi_write(drv_data, SSTO, 0);
        pxa2xx_spi_flush(drv_data);
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
 
        dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
 
@@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
        struct driver_data *drv_data = spi_controller_get_devdata(controller);
 
        /* Disable the SSP */
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
        /* Clear and disable interrupts and service requests */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        pxa2xx_spi_write(drv_data, SSCR1,
@@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
        struct driver_data *drv_data = spi_controller_get_devdata(controller);
 
        /* Disable the SSP now */
-       pxa2xx_spi_write(drv_data, SSCR0,
-                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+       pxa2xx_spi_off(drv_data);
 
        return 0;
 }
@@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
        /* KBL-H */
        { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
        { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
+       /* CML-V */
+       { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP },
+       { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP },
        /* BXT A-Step */
        { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
@@ -1443,6 +1452,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
        { PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP },
+       /* JSL */
+       { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
        /* APL */
        { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
index 250fd60e1678211d718b3e3684bdc0b7f9c6f9fc..3c4f83bf7084c81ebc32f60f5b0abaa7df803190 100644 (file)
@@ -137,7 +137,7 @@ enum qspi_clocks {
 struct qcom_qspi {
        void __iomem *base;
        struct device *dev;
-       struct clk_bulk_data clks[QSPI_NUM_CLKS];
+       struct clk_bulk_data *clks;
        struct qspi_xfer xfer;
        /* Lock to protect xfer and IRQ accessed registers */
        spinlock_t lock;
@@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev)
                goto exit_probe_master_put;
        }
 
+       ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
+                                 sizeof(*ctrl->clks), GFP_KERNEL);
+       if (!ctrl->clks) {
+               ret = -ENOMEM;
+               goto exit_probe_master_put;
+       }
+
        ctrl->clks[QSPI_CLK_CORE].id = "core";
        ctrl->clks[QSPI_CLK_IFACE].id = "iface";
        ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
index 7222c7689c3c4cea1c5c1ba710da1856bfa07700..85575d45901cee1b90c9c3dc2f1c41eeaf920e14 100644 (file)
 #define SPCMD_SPIMOD_DUAL      SPCMD_SPIMOD0
 #define SPCMD_SPIMOD_QUAD      SPCMD_SPIMOD1
 #define SPCMD_SPRW             0x0010  /* SPI Read/Write Access (Dual/Quad) */
-#define SPCMD_SSLA_MASK                0x0030  /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_SSLA(i)          ((i) << 4)      /* SSL Assert Signal Setting */
 #define SPCMD_BRDV_MASK                0x000c  /* Bit Rate Division Setting */
 #define SPCMD_CPOL             0x0002  /* Clock Polarity Setting */
 #define SPCMD_CPHA             0x0001  /* Clock Phase Setting */
@@ -242,6 +242,7 @@ struct spi_ops {
        u16 mode_bits;
        u16 flags;
        u16 fifo_size;
+       u8 num_hw_ss;
 };
 
 /*
@@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
        return n;
 }
 
-#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
-
 static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
 {
        rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
@@ -620,9 +619,8 @@ no_dma_tx:
                dmaengine_terminate_all(rspi->ctlr->dma_rx);
 no_dma_rx:
        if (ret == -EAGAIN) {
-               pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
-                            dev_driver_string(&rspi->ctlr->dev),
-                            dev_name(&rspi->ctlr->dev));
+               dev_warn_once(&rspi->ctlr->dev,
+                             "DMA not available, falling back to PIO\n");
        }
        return ret;
 }
@@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
        if (spi->mode & SPI_CPHA)
                rspi->spcmd |= SPCMD_CPHA;
 
+       /* Configure slave signal to assert */
+       rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
+                                               : spi->chip_select);
+
        /* CMOS output mode and MOSI signal from previous transfer */
        rspi->sppcr = 0;
        if (spi->mode & SPI_LOOP)
                rspi->sppcr |= SPPCR_SPLP;
 
-       set_config_register(rspi, 8);
+       rspi->ops->set_config_register(rspi, 8);
 
        if (msg->spi->mode &
            (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
@@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = {
        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP,
        .flags =                SPI_CONTROLLER_MUST_TX,
        .fifo_size =            8,
+       .num_hw_ss =            2,
 };
 
 static const struct spi_ops rspi_rz_ops = {
@@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = {
        .mode_bits =            SPI_CPHA | SPI_CPOL | SPI_LOOP,
        .flags =                SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
        .fifo_size =            8,      /* 8 for TX, 32 for RX */
+       .num_hw_ss =            1,
 };
 
 static const struct spi_ops qspi_ops = {
@@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = {
                                SPI_RX_DUAL | SPI_RX_QUAD,
        .flags =                SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
        .fifo_size =            32,
+       .num_hw_ss =            1,
 };
 
 #ifdef CONFIG_OF
@@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev)
        ctlr->mode_bits = ops->mode_bits;
        ctlr->flags = ops->flags;
        ctlr->dev.of_node = pdev->dev.of_node;
+       ctlr->use_gpio_descriptors = true;
+       ctlr->max_native_cs = rspi->ops->num_hw_ss;
 
        ret = platform_get_irq_byname_optional(pdev, "rx");
        if (ret < 0) {
@@ -1314,8 +1321,6 @@ error1:
 
 static const struct platform_device_id spi_driver_ids[] = {
        { "rspi",       (kernel_ulong_t)&rspi_ops },
-       { "rspi-rz",    (kernel_ulong_t)&rspi_rz_ops },
-       { "qspi",       (kernel_ulong_t)&qspi_ops },
        {},
 };
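The SPCMD_SSLA() hunk above selects which native SSL line the controller asserts per transfer. When the core drives chip select through a GPIO descriptor (spi->cs_gpiod set), the hardware is parked on ctlr->unused_native_cs, a native CS the core verified is not claimed by any GPIO, so only the GPIO toggles a real device. The selection, condensed as a sketch:

	/* SSLA occupies SPCMD bits 5:4; unused_native_cs is filled in by the
	 * SPI core once use_gpio_descriptors and max_native_cs are set. */
	unsigned int ssl = spi->cs_gpiod ? rspi->ctlr->unused_native_cs
					 : spi->chip_select;

	rspi->spcmd |= SPCMD_SSLA(ssl);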
 
index 8f134735291f14c31541b486b252aa5da3b2f23f..1c11a00a2c36788e5d03955e58851f3b81c17969 100644 (file)
@@ -14,8 +14,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -55,7 +53,6 @@ struct sh_msiof_spi_priv {
        void *rx_dma_page;
        dma_addr_t tx_dma_addr;
        dma_addr_t rx_dma_addr;
-       unsigned short unused_ss;
        bool native_cs_inited;
        bool native_cs_high;
        bool slave_aborted;
@@ -63,140 +60,140 @@ struct sh_msiof_spi_priv {
 
 #define MAX_SS 3       /* Maximum number of native chip selects */
 
-#define TMDR1  0x00    /* Transmit Mode Register 1 */
-#define TMDR2  0x04    /* Transmit Mode Register 2 */
-#define TMDR3  0x08    /* Transmit Mode Register 3 */
-#define RMDR1  0x10    /* Receive Mode Register 1 */
-#define RMDR2  0x14    /* Receive Mode Register 2 */
-#define RMDR3  0x18    /* Receive Mode Register 3 */
-#define TSCR   0x20    /* Transmit Clock Select Register */
-#define RSCR   0x22    /* Receive Clock Select Register (SH, A1, APE6) */
-#define CTR    0x28    /* Control Register */
-#define FCTR   0x30    /* FIFO Control Register */
-#define STR    0x40    /* Status Register */
-#define IER    0x44    /* Interrupt Enable Register */
-#define TDR1   0x48    /* Transmit Control Data Register 1 (SH, A1) */
-#define TDR2   0x4c    /* Transmit Control Data Register 2 (SH, A1) */
-#define TFDR   0x50    /* Transmit FIFO Data Register */
-#define RDR1   0x58    /* Receive Control Data Register 1 (SH, A1) */
-#define RDR2   0x5c    /* Receive Control Data Register 2 (SH, A1) */
-#define RFDR   0x60    /* Receive FIFO Data Register */
-
-/* TMDR1 and RMDR1 */
-#define MDR1_TRMD         BIT(31)  /* Transfer Mode (1 = Master mode) */
-#define MDR1_SYNCMD_MASK   GENMASK(29, 28) /* SYNC Mode */
-#define MDR1_SYNCMD_SPI           (2 << 28)/*   Level mode/SPI */
-#define MDR1_SYNCMD_LR    (3 << 28)/*   L/R mode */
-#define MDR1_SYNCAC_SHIFT  25       /* Sync Polarity (1 = Active-low) */
-#define MDR1_BITLSB_SHIFT  24       /* MSB/LSB First (1 = LSB first) */
-#define MDR1_DTDL_SHIFT           20       /* Data Pin Bit Delay for MSIOF_SYNC */
-#define MDR1_SYNCDL_SHIFT  16       /* Frame Sync Signal Timing Delay */
-#define MDR1_FLD_MASK     GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
-#define MDR1_FLD_SHIFT    2
-#define MDR1_XXSTP        BIT(0)   /* Transmission/Reception Stop on FIFO */
-/* TMDR1 */
-#define TMDR1_PCON        BIT(30)  /* Transfer Signal Connection */
-#define TMDR1_SYNCCH_MASK  GENMASK(27, 26) /* Sync Signal Channel Select */
-#define TMDR1_SYNCCH_SHIFT 26       /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
-
-/* TMDR2 and RMDR2 */
-#define MDR2_BITLEN1(i)        (((i) - 1) << 24) /* Data Size (8-32 bits) */
-#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define MDR2_GRPMASK1  BIT(0)      /* Group Output Mask 1 (SH, A1) */
-
-/* TSCR and RSCR */
-#define SCR_BRPS_MASK  GENMASK(12, 8) /* Prescaler Setting (1-32) */
-#define SCR_BRPS(i)    (((i) - 1) << 8)
-#define SCR_BRDV_MASK  GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
-#define SCR_BRDV_DIV_2 0
-#define SCR_BRDV_DIV_4 1
-#define SCR_BRDV_DIV_8 2
-#define SCR_BRDV_DIV_16        3
-#define SCR_BRDV_DIV_32        4
-#define SCR_BRDV_DIV_1 7
-
-/* CTR */
-#define CTR_TSCKIZ_MASK        GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
-#define CTR_TSCKIZ_SCK BIT(31)   /*   Disable SCK when TX disabled */
-#define CTR_TSCKIZ_POL_SHIFT 30   /*   Transmit Clock Polarity */
-#define CTR_RSCKIZ_MASK        GENMASK(29, 28) /* Receive Clock Polarity Select */
-#define CTR_RSCKIZ_SCK BIT(29)   /*   Must match CTR_TSCKIZ_SCK */
-#define CTR_RSCKIZ_POL_SHIFT 28   /*   Receive Clock Polarity */
-#define CTR_TEDG_SHIFT      27   /* Transmit Timing (1 = falling edge) */
-#define CTR_REDG_SHIFT      26   /* Receive Timing (1 = falling edge) */
-#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
-#define CTR_TXDIZ_LOW  (0 << 22) /*   0 */
-#define CTR_TXDIZ_HIGH (1 << 22) /*   1 */
-#define CTR_TXDIZ_HIZ  (2 << 22) /*   High-impedance */
-#define CTR_TSCKE      BIT(15)   /* Transmit Serial Clock Output Enable */
-#define CTR_TFSE       BIT(14)   /* Transmit Frame Sync Signal Output Enable */
-#define CTR_TXE                BIT(9)    /* Transmit Enable */
-#define CTR_RXE                BIT(8)    /* Receive Enable */
-#define CTR_TXRST      BIT(1)    /* Transmit Reset */
-#define CTR_RXRST      BIT(0)    /* Receive Reset */
-
-/* FCTR */
-#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define FCTR_TFWM_64   (0 << 29) /*  Transfer Request when 64 empty stages */
-#define FCTR_TFWM_32   (1 << 29) /*  Transfer Request when 32 empty stages */
-#define FCTR_TFWM_24   (2 << 29) /*  Transfer Request when 24 empty stages */
-#define FCTR_TFWM_16   (3 << 29) /*  Transfer Request when 16 empty stages */
-#define FCTR_TFWM_12   (4 << 29) /*  Transfer Request when 12 empty stages */
-#define FCTR_TFWM_8    (5 << 29) /*  Transfer Request when 8 empty stages */
-#define FCTR_TFWM_4    (6 << 29) /*  Transfer Request when 4 empty stages */
-#define FCTR_TFWM_1    (7 << 29) /*  Transfer Request when 1 empty stage */
-#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
-#define FCTR_TFUA_SHIFT        20
-#define FCTR_TFUA(i)   ((i) << FCTR_TFUA_SHIFT)
-#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
-#define FCTR_RFWM_1    (0 << 13) /*  Transfer Request when 1 valid stages */
-#define FCTR_RFWM_4    (1 << 13) /*  Transfer Request when 4 valid stages */
-#define FCTR_RFWM_8    (2 << 13) /*  Transfer Request when 8 valid stages */
-#define FCTR_RFWM_16   (3 << 13) /*  Transfer Request when 16 valid stages */
-#define FCTR_RFWM_32   (4 << 13) /*  Transfer Request when 32 valid stages */
-#define FCTR_RFWM_64   (5 << 13) /*  Transfer Request when 64 valid stages */
-#define FCTR_RFWM_128  (6 << 13) /*  Transfer Request when 128 valid stages */
-#define FCTR_RFWM_256  (7 << 13) /*  Transfer Request when 256 valid stages */
-#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
-#define FCTR_RFUA_SHIFT        4
-#define FCTR_RFUA(i)   ((i) << FCTR_RFUA_SHIFT)
-
-/* STR */
-#define STR_TFEMP      BIT(29) /* Transmit FIFO Empty */
-#define STR_TDREQ      BIT(28) /* Transmit Data Transfer Request */
-#define STR_TEOF       BIT(23) /* Frame Transmission End */
-#define STR_TFSERR     BIT(21) /* Transmit Frame Synchronization Error */
-#define STR_TFOVF      BIT(20) /* Transmit FIFO Overflow */
-#define STR_TFUDF      BIT(19) /* Transmit FIFO Underflow */
-#define STR_RFFUL      BIT(13) /* Receive FIFO Full */
-#define STR_RDREQ      BIT(12) /* Receive Data Transfer Request */
-#define STR_REOF       BIT(7)  /* Frame Reception End */
-#define STR_RFSERR     BIT(5)  /* Receive Frame Synchronization Error */
-#define STR_RFUDF      BIT(4)  /* Receive FIFO Underflow */
-#define STR_RFOVF      BIT(3)  /* Receive FIFO Overflow */
-
-/* IER */
-#define IER_TDMAE      BIT(31) /* Transmit Data DMA Transfer Req. Enable */
-#define IER_TFEMPE     BIT(29) /* Transmit FIFO Empty Enable */
-#define IER_TDREQE     BIT(28) /* Transmit Data Transfer Request Enable */
-#define IER_TEOFE      BIT(23) /* Frame Transmission End Enable */
-#define IER_TFSERRE    BIT(21) /* Transmit Frame Sync Error Enable */
-#define IER_TFOVFE     BIT(20) /* Transmit FIFO Overflow Enable */
-#define IER_TFUDFE     BIT(19) /* Transmit FIFO Underflow Enable */
-#define IER_RDMAE      BIT(15) /* Receive Data DMA Transfer Req. Enable */
-#define IER_RFFULE     BIT(13) /* Receive FIFO Full Enable */
-#define IER_RDREQE     BIT(12) /* Receive Data Transfer Request Enable */
-#define IER_REOFE      BIT(7)  /* Frame Reception End Enable */
-#define IER_RFSERRE    BIT(5)  /* Receive Frame Sync Error Enable */
-#define IER_RFUDFE     BIT(4)  /* Receive FIFO Underflow Enable */
-#define IER_RFOVFE     BIT(3)  /* Receive FIFO Overflow Enable */
+#define SITMDR1        0x00    /* Transmit Mode Register 1 */
+#define SITMDR2        0x04    /* Transmit Mode Register 2 */
+#define SITMDR3        0x08    /* Transmit Mode Register 3 */
+#define SIRMDR1        0x10    /* Receive Mode Register 1 */
+#define SIRMDR2        0x14    /* Receive Mode Register 2 */
+#define SIRMDR3        0x18    /* Receive Mode Register 3 */
+#define SITSCR 0x20    /* Transmit Clock Select Register */
+#define SIRSCR 0x22    /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR  0x28    /* Control Register */
+#define SIFCTR 0x30    /* FIFO Control Register */
+#define SISTR  0x40    /* Status Register */
+#define SIIER  0x44    /* Interrupt Enable Register */
+#define SITDR1 0x48    /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c    /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50    /* Transmit FIFO Data Register */
+#define SIRDR1 0x58    /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c    /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60    /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD            BIT(31)         /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD_MASK     GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_SPI      (2 << 28)       /*   Level mode/SPI */
+#define SIMDR1_SYNCMD_LR       (3 << 28)       /*   L/R mode */
+#define SIMDR1_SYNCAC_SHIFT    25              /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB_SHIFT    24              /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL_SHIFT      20              /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL_SHIFT    16              /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD_MASK                GENMASK(3, 2)   /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_FLD_SHIFT       2
+#define SIMDR1_XXSTP           BIT(0)          /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON           BIT(30)         /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH_MASK    GENMASK(27, 26) /* Sync Signal Channel Select */
+#define SITMDR1_SYNCCH_SHIFT   26              /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_BITLEN1(i)      (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1(i)       (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK1                BIT(0)          /* Group Output Mask 1 (SH, A1) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS_MASK                GENMASK(12, 8)  /* Prescaler Setting (1-32) */
+#define SISCR_BRPS(i)          (((i) - 1) << 8)
+#define SISCR_BRDV_MASK                GENMASK(2, 0)   /* Baud Rate Generator's Division Ratio */
+#define SISCR_BRDV_DIV_2       0
+#define SISCR_BRDV_DIV_4       1
+#define SISCR_BRDV_DIV_8       2
+#define SISCR_BRDV_DIV_16      3
+#define SISCR_BRDV_DIV_32      4
+#define SISCR_BRDV_DIV_1       7
+
+/* SICTR */
+#define SICTR_TSCKIZ_MASK      GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK       BIT(31)         /*   Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL_SHIFT 30              /*   Transmit Clock Polarity */
+#define SICTR_RSCKIZ_MASK      GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK       BIT(29)         /*   Must match SICTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL_SHIFT 28              /*   Receive Clock Polarity */
+#define SICTR_TEDG_SHIFT       27              /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG_SHIFT       26              /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ_MASK       GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW                (0 << 22)       /*   0 */
+#define SICTR_TXDIZ_HIGH       (1 << 22)       /*   1 */
+#define SICTR_TXDIZ_HIZ                (2 << 22)       /*   High-impedance */
+#define SICTR_TSCKE            BIT(15)         /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE             BIT(14)         /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE              BIT(9)          /* Transmit Enable */
+#define SICTR_RXE              BIT(8)          /* Receive Enable */
+#define SICTR_TXRST            BIT(1)          /* Transmit Reset */
+#define SICTR_RXRST            BIT(0)          /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM_MASK       GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64         (0 << 29)       /*  Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32         (1 << 29)       /*  Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24         (2 << 29)       /*  Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16         (3 << 29)       /*  Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12         (4 << 29)       /*  Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8          (5 << 29)       /*  Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4          (6 << 29)       /*  Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1          (7 << 29)       /*  Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA_MASK       GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_TFUA_SHIFT      20
+#define SIFCTR_TFUA(i)         ((i) << SIFCTR_TFUA_SHIFT)
+#define SIFCTR_RFWM_MASK       GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1          (0 << 13)       /*  Transfer Request when 1 valid stage */
+#define SIFCTR_RFWM_4          (1 << 13)       /*  Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8          (2 << 13)       /*  Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16         (3 << 13)       /*  Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32         (4 << 13)       /*  Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64         (5 << 13)       /*  Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128                (6 << 13)       /*  Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256                (7 << 13)       /*  Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA_MASK       GENMASK(12, 4)  /* Receive FIFO Usable Area (0x40 = full) */
+#define SIFCTR_RFUA_SHIFT      4
+#define SIFCTR_RFUA(i)         ((i) << SIFCTR_RFUA_SHIFT)
+
+/* SISTR */
+#define SISTR_TFEMP            BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ            BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF             BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR           BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF            BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF            BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL            BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ            BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF             BIT(7)  /* Frame Reception End */
+#define SISTR_RFSERR           BIT(5)  /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF            BIT(4)  /* Receive FIFO Underflow */
+#define SISTR_RFOVF            BIT(3)  /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE            BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE           BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE           BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE            BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE          BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE           BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE           BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE            BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE           BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE           BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE            BIT(7)  /* Frame Reception End Enable */
+#define SIIER_RFSERRE          BIT(5)  /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE           BIT(4)  /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE           BIT(3)  /* Receive FIFO Overflow Enable */
 
 
 static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
 {
        switch (reg_offs) {
-       case TSCR:
-       case RSCR:
+       case SITSCR:
+       case SIRSCR:
                return ioread16(p->mapbase + reg_offs);
        default:
                return ioread32(p->mapbase + reg_offs);
@@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
                           u32 value)
 {
        switch (reg_offs) {
-       case TSCR:
-       case RSCR:
+       case SITSCR:
+       case SIRSCR:
                iowrite16(value, p->mapbase + reg_offs);
                break;
        default:
@@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
        u32 mask = clr | set;
        u32 data;
 
-       data = sh_msiof_read(p, CTR);
+       data = sh_msiof_read(p, SICTR);
        data &= ~clr;
        data |= set;
-       sh_msiof_write(p, CTR, data);
+       sh_msiof_write(p, SICTR, data);
 
-       return readl_poll_timeout_atomic(p->mapbase + CTR, data,
+       return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
                                         (data & mask) == set, 1, 100);
 }
 
@@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
        struct sh_msiof_spi_priv *p = data;
 
        /* just disable the interrupt and wake up */
-       sh_msiof_write(p, IER, 0);
+       sh_msiof_write(p, SIIER, 0);
        complete(&p->done);
 
        return IRQ_HANDLED;
@@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
 
 static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
 {
-       u32 mask = CTR_TXRST | CTR_RXRST;
+       u32 mask = SICTR_TXRST | SICTR_RXRST;
        u32 data;
 
-       data = sh_msiof_read(p, CTR);
+       data = sh_msiof_read(p, SICTR);
        data |= mask;
-       sh_msiof_write(p, CTR, data);
+       sh_msiof_write(p, SICTR, data);
 
-       readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
+       readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
                                  100);
 }
 
 static const u32 sh_msiof_spi_div_array[] = {
-       SCR_BRDV_DIV_1, SCR_BRDV_DIV_2,  SCR_BRDV_DIV_4,
-       SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
+       SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
+       SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
 };
 
 static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
@@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 
        div = DIV_ROUND_UP(parent_rate, spi_hz);
        if (div <= 1024) {
-               /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+               /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
                if (!div_pow && div <= 32 && div > 2)
                        div_pow = 1;
 
@@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
                brps = 32;
        }
 
-       scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
-       sh_msiof_write(p, TSCR, scr);
+       scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+       sh_msiof_write(p, SITSCR, scr);
        if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
-               sh_msiof_write(p, RSCR, scr);
+               sh_msiof_write(p, SIRSCR, scr);
 }
 
 static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
@@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
                return 0;
        }
 
-       val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
-       val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+       val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
+       val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
 
        return val;
 }
@@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
         *    1    0         11     11    0    0
         *    1    1         11     11    1    1
         */
-       tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
-       tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
-       tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+       tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
+       tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
+       tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
        tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
        if (spi_controller_is_slave(p->ctlr)) {
-               sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
+               sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
        } else {
-               sh_msiof_write(p, TMDR1,
-                              tmp | MDR1_TRMD | TMDR1_PCON |
-                              (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+               sh_msiof_write(p, SITMDR1,
+                              tmp | SIMDR1_TRMD | SITMDR1_PCON |
+                              (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
        }
        if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
                /* These bits are reserved if RX needs TX */
                tmp &= ~0x0000ffff;
        }
-       sh_msiof_write(p, RMDR1, tmp);
+       sh_msiof_write(p, SIRMDR1, tmp);
 
        tmp = 0;
-       tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
-       tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
+       tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
+       tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
 
        edge = cpol ^ !cpha;
 
-       tmp |= edge << CTR_TEDG_SHIFT;
-       tmp |= edge << CTR_REDG_SHIFT;
-       tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
-       sh_msiof_write(p, CTR, tmp);
+       tmp |= edge << SICTR_TEDG_SHIFT;
+       tmp |= edge << SICTR_REDG_SHIFT;
+       tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+       sh_msiof_write(p, SICTR, tmp);
 }
 
 static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
                                       const void *tx_buf, void *rx_buf,
                                       u32 bits, u32 words)
 {
-       u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
+       u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
 
        if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
-               sh_msiof_write(p, TMDR2, dr2);
+               sh_msiof_write(p, SITMDR2, dr2);
        else
-               sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
+               sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
 
        if (rx_buf)
-               sh_msiof_write(p, RMDR2, dr2);
+               sh_msiof_write(p, SIRMDR2, dr2);
 }
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-       sh_msiof_write(p, STR,
-                      sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
+       sh_msiof_write(p, SISTR,
+                      sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, buf_8[k] << fs);
+               sh_msiof_write(p, SITFDR, buf_8[k] << fs);
 }
 
 static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
@@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, buf_16[k] << fs);
+               sh_msiof_write(p, SITFDR, buf_16[k] << fs);
 }
 
 static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+               sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
 }
 
 static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
@@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, buf_32[k] << fs);
+               sh_msiof_write(p, SITFDR, buf_32[k] << fs);
 }
 
 static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+               sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
 }
 
 static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+               sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
 }
 
 static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+               sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
 }
 
 static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
@@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+               buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
 }
 
 static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
@@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+               buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
 }
 
 static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
@@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+               put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
 }
 
 static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
@@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+               buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
 }
 
 static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
@@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+               put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
 }
 
 static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
@@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+               buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
 }
 
 static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
@@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
        int k;
 
        for (k = 0; k < words; k++)
-               put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+               put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
 }
 
 static int sh_msiof_spi_setup(struct spi_device *spi)
@@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
                return 0;
 
        /* Configure native chip select mode/polarity early */
-       clr = MDR1_SYNCMD_MASK;
-       set = MDR1_SYNCMD_SPI;
+       clr = SIMDR1_SYNCMD_MASK;
+       set = SIMDR1_SYNCMD_SPI;
        if (spi->mode & SPI_CS_HIGH)
-               clr |= BIT(MDR1_SYNCAC_SHIFT);
+               clr |= BIT(SIMDR1_SYNCAC_SHIFT);
        else
-               set |= BIT(MDR1_SYNCAC_SHIFT);
+               set |= BIT(SIMDR1_SYNCAC_SHIFT);
        pm_runtime_get_sync(&p->pdev->dev);
-       tmp = sh_msiof_read(p, TMDR1) & ~clr;
-       sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
-       tmp = sh_msiof_read(p, RMDR1) & ~clr;
-       sh_msiof_write(p, RMDR1, tmp | set);
+       tmp = sh_msiof_read(p, SITMDR1) & ~clr;
+       sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
+       tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
+       sh_msiof_write(p, SIRMDR1, tmp | set);
        pm_runtime_put(&p->pdev->dev);
        p->native_cs_high = spi->mode & SPI_CS_HIGH;
        p->native_cs_inited = true;
@@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
 
        /* Configure pins before asserting CS */
        if (spi->cs_gpiod) {
-               ss = p->unused_ss;
+               ss = ctlr->unused_native_cs;
                cs_high = p->native_cs_high;
        } else {
                ss = spi->chip_select;
@@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
 
        /* setup clock and rx/tx signals */
        if (!slave)
-               ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+               ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
        if (rx_buf && !ret)
-               ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+               ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
        if (!ret)
-               ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+               ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
 
        /* start by setting frame bit */
        if (!ret && !slave)
-               ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+               ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
 
        return ret;
 }
@@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
 
        /* shut down frame, rx/tx and clock signals */
        if (!slave)
-               ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+               ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
        if (!ret)
-               ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+               ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
        if (rx_buf && !ret)
-               ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+               ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
        if (!ret && !slave)
-               ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+               ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
 
        return ret;
 }
@@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
        fifo_shift = 32 - bits;
 
        /* default FIFO watermarks for PIO */
-       sh_msiof_write(p, FCTR, 0);
+       sh_msiof_write(p, SIFCTR, 0);
 
        /* setup msiof transfer mode registers */
        sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
-       sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
+       sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
 
        /* write tx fifo */
        if (tx_buf)
@@ -731,7 +728,7 @@ stop_reset:
        sh_msiof_reset_str(p);
        sh_msiof_spi_stop(p, rx_buf);
 stop_ier:
-       sh_msiof_write(p, IER, 0);
+       sh_msiof_write(p, SIIER, 0);
        return ret;
 }
 
@@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 
        /* First prepare and submit the DMA request(s), as this may fail */
        if (rx) {
-               ier_bits |= IER_RDREQE | IER_RDMAE;
+               ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
                desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
                                        p->rx_dma_addr, len, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
        }
 
        if (tx) {
-               ier_bits |= IER_TDREQE | IER_TDMAE;
+               ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
                dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
                                           p->tx_dma_addr, len, DMA_TO_DEVICE);
                desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
@@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
        }
 
        /* 1 stage FIFO watermarks for DMA */
-       sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+       sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
 
        /* setup msiof transfer mode registers (32-bit words) */
        sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
 
-       sh_msiof_write(p, IER, ier_bits);
+       sh_msiof_write(p, SIIER, ier_bits);
 
        reinit_completion(&p->done);
        if (tx)
@@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
                if (ret)
                        goto stop_reset;
 
-               sh_msiof_write(p, IER, 0);
+               sh_msiof_write(p, SIIER, 0);
        } else {
                /* wait for tx fifo to be emptied */
-               sh_msiof_write(p, IER, IER_TEOFE);
+               sh_msiof_write(p, SIIER, SIIER_TEOFE);
                ret = sh_msiof_wait_for_completion(p, &p->done);
                if (ret)
                        goto stop_reset;
@@ -856,7 +853,7 @@ stop_dma:
 no_dma_tx:
        if (rx)
                dmaengine_terminate_all(p->ctlr->dma_rx);
-       sh_msiof_write(p, IER, 0);
+       sh_msiof_write(p, SIIER, 0);
        return ret;
 }
 
@@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
 }
 #endif
 
-static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
-{
-       struct device *dev = &p->pdev->dev;
-       unsigned int used_ss_mask = 0;
-       unsigned int cs_gpios = 0;
-       unsigned int num_cs, i;
-       int ret;
-
-       ret = gpiod_count(dev, "cs");
-       if (ret <= 0)
-               return 0;
-
-       num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
-       for (i = 0; i < num_cs; i++) {
-               struct gpio_desc *gpiod;
-
-               gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
-               if (!IS_ERR(gpiod)) {
-                       devm_gpiod_put(dev, gpiod);
-                       cs_gpios++;
-                       continue;
-               }
-
-               if (PTR_ERR(gpiod) != -ENOENT)
-                       return PTR_ERR(gpiod);
-
-               if (i >= MAX_SS) {
-                       dev_err(dev, "Invalid native chip select %d\n", i);
-                       return -EINVAL;
-               }
-               used_ss_mask |= BIT(i);
-       }
-       p->unused_ss = ffz(used_ss_mask);
-       if (cs_gpios && p->unused_ss >= MAX_SS) {
-               dev_err(dev, "No unused native chip select available\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
 static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
        enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
 {
@@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
 
        ctlr = p->ctlr;
        ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
-                                                dma_tx_id, res->start + TFDR);
+                                                dma_tx_id, res->start + SITFDR);
        if (!ctlr->dma_tx)
                return -ENODEV;
 
        ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
-                                                dma_rx_id, res->start + RFDR);
+                                                dma_rx_id, res->start + SIRFDR);
        if (!ctlr->dma_rx)
                goto free_tx_chan;
 
@@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
        if (p->info->rx_fifo_override)
                p->rx_fifo_size = p->info->rx_fifo_override;
 
-       /* Setup GPIO chip selects */
-       ctlr->num_chipselect = p->info->num_chipselect;
-       ret = sh_msiof_get_cs_gpios(p);
-       if (ret)
-               goto err1;
-
        /* init controller code */
        ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
        ctlr->flags = chipdata->ctlr_flags;
        ctlr->bus_num = pdev->id;
+       ctlr->num_chipselect = p->info->num_chipselect;
        ctlr->dev.of_node = pdev->dev.of_node;
        ctlr->setup = sh_msiof_spi_setup;
        ctlr->prepare_message = sh_msiof_prepare_message;
@@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
        ctlr->auto_runtime_pm = true;
        ctlr->transfer_one = sh_msiof_transfer_one;
        ctlr->use_gpio_descriptors = true;
+       ctlr->max_native_cs = MAX_SS;
 
        ret = sh_msiof_request_dma(p);
        if (ret < 0)
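Dropping sh_msiof_get_cs_gpios() works because the SPI core now performs the equivalent bookkeeping when use_gpio_descriptors is set: it requests each cs-gpios entry and, bounded by the new max_native_cs, derives ctlr->unused_native_cs, which prepare_message reads above instead of the removed p->unused_ss. A rough sketch of the core-side logic, assuming the spi_get_gpio_descs() behaviour of this kernel generation (num_cs_gpios is an illustrative name):

	unsigned int native_cs_mask = 0;	/* bit i: native CS i claimed by a GPIO */
	/* ... mask filled while walking the cs-gpios property ... */
	ctlr->unused_native_cs = ffz(native_cs_mask);
	if (num_cs_gpios && ctlr->max_native_cs &&
	    ctlr->unused_native_cs >= ctlr->max_native_cs)
		return -EINVAL;		/* no free native CS to park the IP on */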
index e1e6391915577914990afad2f9e7b9ca997478fc..8419e6722e1746bebd64cbb7629b622d0d8238d2 100644 (file)
@@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
        sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
 
        /* request DMA channels */
-       sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
-       if (!sspi->rx_chan) {
+       sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
+       if (IS_ERR(sspi->rx_chan)) {
                dev_err(&pdev->dev, "can not allocate rx dma channel\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(sspi->rx_chan);
                goto free_master;
        }
-       sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
-       if (!sspi->tx_chan) {
+       sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
+       if (IS_ERR(sspi->tx_chan)) {
                dev_err(&pdev->dev, "can not allocate tx dma channel\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(sspi->tx_chan);
                goto free_rx_dma;
        }
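dma_request_slave_channel() returns NULL on any failure, forcing the old code to guess -ENODEV. dma_request_chan() returns an ERR_PTR(), so the real cause, notably -EPROBE_DEFER while the DMA controller is still unbound, now propagates out of probe. The pattern in its general form:

	struct dma_chan *chan;

	chan = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER; probe retried */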
 
index 2ee1feb416812faf45b50b0e3e74dd9f1dde6891..6678f1cbc566078a7681a4aa047bc01a80cc4456 100644 (file)
@@ -678,7 +678,7 @@ static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
        if (d->unit != SPI_DELAY_UNIT_SCK)
                return -EINVAL;
 
-       val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
+       val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
        val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
        /* Set default chip selection, clock phase and clock polarity */
        val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
index 4e726929bb4f5e90d161d3933aa6075e6aff260f..4ef569b47aa6d244af3e633c7f79e79917d365d3 100644 (file)
@@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
        return 0;
 }
 
-static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
 {
        struct dma_slave_config dma_cfg;
        struct device *dev = qspi->dev;
+       int ret = 0;
 
        memset(&dma_cfg, 0, sizeof(dma_cfg));
 
@@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
        dma_cfg.src_maxburst = 4;
        dma_cfg.dst_maxburst = 4;
 
-       qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
-       if (qspi->dma_chrx) {
+       qspi->dma_chrx = dma_request_chan(dev, "rx");
+       if (IS_ERR(qspi->dma_chrx)) {
+               ret = PTR_ERR(qspi->dma_chrx);
+               qspi->dma_chrx = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto out;
+       } else {
                if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
                        dev_err(dev, "dma rx config failed\n");
                        dma_release_channel(qspi->dma_chrx);
@@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
                }
        }
 
-       qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
-       if (qspi->dma_chtx) {
+       qspi->dma_chtx = dma_request_chan(dev, "tx");
+       if (IS_ERR(qspi->dma_chtx)) {
+               ret = PTR_ERR(qspi->dma_chtx);
+               qspi->dma_chtx = NULL;
+       } else {
                if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
                        dev_err(dev, "dma tx config failed\n");
                        dma_release_channel(qspi->dma_chtx);
@@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
                }
        }
 
+out:
        init_completion(&qspi->dma_completion);
+
+       if (ret != -EPROBE_DEFER)
+               ret = 0;
+
+       return ret;
 }
 
 static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
@@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev)
 
        qspi->dev = dev;
        platform_set_drvdata(pdev, qspi);
-       stm32_qspi_dma_setup(qspi);
+       ret = stm32_qspi_dma_setup(qspi);
+       if (ret)
+               goto err;
+
        mutex_init(&qspi->lock);
 
        ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
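Here DMA remains optional: on failure the channel pointer is reset to NULL and the driver silently falls back to PIO, except for -EPROBE_DEFER, which must abort the probe so it can be retried once the DMA provider is up. The policy, condensed as a sketch:

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		chan = NULL;			/* PIO fallback */
		if (ret == -EPROBE_DEFER)
			return ret;		/* provider not bound yet */
		ret = 0;			/* any other error: run without DMA */
	}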
index b222ce8d083effcff4b6d0cc359e903922ca1d44..e041f9c4ec47e861e75ca8fe60f5a3ced663124f 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
-#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
@@ -973,29 +972,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/**
- * stm32_spi_setup - setup device chip select
- */
-static int stm32_spi_setup(struct spi_device *spi_dev)
-{
-       int ret = 0;
-
-       if (!gpio_is_valid(spi_dev->cs_gpio)) {
-               dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
-                       spi_dev->cs_gpio);
-               return -EINVAL;
-       }
-
-       dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
-               spi_dev->cs_gpio,
-               (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
-
-       ret = gpio_direction_output(spi_dev->cs_gpio,
-                                   !(spi_dev->mode & SPI_CS_HIGH));
-
-       return ret;
-}
-
 /**
  * stm32_spi_prepare_msg - set up the controller to transfer a single message
  */
@@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct stm32_spi *spi;
        struct resource *res;
-       int i, ret;
+       int ret;
 
        master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
        if (!master) {
@@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev)
        master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
        master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
        master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
-       master->setup = stm32_spi_setup;
+       master->use_gpio_descriptors = true;
        master->prepare_message = stm32_spi_prepare_msg;
        master->transfer_one = stm32_spi_transfer_one;
        master->unprepare_message = stm32_spi_unprepare_msg;
 
-       spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
-       if (!spi->dma_tx)
+       spi->dma_tx = dma_request_chan(spi->dev, "tx");
+       if (IS_ERR(spi->dma_tx)) {
+               ret = PTR_ERR(spi->dma_tx);
+               spi->dma_tx = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto err_clk_disable;
+
                dev_warn(&pdev->dev, "failed to request tx dma channel\n");
-       else
+       } else {
                master->dma_tx = spi->dma_tx;
+       }
+
+       spi->dma_rx = dma_request_chan(spi->dev, "rx");
+       if (IS_ERR(spi->dma_rx)) {
+               ret = PTR_ERR(spi->dma_rx);
+               spi->dma_rx = NULL;
+               if (ret == -EPROBE_DEFER)
+                       goto err_dma_release;
 
-       spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
-       if (!spi->dma_rx)
                dev_warn(&pdev->dev, "failed to request rx dma channel\n");
-       else
+       } else {
                master->dma_rx = spi->dma_rx;
+       }
 
        if (spi->dma_tx || spi->dma_rx)
                master->can_dma = stm32_spi_can_dma;
@@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "spi master registration failed: %d\n",
                        ret);
-               goto err_dma_release;
+               goto err_pm_disable;
        }
 
-       if (!master->cs_gpios) {
+       if (!master->cs_gpiods) {
                dev_err(&pdev->dev, "no CS gpios available\n");
                ret = -EINVAL;
-               goto err_dma_release;
-       }
-
-       for (i = 0; i < master->num_chipselect; i++) {
-               if (!gpio_is_valid(master->cs_gpios[i])) {
-                       dev_err(&pdev->dev, "%i is not a valid gpio\n",
-                               master->cs_gpios[i]);
-                       ret = -EINVAL;
-                       goto err_dma_release;
-               }
-
-               ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
-                                       DRIVER_NAME);
-               if (ret) {
-                       dev_err(&pdev->dev, "can't get CS gpio %i\n",
-                               master->cs_gpios[i]);
-                       goto err_dma_release;
-               }
+               goto err_pm_disable;
        }
 
        dev_info(&pdev->dev, "driver initialized\n");
 
        return 0;
 
+err_pm_disable:
+       pm_runtime_disable(&pdev->dev);
 err_dma_release:
        if (spi->dma_tx)
                dma_release_channel(spi->dma_tx);
        if (spi->dma_rx)
                dma_release_channel(spi->dma_rx);
-
-       pm_runtime_disable(&pdev->dev);
 err_clk_disable:
        clk_disable_unprepare(spi->clk);
 err_master_put:
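The relabelled error path keeps teardown in strict reverse order of setup: runtime PM is armed after the DMA channels are acquired, so on failure it must be disabled before they are released. The resulting ladder, assembled from the hunks above (the err_master_put body is outside this hunk):

	err_pm_disable:
		pm_runtime_disable(&pdev->dev);		/* armed last, undone first */
	err_dma_release:
		if (spi->dma_tx)
			dma_release_channel(spi->dma_tx);
		if (spi->dma_rx)
			dma_release_channel(spi->dma_rx);
	err_clk_disable:
		clk_disable_unprepare(spi->clk);
	err_master_put:
		/* ... */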
index fc40ab146c865ff52e85c208c07c446ffb9dc53a..83edabdb41ade9641f4c3e30c9c91e3999553f8e 100644 (file)
@@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param(
 
        if ((bits_per_word == 8 || bits_per_word == 16 ||
             bits_per_word == 32) && t->len > 3) {
-               tspi->is_packed = 1;
+               tspi->is_packed = true;
                tspi->words_per_32bit = 32/bits_per_word;
        } else {
-               tspi->is_packed = 0;
+               tspi->is_packed = false;
                tspi->words_per_32bit = 1;
        }
 
index 3cb65371ae3bd45873f573fef70de052dfca373b..366a3e5cca6b7047e63b8be8bce58eeed252910d 100644 (file)
@@ -62,6 +62,7 @@ struct ti_qspi {
        u32 dc;
 
        bool mmap_enabled;
+       int current_cs;
 };
 
 #define QSPI_PID                       (0x0)
@@ -79,8 +80,6 @@ struct ti_qspi {
 
 #define QSPI_COMPLETION_TIMEOUT                msecs_to_jiffies(2000)
 
-#define QSPI_FCLK                      192000000
-
 /* Clock Control */
 #define QSPI_CLK_EN                    (1 << 31)
 #define QSPI_CLK_DIV_MAX               0xffff
@@ -315,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
 {
        int wlen;
        unsigned int cmd;
+       u32 rx;
+       u8 rxlen, rx_wlen;
        u8 *rxbuf;
 
        rxbuf = t->rx_buf;
@@ -331,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
                break;
        }
        wlen = t->bits_per_word >> 3;   /* in bytes */
+       rx_wlen = wlen;
 
        while (count) {
                dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
                if (qspi_is_busy(qspi))
                        return -EBUSY;
 
+               switch (wlen) {
+               case 1:
+                       /*
+                        * Optimize the 8-bit words transfers, as used by
+                        * the SPI flash devices.
+                        */
+                       if (count >= QSPI_WLEN_MAX_BYTES) {
+                               rxlen = QSPI_WLEN_MAX_BYTES;
+                       } else {
+                               rxlen = min(count, 4);
+                       }
+                       rx_wlen = rxlen << 3;
+                       cmd &= ~QSPI_WLEN_MASK;
+                       cmd |= QSPI_WLEN(rx_wlen);
+                       break;
+               default:
+                       rxlen = wlen;
+                       break;
+               }
+
                ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
                if (ti_qspi_poll_wc(qspi)) {
                        dev_err(qspi->dev, "read timed out\n");
                        return -ETIMEDOUT;
                }
+
                switch (wlen) {
                case 1:
-                       *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+                       /*
+                        * Optimize the 8-bit words transfers, as used by
+                        * the SPI flash devices.
+                        */
+                       if (count >= QSPI_WLEN_MAX_BYTES) {
+                               u32 *rxp = (u32 *) rxbuf;
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
+                               *rxp++ = be32_to_cpu(rx);
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
+                               *rxp++ = be32_to_cpu(rx);
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
+                               *rxp++ = be32_to_cpu(rx);
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+                               *rxp++ = be32_to_cpu(rx);
+                       } else {
+                               u8 *rxp = rxbuf;
+                               rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+                               if (rx_wlen >= 8)
+                                       *rxp++ = rx >> (rx_wlen - 8);
+                               if (rx_wlen >= 16)
+                                       *rxp++ = rx >> (rx_wlen - 16);
+                               if (rx_wlen >= 24)
+                                       *rxp++ = rx >> (rx_wlen - 24);
+                               if (rx_wlen >= 32)
+                                       *rxp++ = rx;
+                       }
                        break;
                case 2:
                        *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
@@ -353,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
                        *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
                        break;
                }
-               rxbuf += wlen;
-               count -= wlen;
+               rxbuf += rxlen;
+               count -= rxlen;
        }
 
        return 0;
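In the new 8-bit fast path one command pops up to QSPI_WLEN_MAX_BYTES at once (16, judging by the four data registers read; the macro's definition is outside this hunk). Full pops drain DATA_REG_3 down to DATA_REG, earliest wire bytes first; a shorter tail arrives right-justified in DATA_REG with the first byte in the most significant position. Worked sketch for a 3-byte tail (rx_wlen = 24):

	u8 *rxp = rxbuf;
	u32 rx = readl(qspi->base + QSPI_SPI_DATA_REG);

	*rxp++ = rx >> 16;	/* rx_wlen - 8:  first byte off the wire */
	*rxp++ = rx >> 8;	/* rx_wlen - 16: second byte */
	*rxp++ = rx;		/* rx_wlen - 24: third byte */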
@@ -487,6 +535,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
                                   MEM_CS_EN(spi->chip_select));
        }
        qspi->mmap_enabled = true;
+       qspi->current_cs = spi->chip_select;
 }
 
 static void ti_qspi_disable_memory_map(struct spi_device *spi)
@@ -498,6 +547,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
                regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
                                   MEM_CS_MASK, 0);
        qspi->mmap_enabled = false;
+       qspi->current_cs = -1;
 }
 
 static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
@@ -524,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
                      QSPI_SPI_SETUP_REG(spi->chip_select));
 }
 
+static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+       struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+       size_t max_len;
+
+       if (op->data.dir == SPI_MEM_DATA_IN) {
+               if (op->addr.val < qspi->mmap_size) {
+                       /* Limit MMIO to the mmaped region */
+                       if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
+                               max_len = qspi->mmap_size - op->addr.val;
+                               op->data.nbytes = min((size_t) op->data.nbytes,
+                                                     max_len);
+                       }
+               } else {
+                       /*
+                        * Use fallback mode (SW generated transfers) above the
+                        * mmaped region.
+                        * Adjust size to comply with the QSPI max frame length.
+                        */
+                       max_len = QSPI_FRAME;
+                       max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
+                       op->data.nbytes = min((size_t) op->data.nbytes,
+                                             max_len);
+               }
+       }
+
+       return 0;
+}
+
 static int ti_qspi_exec_mem_op(struct spi_mem *mem,
                               const struct spi_mem_op *op)
 {
@@ -543,7 +622,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
 
        mutex_lock(&qspi->list_lock);
 
-       if (!qspi->mmap_enabled)
+       if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select)
                ti_qspi_enable_memory_map(mem->spi);
        ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
                                op->addr.nbytes, op->dummy.nbytes);
@@ -574,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
 
 static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
        .exec_op = ti_qspi_exec_mem_op,
+       .adjust_op_size = ti_qspi_adjust_op_size,
 };
 
 static int ti_qspi_start_transfer_one(struct spi_master *master,
@@ -799,6 +879,7 @@ no_dma:
                }
        }
        qspi->mmap_enabled = false;
+       qspi->current_cs = -1;
 
        ret = devm_spi_register_master(&pdev->dev, master);
        if (!ret)
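The new .adjust_op_size hook lets the spi-mem core split oversized reads instead of rejecting them: inside the mmap window a read is clamped to the window end, beyond it to the QSPI frame limit of the PIO fallback. Roughly how the core consumes the hook (simplified sketch; total stands for the caller's full length):

	size_t done = 0;

	while (done < total) {
		op.data.nbytes = total - done;
		ret = spi_mem_adjust_op_size(mem, &op);	/* calls the hook above */
		if (ret)
			return ret;
		ret = spi_mem_exec_op(mem, &op);
		if (ret)
			return ret;
		op.addr.val += op.data.nbytes;
		op.data.buf.in += op.data.nbytes;
		done += op.data.nbytes;
	}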
index 223353fa2d8ab248d0331209849857efea558a74..d7ea6af74743e297c589efdbde246197c95b0688 100644 (file)
@@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
        /* Set Tx DMA */
        param = &dma->param_tx;
        param->dma_dev = &dma_dev->dev;
-       param->chan_id = data->ch * 2; /* Tx = 0, 2 */;
+       param->chan_id = data->ch * 2; /* Tx = 0, 2 */
        param->tx_reg = data->io_base_addr + PCH_SPDWR;
        param->width = width;
        chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
        /* Set Rx DMA */
        param = &dma->param_rx;
        param->dma_dev = &dma_dev->dev;
-       param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */;
+       param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
        param->rx_reg = data->io_base_addr + PCH_SPDRR;
        param->width = width;
        chan = dma_request_channel(mask, pch_spi_filter, param);
index 47cde1864630e02b5e3c8d253bb636c698cebdb9..0fa50979644d448768e94c6d8c94a9f28a872321 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -23,6 +24,7 @@
 
 struct uniphier_spi_priv {
        void __iomem *base;
+       dma_addr_t base_dma_addr;
        struct clk *clk;
        struct spi_master *master;
        struct completion xfer_done;
@@ -32,6 +34,7 @@ struct uniphier_spi_priv {
        unsigned int rx_bytes;
        const u8 *tx_buf;
        u8 *rx_buf;
+       atomic_t dma_busy;
 
        bool is_save_param;
        u8 bits_per_word;
@@ -61,11 +64,16 @@ struct uniphier_spi_priv {
 #define   SSI_FPS_FSTRT                BIT(14)
 
 #define SSI_SR                 0x14
+#define   SSI_SR_BUSY          BIT(7)
 #define   SSI_SR_RNE           BIT(0)
 
 #define SSI_IE                 0x18
+#define   SSI_IE_TCIE          BIT(4)
 #define   SSI_IE_RCIE          BIT(3)
+#define   SSI_IE_TXRE          BIT(2)
+#define   SSI_IE_RXRE          BIT(1)
 #define   SSI_IE_RORIE         BIT(0)
+#define   SSI_IE_ALL_MASK      GENMASK(4, 0)
 
 #define SSI_IS                 0x1c
 #define   SSI_IS_RXRS          BIT(9)
@@ -87,15 +95,19 @@ struct uniphier_spi_priv {
 #define SSI_RXDR               0x24
 
 #define SSI_FIFO_DEPTH         8U
+#define SSI_FIFO_BURST_NUM     1
+
+#define SSI_DMA_RX_BUSY                BIT(1)
+#define SSI_DMA_TX_BUSY                BIT(0)
 
 static inline unsigned int bytes_per_word(unsigned int bits)
 {
        return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
 }
 
-static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
+                                          u32 mask)
 {
-       struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
        u32 val;
 
        val = readl(priv->base + SSI_IE);
@@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
        writel(val, priv->base + SSI_IE);
 }
 
-static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
+                                           u32 mask)
 {
-       struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
        u32 val;
 
        val = readl(priv->base + SSI_IE);
@@ -290,25 +302,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
        }
 }
 
-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
+                                           unsigned int threshold)
 {
-       unsigned int fifo_threshold, fill_bytes;
        u32 val;
 
-       fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
-                               bytes_per_word(priv->bits_per_word));
-       fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
-
-       fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
-
-       /* set fifo threshold */
        val = readl(priv->base + SSI_FC);
        val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
-       val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
-       val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
+       val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
+       val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
        writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+       unsigned int fifo_threshold, fill_words;
+       unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+       fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
+       fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+       uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
+
+       fill_words = fifo_threshold -
+               DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
 
-       while (fill_bytes--)
+       while (fill_words--)
                uniphier_spi_send(priv);
 }
 
@@ -327,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
        writel(val, priv->base + SSI_FPS);
 }
 
+static bool uniphier_spi_can_dma(struct spi_master *master,
+                                struct spi_device *spi,
+                                struct spi_transfer *t)
+{
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+       if ((!master->dma_tx && !master->dma_rx)
+           || (!master->dma_tx && t->tx_buf)
+           || (!master->dma_rx && t->rx_buf))
+               return false;
+
+       return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
+}
+
+static void uniphier_spi_dma_rxcb(void *data)
+{
+       struct spi_master *master = data;
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+       uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
+
+       if (!(state & SSI_DMA_TX_BUSY))
+               spi_finalize_current_transfer(master);
+}
+
+static void uniphier_spi_dma_txcb(void *data)
+{
+       struct spi_master *master = data;
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+       uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
+
+       if (!(state & SSI_DMA_RX_BUSY))
+               spi_finalize_current_transfer(master);
+}
+
+static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+                                        struct spi_device *spi,
+                                        struct spi_transfer *t)
+{
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+       int buswidth;
+
+       atomic_set(&priv->dma_busy, 0);
+
+       uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
+
+       if (priv->bits_per_word <= 8)
+               buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       else if (priv->bits_per_word <= 16)
+               buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+       else
+               buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+       if (priv->rx_buf) {
+               struct dma_slave_config rxconf = {
+                       .direction = DMA_DEV_TO_MEM,
+                       .src_addr = priv->base_dma_addr + SSI_RXDR,
+                       .src_addr_width = buswidth,
+                       .src_maxburst = SSI_FIFO_BURST_NUM,
+               };
+
+               dmaengine_slave_config(master->dma_rx, &rxconf);
+
+               rxdesc = dmaengine_prep_slave_sg(
+                       master->dma_rx,
+                       t->rx_sg.sgl, t->rx_sg.nents,
+                       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!rxdesc)
+                       goto out_err_prep;
+
+               rxdesc->callback = uniphier_spi_dma_rxcb;
+               rxdesc->callback_param = master;
+
+               uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
+               atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+               dmaengine_submit(rxdesc);
+               dma_async_issue_pending(master->dma_rx);
+       }
+
+       if (priv->tx_buf) {
+               struct dma_slave_config txconf = {
+                       .direction = DMA_MEM_TO_DEV,
+                       .dst_addr = priv->base_dma_addr + SSI_TXDR,
+                       .dst_addr_width = buswidth,
+                       .dst_maxburst = SSI_FIFO_BURST_NUM,
+               };
+
+               dmaengine_slave_config(master->dma_tx, &txconf);
+
+               txdesc = dmaengine_prep_slave_sg(
+                       master->dma_tx,
+                       t->tx_sg.sgl, t->tx_sg.nents,
+                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!txdesc)
+                       goto out_err_prep;
+
+               txdesc->callback = uniphier_spi_dma_txcb;
+               txdesc->callback_param = master;
+
+               uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
+               atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+               dmaengine_submit(txdesc);
+               dma_async_issue_pending(master->dma_tx);
+       }
+
+       /* signal that we need to wait for completion */
+       return (priv->tx_buf || priv->rx_buf);
+
+out_err_prep:
+       if (rxdesc)
+               dmaengine_terminate_sync(master->dma_rx);
+
+       return -EINVAL;
+}
+
 static int uniphier_spi_transfer_one_irq(struct spi_master *master,
                                         struct spi_device *spi,
                                         struct spi_transfer *t)
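Both DMA completion callbacks above rely on the same rendezvous: atomic_fetch_andnot() clears the caller's busy bit and returns the mask as it was before the clear, so only the side that observes the other bit already gone finalizes the transfer, and it does so exactly once. A minimal model of the pattern, assuming just the two flag bits defined earlier:

	static void dma_side_done(struct spi_master *master, atomic_t *busy,
				  int my_bit, int other_bit)
	{
		int prev = atomic_fetch_andnot(my_bit, busy);	/* old mask */

		if (!(prev & other_bit))	/* other side finished first */
			spi_finalize_current_transfer(master);
	}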
@@ -339,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master,
 
        uniphier_spi_fill_tx_fifo(priv);
 
-       uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+       uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
 
        time_left = wait_for_completion_timeout(&priv->xfer_done,
                                        msecs_to_jiffies(SSI_TIMEOUT_MS));
 
-       uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+       uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
 
        if (!time_left) {
                dev_err(dev, "transfer timeout.\n");
@@ -388,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
 {
        struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
        unsigned long threshold;
+       bool use_dma;
 
        /* Terminate and return success for 0 byte length transfer */
        if (!t->len)
@@ -395,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
 
        uniphier_spi_setup_transfer(spi, t);
 
+       use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+       if (use_dma)
+               return uniphier_spi_transfer_one_dma(master, spi, t);
+
        /*
         * If the transfer operation will take longer than
         * SSI_POLL_TIMEOUT_US, it should use irq.
@@ -425,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
        return 0;
 }
 
+static void uniphier_spi_handle_err(struct spi_master *master,
+                                   struct spi_message *msg)
+{
+       struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+       u32 val;
+
+       /* stop running spi transfer */
+       writel(0, priv->base + SSI_CTL);
+
+       /* reset FIFOs */
+       val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+       writel(val, priv->base + SSI_FC);
+
+       uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
+
+       if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
+               dmaengine_terminate_async(master->dma_tx);
+               atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+       }
+
+       if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
+               dmaengine_terminate_async(master->dma_rx);
+               atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+       }
+}
+
 static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
 {
        struct uniphier_spi_priv *priv = dev_id;
@@ -470,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev)
 {
        struct uniphier_spi_priv *priv;
        struct spi_master *master;
+       struct resource *res;
+       struct dma_slave_caps caps;
+       u32 dma_tx_burst = 0, dma_rx_burst = 0;
        unsigned long clk_rate;
        int irq;
        int ret;
@@ -484,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev)
        priv->master = master;
        priv->is_save_param = false;
 
-       priv->base = devm_platform_ioremap_resource(pdev, 0);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto out_master_put;
        }
+       priv->base_dma_addr = res->start;
 
        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk)) {
@@ -531,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev)
                                = uniphier_spi_prepare_transfer_hardware;
        master->unprepare_transfer_hardware
                                = uniphier_spi_unprepare_transfer_hardware;
+       master->handle_err = uniphier_spi_handle_err;
+       master->can_dma = uniphier_spi_can_dma;
+
        master->num_chipselect = 1;
+       master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+       master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+       if (IS_ERR_OR_NULL(master->dma_tx)) {
+               if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+                       goto out_disable_clk;
+               master->dma_tx = NULL;
+               dma_tx_burst = INT_MAX;
+       } else {
+               ret = dma_get_slave_caps(master->dma_tx, &caps);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get TX DMA capabilities: %d\n",
+                               ret);
+                       goto out_disable_clk;
+               }
+               dma_tx_burst = caps.max_burst;
+       }
+
+       master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+       if (IS_ERR_OR_NULL(master->dma_rx)) {
+               if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+                       goto out_disable_clk;
+               master->dma_rx = NULL;
+               dma_rx_burst = INT_MAX;
+       } else {
+               ret = dma_get_slave_caps(master->dma_rx, &caps);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get RX DMA capabilities: %d\n",
+                               ret);
+                       goto out_disable_clk;
+               }
+               dma_rx_burst = caps.max_burst;
+       }
+
+       master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
 
        ret = devm_spi_register_master(&pdev->dev, master);
        if (ret)
@@ -551,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev)
 {
        struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
 
+       if (priv->master->dma_tx)
+               dma_release_channel(priv->master->dma_tx);
+       if (priv->master->dma_rx)
+               dma_release_channel(priv->master->dma_rx);
+
        clk_disable_unprepare(priv->clk);
 
        return 0;
index 5e4c4532f7f326f74dc7d816db7699ea6d03e09c..38b4c78df506c060fa49d9b86e97020e7a910b48 100644 (file)
@@ -1499,8 +1499,7 @@ static void spi_pump_messages(struct kthread_work *work)
  *                         advances its @tx buffer pointer monotonically.
  * @ctlr: Pointer to the spi_controller structure of the driver
  * @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
- *     preparing to transmit right now.
+ * @progress: How many words (not bytes) have been transferred so far
  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
  *           transfer, for less jitter in time measurement. Only compatible
  *           with PIO drivers. If true, must follow up with
@@ -1510,21 +1509,19 @@ static void spi_pump_messages(struct kthread_work *work)
  */
 void spi_take_timestamp_pre(struct spi_controller *ctlr,
                            struct spi_transfer *xfer,
-                           const void *tx, bool irqs_off)
+                           size_t progress, bool irqs_off)
 {
-       u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
        if (!xfer->ptp_sts)
                return;
 
        if (xfer->timestamped_pre)
                return;
 
-       if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+       if (progress < xfer->ptp_sts_word_pre)
                return;
 
        /* Capture the resolution of the timestamp */
-       xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+       xfer->ptp_sts_word_pre = progress;
 
        xfer->timestamped_pre = true;
 
@@ -1546,23 +1543,20 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
  *                          timestamped.
  * @ctlr: Pointer to the spi_controller structure of the driver
  * @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
- *     just transmitted.
+ * @progress: How many words (not bytes) have been transferred so far
  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
  */
 void spi_take_timestamp_post(struct spi_controller *ctlr,
                             struct spi_transfer *xfer,
-                            const void *tx, bool irqs_off)
+                            size_t progress, bool irqs_off)
 {
-       u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
        if (!xfer->ptp_sts)
                return;
 
        if (xfer->timestamped_post)
                return;
 
-       if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+       if (progress < xfer->ptp_sts_word_post)
                return;
 
        ptp_read_system_postts(xfer->ptp_sts);
@@ -1573,7 +1567,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
        }
 
        /* Capture the resolution of the timestamp */
-       xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+       xfer->ptp_sts_word_post = progress;
 
        xfer->timestamped_post = true;
 }
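With the switch from a tx-buffer pointer to a word count, a PIO driver now brackets each word it pushes with the pre/post calls and simply passes how many words are already out. A sketch of the new convention (xmit_word() is a hypothetical FIFO write, not a real API):

	static void pio_tx(struct spi_controller *ctlr, struct spi_transfer *xfer,
			   void *priv, bool irqs_off)
	{
		unsigned int bpw = DIV_ROUND_UP(xfer->bits_per_word, 8);
		size_t i, words = xfer->len / bpw;

		for (i = 0; i < words; i++) {
			spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
			xmit_word(priv, xfer, i);		/* hypothetical */
			spi_take_timestamp_post(ctlr, xfer, i + 1, irqs_off);
		}
	}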
@@ -1680,6 +1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
                }
        }
 
+       if (unlikely(ctlr->ptp_sts_supported)) {
+               list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+                       WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
+                       WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
+               }
+       }
+
        spi_unmap_msg(ctlr, mesg);
 
        if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -2457,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
        int nb, i;
        struct gpio_desc **cs;
        struct device *dev = &ctlr->dev;
+       unsigned long native_cs_mask = 0;
+       unsigned int num_cs_gpios = 0;
 
        nb = gpiod_count(dev, "cs");
        ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
@@ -2498,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
                        if (!gpioname)
                                return -ENOMEM;
                        gpiod_set_consumer_name(cs[i], gpioname);
+                       num_cs_gpios++;
+                       continue;
                }
+
+               if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+                       dev_err(dev, "Invalid native chip select %d\n", i);
+                       return -EINVAL;
+               }
+               native_cs_mask |= BIT(i);
+       }
+
+       ctlr->unused_native_cs = ffz(native_cs_mask);
+       if (num_cs_gpios && ctlr->max_native_cs &&
+           ctlr->unused_native_cs >= ctlr->max_native_cs) {
+               dev_err(dev, "No unused native chip select available\n");
+               return -EINVAL;
        }
 
        return 0;
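ffz() picks the lowest clear bit, i.e. the first chip-select index not claimed by a native line, which the controller can then park on while GPIOs do the real selection. A worked example with hypothetical wiring:

	/* Native chip selects on 0 and 1, a GPIO drives CS 2. */
	unsigned long native_cs_mask = BIT(0) | BIT(1);	/* 0b011 */
	unsigned int unused = ffz(native_cs_mask);	/* == 2; must stay below
							 * ctlr->max_native_cs */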
index 06b68dd6e0221fcf71bb0b59ae7ed57c46fd5131..bc275968fcc6d06a2daa38b3a941614744ddff7f 100644 (file)
@@ -63,7 +63,7 @@ int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports
        for (i = 0; i < 2; i++) {
                void __iomem *uart_regs;
 
-               uart_regs = ioremap_nocache(SSB_EUART, 16);
+               uart_regs = ioremap(SSB_EUART, 16);
                if (uart_regs) {
                        uart_regs += (i * 8);
 
index 6a5622e0ded5443f02a8be85763a326be462be45..c1186415896bacc56fcb0c65ac4420247afcdaa7 100644 (file)
@@ -122,7 +122,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc,
        if (unlikely(!addr))
                goto out;
        err = -ENOMEM;
-       mmio = ioremap_nocache(addr, len);
+       mmio = ioremap(addr, len);
        if (!mmio)
                goto out;
 
@@ -168,7 +168,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc,
        if (unlikely(!addr))
                goto out;
        err = -ENOMEM;
-       mmio = ioremap_nocache(addr, len);
+       mmio = ioremap(addr, len);
        if (!mmio)
                goto out;
 
@@ -382,7 +382,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
        /* Ok, ready to run, register it to the system.
         * The following needs change, if we want to port hostmode
         * to non-MIPS platform. */
-       ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000);
+       ssb_pcicore_controller.io_map_base = (unsigned long)ioremap(SSB_PCI_MEM, 0x04000000);
        set_io_port_base(ssb_pcicore_controller.io_map_base);
        /* Give some time to the PCI controller to configure itself with the new
         * values. Not waiting at this point causes crashes of the machine. */
index 3fffe4d6f327f1ba4c2ea3df8c681811f1c3c7e3..f180a8e9f58af6c331805ec1ab80d29cd0112a5a 100644 (file)
@@ -4,7 +4,7 @@
 #
 config XIL_AXIS_FIFO
        tristate "Xilinx AXI-Stream FIFO IP core driver"
-       depends on OF
+       depends on OF && HAS_IOMEM
        help
          This adds support for the Xilinx AXI-Stream FIFO IP core driver.
          The AXI Streaming FIFO allows memory-mapped access to an AXI Streaming
index dbff0f7e7cf5272212c2d16bfffcad9a3aff8ca8..ddc0dc93d08b64182cc9d2b56325bde0e3af0931 100644 (file)
@@ -46,8 +46,8 @@
 #define PCI171X_RANGE_UNI      BIT(4)
 #define PCI171X_RANGE_GAIN(x)  (((x) & 0x7) << 0)
 #define PCI171X_MUX_REG                0x04    /* W:   A/D multiplexor control */
-#define PCI171X_MUX_CHANH(x)   (((x) & 0xf) << 8)
-#define PCI171X_MUX_CHANL(x)   (((x) & 0xf) << 0)
+#define PCI171X_MUX_CHANH(x)   (((x) & 0xff) << 8)
+#define PCI171X_MUX_CHANL(x)   (((x) & 0xff) << 0)
 #define PCI171X_MUX_CHAN(x)    (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x))
 #define PCI171X_STATUS_REG     0x06    /* R:   status register */
 #define PCI171X_STATUS_IRQ     BIT(11) /* 1=IRQ occurred */
index 4bdf44d8287996fb9a07af330316744078cb0eb0..dc62db1ee1dde83b3dfd550bf1cb9eaae5a3ea25 100644 (file)
@@ -623,6 +623,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
                    dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
                                       &devpriv->dio_buffer_phys_addr[i],
                                       GFP_KERNEL);
+               if (!devpriv->dio_buffer[i]) {
+                       dev_warn(dev->class_dev,
+                                "failed to allocate DMA buffer\n");
+                       return -ENOMEM;
+               }
        }
        /* allocate dma descriptors */
        devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev,
@@ -630,6 +635,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
                                               NUM_DMA_DESCRIPTORS,
                                               &devpriv->dma_desc_phys_addr,
                                               GFP_KERNEL);
+       if (!devpriv->dma_desc) {
+               dev_warn(dev->class_dev,
+                        "failed to allocate DMA descriptors\n");
+               return -ENOMEM;
+       }
        if (devpriv->dma_desc_phys_addr & 0xf) {
                dev_warn(dev->class_dev,
                         " dma descriptors not quad-word aligned (bug)\n");
index 673d732dcb8faefe70039b08e4fa39cf342ef365..8f398b30f5bf9d6a4572519073e3de81548b5bff 100644 (file)
@@ -72,9 +72,6 @@ static int ni_find_device_routes(const char *device_family,
                }
        }
 
-       if (!rv)
-               return -ENODATA;
-
        /* Second, find the set of routes valid for this device. */
        for (i = 0; ni_device_routes_list[i]; ++i) {
                if (memcmp(ni_device_routes_list[i]->device, board_name,
@@ -84,12 +81,12 @@ static int ni_find_device_routes(const char *device_family,
                }
        }
 
-       if (!dr)
-               return -ENODATA;
-
        tables->route_values = rv;
        tables->valid_routes = dr;
 
+       if (!rv || !dr)
+               return -ENODATA;
+
        return 0;
 }
 
@@ -487,6 +484,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest,
 {
        int src;
 
+       if (!tables->route_values)
+               return -EINVAL;
+
        dest = B(dest); /* subtract NI names offset */
        /* ensure we are not going to under/over run the route value table */
        if (dest < 0 || dest >= NI_NUM_NAMES)
index 2aac1e000977ef5e673e7d310ac2c0cbaae6e7a9..51c665a924b76cfb9b306af51b4dabe5a933d0d7 100644 (file)
@@ -805,8 +805,8 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
 s32 create_file(struct inode *inode, struct chain_t *p_dir,
                struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid);
 void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry);
-s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry,
-               struct uni_name_t *p_uniname, struct file_id_t *fid);
+s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry,
+                     struct uni_name_t *p_uniname, struct file_id_t *fid);
 s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
              struct chain_t *p_newdir, struct uni_name_t *p_uniname,
              struct file_id_t *fid);
index d2d3447083c7bd353a4d030574b31b72d8ce6309..794000e7bc6fae09ec157af540f34a6fc0ff9070 100644 (file)
@@ -192,8 +192,6 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
 
        exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
 
-       return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-
 #ifdef CONFIG_EXFAT_DISCARD
        if (opts->discard) {
                ret = sb_issue_discard(sb, START_SECTOR(clu),
@@ -202,9 +200,13 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
                if (ret == -EOPNOTSUPP) {
                        pr_warn("discard not supported by device, disabling\n");
                        opts->discard = 0;
+               } else {
+                       return ret;
                }
        }
 #endif /* CONFIG_EXFAT_DISCARD */
+
+       return sector_write(sb, sector, p_fs->vol_amap[i], 0);
 }
 
 static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
@@ -2322,8 +2324,8 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
        fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
 }
 
-s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
-               struct uni_name_t *p_uniname, struct file_id_t *fid)
+s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
+                     struct uni_name_t *p_uniname, struct file_id_t *fid)
 {
        s32 ret, newentry = -1, num_old_entries, num_new_entries;
        sector_t sector_old, sector_new;
index 6e481908c59f642645824df070eb3efc7266f3dd..9f91853b189b01ee71143b1932ac1424bce6a15d 100644 (file)
@@ -1262,8 +1262,8 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
        fs_set_vol_flags(sb, VOL_DIRTY);
 
        if (olddir.dir == newdir.dir)
-               ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name,
-                                 fid);
+               ret = exfat_rename_file(new_parent_inode, &olddir, dentry,
+                                       &uni_name, fid);
        else
                ret = move_file(new_parent_inode, &olddir, dentry, &newdir,
                                &uni_name, fid);
index e763205e9e4ff5252a37610a84faffb7404a08f3..f61e373c75e9660897e7f1557bd1ed46353f6655 100644 (file)
@@ -63,11 +63,17 @@ static int init_display(struct fbtft_par *par)
 {
        int ret;
 
-       /* Set CS active high */
-       par->spi->mode |= SPI_CS_HIGH;
+       /*
+        * Set CS active inverse polarity: just setting SPI_CS_HIGH does not
+        * work with GPIO based chip selects that are logically active high
+        * but inverted inside the GPIO library, so enforce inverted
+        * semantics.
+        */
+       par->spi->mode ^= SPI_CS_HIGH;
        ret = spi_setup(par->spi);
        if (ret) {
-               dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
+               dev_err(par->info->device,
+                       "Could not set inverse CS polarity\n");
                return ret;
        }
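The XOR flips whatever CS polarity the core currently assumes instead of forcing SPI_CS_HIGH, so the result is correct whether the chip select is native or sits behind an already-inverted active-low GPIO mapping. A truth-table sketch of the toggle:

	/* SPI_CS_HIGH bit before  ->  after mode ^= SPI_CS_HIGH
	 *   0 (active low)            1 (active high)
	 *   1 (active high)           0 (active low)
	 * i.e. "invert the current CS sense" rather than "force high".
	 */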
 
index 27cc8eabcbe9dc598561087e9d9555d0098b31a8..76b25df376b8f0f5a696e188652da0d57abffae6 100644 (file)
@@ -150,10 +150,17 @@ static int init_display(struct fbtft_par *par)
 
        /* enable SPI interface by having CS and MOSI low during reset */
        save_mode = par->spi->mode;
-       par->spi->mode |= SPI_CS_HIGH;
-       ret = spi_setup(par->spi); /* set CS inactive low */
+       /*
+        * Set CS active inverse polarity: just setting SPI_CS_HIGH does not
+        * work with GPIO based chip selects that are logically active high
+        * but inverted inside the GPIO library, so enforce inverted
+        * semantics.
+        */
+       par->spi->mode ^= SPI_CS_HIGH;
+       ret = spi_setup(par->spi);
        if (ret) {
-               dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
+               dev_err(par->info->device,
+                       "Could not set inverse CS polarity\n");
                return ret;
        }
        write_reg(par, 0x00); /* make sure mode is set */
index ffb84987dd867fb5030ec56bf2cfc261bb7d605f..d3e098b41b1a4698878950299a4cf2273798f42a 100644 (file)
@@ -913,7 +913,7 @@ static int fbtft_init_display_from_property(struct fbtft_par *par)
        if (count == 0)
                return -EINVAL;
 
-       values = kmalloc_array(count, sizeof(*values), GFP_KERNEL);
+       values = kmalloc_array(count + 1, sizeof(*values), GFP_KERNEL);
        if (!values)
                return -ENOMEM;
 
@@ -926,9 +926,9 @@ static int fbtft_init_display_from_property(struct fbtft_par *par)
                gpiod_set_value(par->gpio.cs, 0);  /* Activate chip */
 
        index = -1;
-       while (index < count) {
-               val = values[++index];
+       val = values[++index];
 
+       while (index < count) {
                if (val & FBTFT_OF_INIT_CMD) {
                        val &= 0xFFFF;
                        i = 0;
index cd8be80d2076b10247bede882396db865c31e2ef..be6b50f454b43cc8872f44d0ba5b3693d8d94053 100644 (file)
@@ -303,7 +303,7 @@ static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
        }
 
        gasket_dev->bar_data[bar_num].virt_base =
-               ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
+               ioremap(gasket_dev->bar_data[bar_num].phys_base,
                                gasket_dev->bar_data[bar_num].length_bytes);
        if (!gasket_dev->bar_data[bar_num].virt_base) {
                dev_err(gasket_dev->dev,
index fb395cfe6b92792fbe7e02e4009fb1e8111dea2f..f20ab21a6b2ad9ccffe57c06ce28122d7efa741f 100644 (file)
@@ -6,6 +6,7 @@
 config NET_VENDOR_HP
        bool "HP devices"
        default y
+       depends on ETHERNET
        depends on ISA || EISA || PCI
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
index 1b9b43659bdf4159ab12af55d6a9a5b96b2b6087..a20c0bfa68f387dc69a48fe8732eebabc0d4a5be 100644 (file)
@@ -571,8 +571,7 @@ static int gigaset_initcshw(struct cardstate *cs)
 {
        struct usb_cardstate *ucs;
 
-       cs->hw.usb = ucs =
-               kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
+       cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
        if (!ucs) {
                pr_err("out of memory\n");
                return -ENOMEM;
@@ -584,9 +583,6 @@ static int gigaset_initcshw(struct cardstate *cs)
        ucs->bchars[3] = 0;
        ucs->bchars[4] = 0x11;
        ucs->bchars[5] = 0x13;
-       ucs->bulk_out_buffer = NULL;
-       ucs->bulk_out_urb = NULL;
-       ucs->read_urb = NULL;
        tasklet_init(&cs->write_tasklet,
                     gigaset_modem_fill, (unsigned long) cs);
 
@@ -685,6 +681,11 @@ static int gigaset_probe(struct usb_interface *interface,
                return -ENODEV;
        }
 
+       if (hostif->desc.bNumEndpoints < 2) {
+               dev_err(&interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
        dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
 
        /* allocate memory for our device state and initialize it */
@@ -704,6 +705,12 @@ static int gigaset_probe(struct usb_interface *interface,
 
        endpoint = &hostif->endpoint[0].desc;
 
+       if (!usb_endpoint_is_bulk_out(endpoint)) {
+               dev_err(&interface->dev, "missing bulk-out endpoint\n");
+               retval = -ENODEV;
+               goto error;
+       }
+
        buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
        ucs->bulk_out_size = buffer_size;
        ucs->bulk_out_epnum = usb_endpoint_num(endpoint);
@@ -723,6 +730,12 @@ static int gigaset_probe(struct usb_interface *interface,
 
        endpoint = &hostif->endpoint[1].desc;
 
+       if (!usb_endpoint_is_int_in(endpoint)) {
+               dev_err(&interface->dev, "missing int-in endpoint\n");
+               retval = -ENODEV;
+               goto error;
+       }
+
        ucs->busy = 0;
 
        ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
index 0a23727d0dc3add90fd96de880b406286f9bda8b..93cf28febdf60f6923c7bc8c507e418204f027e8 100644 (file)
@@ -338,7 +338,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
        reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR);
        reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR);
 
-       pcard->regs_bar_base = ioremap_nocache(reg_bar_phys_addr, PAGE_SIZE);
+       pcard->regs_bar_base = ioremap(reg_bar_phys_addr, PAGE_SIZE);
        if (!pcard->regs_bar_base) {
                dev_err(&pcard->pdev->dev,
                        "probe: REG_BAR could not remap memory to virtual space\n");
@@ -367,7 +367,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
        dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR);
        dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR);
 
-       pcard->dma_bar_base = ioremap_nocache(dma_bar_phys_addr,
+       pcard->dma_bar_base = ioremap(dma_bar_phys_addr,
                                              dma_bar_phys_len);
        if (!pcard->dma_bar_base) {
                dev_err(&pcard->pdev->dev,
index 5460bf973c9cff8c592379471abe05b256e9408a..592099a1fca55d13047f6adffdb319bfeb206015 100644 (file)
@@ -659,7 +659,7 @@ static int pi2c_probe(struct platform_device *pldev)
        if (!res)
                return -ENXIO;
 
-       priv->smba = (unsigned long)devm_ioremap_nocache(&pldev->dev,
+       priv->smba = (unsigned long)devm_ioremap(&pldev->dev,
                                                         res->start,
                                                         resource_size(res));
        if (!priv->smba)
index 8becf972af9c98dc05fe788272b397f41492988f..1c360daa703db418782b850159493b793d30fb1c 100644 (file)
@@ -464,7 +464,7 @@ kp_spi_probe(struct platform_device *pldev)
                goto free_master;
        }
 
-       kpspi->base = devm_ioremap_nocache(&pldev->dev, r->start,
+       kpspi->base = devm_ioremap(&pldev->dev, r->start,
                                           resource_size(r));
 
        status = spi_register_master(master);
index a05ae6d40db9d27bf731c4660936ffba5b98488e..ec79a8500cafdb2f94c342b9ece1545a6bf949b0 100644 (file)
@@ -122,7 +122,7 @@ int  kpc_dma_probe(struct platform_device *pldev)
                rv = -ENXIO;
                goto err_kfree;
        }
-       ldev->eng_regs = ioremap_nocache(r->start, resource_size(r));
+       ldev->eng_regs = ioremap(r->start, resource_size(r));
        if (!ldev->eng_regs) {
                dev_err(&ldev->pldev->dev, "%s: failed to ioremap engine regs!\n", __func__);
                rv = -ENXIO;
index 6f0cd07847863862e25abe822884373601cfaa90..3be41698df4c8e30f26639b52e6b4fbce134d484 100644 (file)
@@ -2914,7 +2914,7 @@ static int allegro_probe(struct platform_device *pdev)
                        "regs resource missing from device tree\n");
                return -EINVAL;
        }
-       regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+       regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (IS_ERR(regs)) {
                dev_err(&pdev->dev, "failed to map registers\n");
                return PTR_ERR(regs);
@@ -2932,7 +2932,7 @@ static int allegro_probe(struct platform_device *pdev)
                        "sram resource missing from device tree\n");
                return -EINVAL;
        }
-       sram_regs = devm_ioremap_nocache(&pdev->dev,
+       sram_regs = devm_ioremap(&pdev->dev,
                                         sram_res->start,
                                         resource_size(sram_res));
        if (IS_ERR(sram_regs)) {
index 08eaa0bad0de66405f3b716f239a9fdb107d39ea..1c9c3ba4d518dae30d63e387e0b97b9316dd887e 100644 (file)
@@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s {
        __u16 reserved1;
        __u32 bayer_sign;
        __u8 bayer_nf;
-       __u8 reserved2[3];
+       __u8 reserved2[7];
 } __attribute__((aligned(32))) __packed;
 
 /**
index 5319909eb2f6e81cab72ac3da2eb9fb246addd7b..e7f4ddcc1361938dacfa81f7e32160eac74993eb 100644 (file)
@@ -3,6 +3,7 @@ config OCTEON_ETHERNET
        tristate "Cavium Networks Octeon Ethernet support"
        depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
        depends on NETDEVICES
+       depends on BROKEN
        select PHYLIB
        select MDIO_OCTEON
        help
index a6886cc5654cede0dcf3dbcee370fd7be53a18c9..56d116d79e56aa531a6baa9086149267e128b1d4 100644 (file)
@@ -41,7 +41,7 @@ struct ql_stats {
        int stat_offset;
 };
 
-#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
+#define QL_SIZEOF(m) sizeof_field(struct ql_adapter, m)
 #define QL_OFF(m) offsetof(struct ql_adapter, m)
 
 static const struct ql_stats ql_gstrings_stats[] = {
index 6ad4515311f770fda2e449cb809379cde452b0e9..d890d38a1d2957bde2f5066c1359d5ed00d391e2 100644 (file)
@@ -4455,7 +4455,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
        pdev->needs_freset = 1;
        pci_save_state(pdev);
        qdev->reg_base =
-           ioremap_nocache(pci_resource_start(pdev, 1),
+           ioremap(pci_resource_start(pdev, 1),
                            pci_resource_len(pdev, 1));
        if (!qdev->reg_base) {
                dev_err(&pdev->dev, "Register mapping failed.\n");
@@ -4465,7 +4465,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
 
        qdev->doorbell_area_size = pci_resource_len(pdev, 3);
        qdev->doorbell_area =
-           ioremap_nocache(pci_resource_start(pdev, 3),
+           ioremap(pci_resource_start(pdev, 3),
                            pci_resource_len(pdev, 3));
        if (!qdev->doorbell_area) {
                dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
index 4fac9dca798e812d8f4315baff14193fb6fcf13f..b5d42f411dd810bd2670c6a34c9217609bbdbbd3 100644 (file)
@@ -37,6 +37,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
        {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
        {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+       {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
        {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
        {}      /* Terminating entry */
@@ -70,7 +71,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
        phost_conf = pusbd->actconfig;
        pconf_desc = &phost_conf->desc;
 
-       phost_iface = &usb_intf->altsetting[0];
+       phost_iface = usb_intf->cur_altsetting;
        piface_desc = &phost_iface->desc;
 
        pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
index dace81a7d1bad6a8a0e622ce6f592075a5b28ab9..e895473fcfd759c388f8884bb95b94365e0c1550 100644 (file)
@@ -2463,7 +2463,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
        }
 
 
-       ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len);
+       ioaddr = (unsigned long)ioremap(pmem_start, pmem_len);
        if (ioaddr == (unsigned long)NULL) {
                netdev_err(dev, "ioremap failed!");
                goto err_rel_mem;
index ba1288297ee4b747b6c741e1faff5a83a08db26a..a87562f632a7ff07a4f7fb405dbb66ad4764f0da 100644 (file)
@@ -247,7 +247,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
 
        pdvobjpriv->padapter = padapter;
        padapter->eeprom_address_size = 6;
-       phost_iface = &pintf->altsetting[0];
+       phost_iface = pintf->cur_altsetting;
        piface_desc = &phost_iface->desc;
        pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
        if (pusbd->speed == USB_SPEED_HIGH) {
index cb95ad6fa4f94f96c8239b2233a80cccdaef1a74..fbb42e5258fda043b11f42932bfc086251d63fcb 100644 (file)
@@ -858,7 +858,7 @@ static int rtsx_probe(struct pci_dev *pci,
        dev_info(&pci->dev, "Resource length: 0x%x\n",
                 (unsigned int)pci_resource_len(pci, 0));
        dev->addr = pci_resource_start(pci, 0);
-       dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
+       dev->remap_addr = ioremap(dev->addr, pci_resource_len(pci, 0));
        if (!dev->remap_addr) {
                dev_err(&pci->dev, "ioremap error\n");
                err = -ENXIO;
index ea1d3d4efbc2d89f9b64bc848df8912684876f68..b8d60701f89862038521a66ff88acc37662797f7 100644 (file)
@@ -50,7 +50,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
        }
 
        /* now map mmio and vidmem */
-       sm750_dev->pvReg = ioremap_nocache(sm750_dev->vidreg_start,
+       sm750_dev->pvReg = ioremap(sm750_dev->vidreg_start,
                                           sm750_dev->vidreg_size);
        if (!sm750_dev->pvReg) {
                pr_err("mmio failed\n");
index 34020ed351abdfde601d2de48b94e39290077d3c..a5ab255d7d361309a3cca58c7c5c4ce69a1106e6 100644 (file)
@@ -216,11 +216,11 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc)
                goto error_request_region;
        }
 
-       whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
+       whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len);
        if (whcrc->rc_base == NULL) {
                dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
                        whcrc->rc_len, whcrc->area, result);
-               goto error_ioremap_nocache;
+               goto error_ioremap;
        }
 
        result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
@@ -254,7 +254,7 @@ error_cmd_buffer:
        free_irq(umc_dev->irq, whcrc);
 error_request_irq:
        iounmap(whcrc->rc_base);
-error_ioremap_nocache:
+error_ioremap:
        release_mem_region(whcrc->area, whcrc->rc_len);
 error_request_region:
        return result;
index 02148a24818a603c402963e1db1326c75eb87258..4458c1e60fa315f92d0a07b3999a008430661e5b 100644 (file)
@@ -3309,7 +3309,7 @@ static int __init vchiq_driver_init(void)
        return 0;
 
 region_unregister:
-       platform_driver_unregister(&vchiq_driver);
+       unregister_chrdev_region(vchiq_devid, 1);
 
 class_destroy:
        class_destroy(vchiq_class);
index 8d19ae71e7cc9124f666f1807e8752e379a222b3..4e651b698617f1a025b7704ead8c1f1e275728df 100644 (file)
@@ -449,8 +449,8 @@ int vnt_vt3184_init(struct vnt_private *priv)
 
        memcpy(array, addr, length);
 
-       ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
-                             MESSAGE_REQUEST_BBREG, length, array);
+       ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+                                    MESSAGE_REQUEST_BBREG, length, array);
        if (ret)
                goto end;
 
index 56cd77fd9ea021d9849b0e4ed5a1abc26bc9c7a6..7958fc165462fd0b951183e8b2a5bb481a113389 100644 (file)
@@ -719,7 +719,7 @@ end:
  */
 int vnt_radio_power_on(struct vnt_private *priv)
 {
-       int ret = true;
+       int ret = 0;
 
        vnt_exit_deep_sleep(priv);
 
index 6074ceda78bfc1c0b3b7f85d947c9efa5351abc6..50e1c89180409cc50f6e96633dedc46bd3b8ae95 100644 (file)
@@ -259,6 +259,7 @@ struct vnt_private {
        u8 mac_hw;
        /* netdev */
        struct usb_device *usb;
+       struct usb_interface *intf;
 
        u64 tsf_time;
        u8 rx_rate;
index 4ac85ecb0921df0b1066aa46b7b2c05ea0115b0d..9cb924c545719bd5c49cccc3f86cdd41916147c2 100644 (file)
@@ -949,7 +949,7 @@ static const struct ieee80211_ops vnt_mac_ops = {
 
 int vnt_init(struct vnt_private *priv)
 {
-       if (!(vnt_init_registers(priv)))
+       if (vnt_init_registers(priv))
                return -EAGAIN;
 
        SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr);
@@ -992,6 +992,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
        priv = hw->priv;
        priv->hw = hw;
        priv->usb = udev;
+       priv->intf = intf;
 
        vnt_set_options(priv);
 
index d3304df6bd53de0a6db5c41f903cce5ee7561146..d977d4777e4f672889f886223b41ea3db07cb00c 100644 (file)
@@ -59,7 +59,9 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
 
        kfree(usb_buffer);
 
-       if (ret >= 0 && ret < (int)length)
+       if (ret == (int)length)
+               ret = 0;
+       else
                ret = -EIO;
 
 end_unlock:
@@ -74,6 +76,23 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data)
                               reg_off, reg, sizeof(u8), &data);
 }
 
+int vnt_control_out_blocks(struct vnt_private *priv,
+                          u16 block, u8 reg, u16 length, u8 *data)
+{
+       int ret = 0, i;
+
+       for (i = 0; i < length; i += block) {
+               u16 len = min_t(int, length - i, block);
+
+               ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE,
+                                     i, reg, len, data + i);
+               if (ret)
+                       goto end;
+       }
+end:
+       return ret;
+}
+
 int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
                   u16 index, u16 length, u8 *buffer)
 {
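vnt_control_out_blocks() walks the buffer in block-sized steps, turning one long register write into a series of bounded USB control messages. Unrolled for a hypothetical 200-byte table with the 64-byte VNT_REG_BLOCK_SIZE used by the baseband init above:

	vnt_control_out(priv, MESSAGE_TYPE_WRITE,   0, reg, 64, data);
	vnt_control_out(priv, MESSAGE_TYPE_WRITE,  64, reg, 64, data + 64);
	vnt_control_out(priv, MESSAGE_TYPE_WRITE, 128, reg, 64, data + 128);
	vnt_control_out(priv, MESSAGE_TYPE_WRITE, 192, reg,  8, data + 192);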
@@ -103,7 +122,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
 
        kfree(usb_buffer);
 
-       if (ret >= 0 && ret < (int)length)
+       if (ret == (int)length)
+               ret = 0;
+       else
                ret = -EIO;
 
 end_unlock:
index 95147ec7b96ae8e63af958a7453f231e6a084825..b65d9c01a211da4dd2204fca2feab873b6a2a5ac 100644 (file)
@@ -18,6 +18,8 @@
 
 #include "device.h"
 
+#define VNT_REG_BLOCK_SIZE     64
+
 int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
                    u16 index, u16 length, u8 *buffer);
 int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
@@ -26,6 +28,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
 int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data);
 int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data);
 
+int vnt_control_out_blocks(struct vnt_private *priv,
+                          u16 block, u8 reg, u16 len, u8 *data);
+
 int vnt_start_interrupt_urb(struct vnt_private *priv);
 int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb);
 int vnt_tx_context(struct vnt_private *priv,
index 3eb2f11a5de13676247c8daf4d2ac7407ecfe555..2c5250ca2801d455aa65408f3f07571a9c280e86 100644 (file)
@@ -99,6 +99,7 @@ void vnt_run_command(struct work_struct *work)
                if (vnt_init(priv)) {
                        /* If fail all ends TODO retry */
                        dev_err(&priv->usb->dev, "failed to start\n");
+                       usb_set_intfdata(priv->intf, NULL);
                        ieee80211_free_hw(priv->hw);
                        return;
                }
index b722e9773232bea3ef1bcbf38499edcc8aa653ed..b13d7341f8bba1f523aa74dd77f60d51de5bc4e1 100644 (file)
@@ -16,7 +16,7 @@
 #include "traces.h"
 #include "hif_tx_mib.h"
 
-#define WFX_INVALID_RATE_ID (0xFF)
+#define WFX_INVALID_RATE_ID    15
 #define WFX_LINK_ID_NO_ASSOC   15
 #define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
 
@@ -184,7 +184,7 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
                 */
                entry = list_entry(cache->free.prev, struct tx_policy, link);
                memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
-               entry->uploaded = 0;
+               entry->uploaded = false;
                entry->usage_count = 0;
                idx = entry - cache->cache;
        }
@@ -202,6 +202,8 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
        int usage, locked;
        struct tx_policy_cache *cache = &wvif->tx_policy_cache;
 
+       if (idx == WFX_INVALID_RATE_ID)
+               return;
        spin_lock_bh(&cache->lock);
        locked = list_empty(&cache->free);
        usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
@@ -239,7 +241,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif)
                        dst->terminate = 1;
                        dst->count_init = 1;
                        memcpy(&dst->rates, src->rates, sizeof(src->rates));
-                       src->uploaded = 1;
+                       src->uploaded = true;
                        arg->num_tx_rate_policies++;
                }
        }
@@ -249,7 +251,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif)
        return 0;
 }
 
-static void wfx_tx_policy_upload_work(struct work_struct *work)
+void wfx_tx_policy_upload_work(struct work_struct *work)
 {
        struct wfx_vif *wvif =
                container_of(work, struct wfx_vif, tx_policy_upload_work);
@@ -270,7 +272,6 @@ void wfx_tx_policy_init(struct wfx_vif *wvif)
        spin_lock_init(&cache->lock);
        INIT_LIST_HEAD(&cache->used);
        INIT_LIST_HEAD(&cache->free);
-       INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
 
        for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
                list_add(&cache->cache[i].link, &cache->free);
@@ -523,9 +524,9 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
                for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
                        if (rates[i + 1].idx == rates[i].idx &&
                            rates[i].idx != -1) {
-                               rates[i].count =
-                                       max_t(int, rates[i].count,
-                                             rates[i + 1].count);
+                               rates[i].count += rates[i + 1].count;
+                               if (rates[i].count > 15)
+                                       rates[i].count = 15;
                                rates[i + 1].idx = -1;
                                rates[i + 1].count = 0;
 
@@ -537,6 +538,17 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
                        }
                }
        } while (!finished);
+       // Ensure that MCS0 or 1Mbps is present at the end of the retry list
+       for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+               if (rates[i].idx == 0)
+                       break;
+               if (rates[i].idx == -1) {
+                       rates[i].idx = 0;
+                       rates[i].count = 8; // == hw->max_rate_tries
+                       rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+                       break;
+               }
+       }
        // All retries use long GI
        for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
                rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
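Taken together, the two fixups merge adjacent duplicate rates (summing their counts, capped at 15) and guarantee the retry chain ends in the lowest rate. A worked example on hypothetical input:

	/*
	 * in:  { {idx 7, count 3}, {idx 7, count 4}, {idx -1}, {idx -1} }
	 * out: { {idx 7, count 7}, {idx 0, count 8}, {idx -1}, {idx -1} }
	 * The duplicate idx-7 entries merge into one with 3 + 4 = 7 tries,
	 * and MCS0 is appended as the final fallback with count 8
	 * (== hw->max_rate_tries), inheriting only IEEE80211_TX_RC_MCS.
	 */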
@@ -550,7 +562,8 @@ static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
 
        rate_id = wfx_tx_policy_get(wvif,
                                    tx_info->driver_rates, &tx_policy_renew);
-       WARN(rate_id == WFX_INVALID_RATE_ID, "unable to get a valid Tx policy");
+       if (rate_id == WFX_INVALID_RATE_ID)
+               dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy\n");
 
        if (tx_policy_renew) {
                /* FIXME: It's not so optimal to stop TX queues every now and
@@ -679,7 +692,7 @@ void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
        struct ieee80211_sta *sta = control ? control->sta : NULL;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       size_t driver_data_room = FIELD_SIZEOF(struct ieee80211_tx_info,
+       size_t driver_data_room = sizeof_field(struct ieee80211_tx_info,
                                               rate_driver_data);
 
        compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room,
@@ -735,7 +748,9 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
                rate = &tx_info->status.rates[i];
                if (rate->idx < 0)
                        break;
-               if (tx_count < rate->count && arg->status && arg->ack_failures)
+               if (tx_count < rate->count &&
+                   arg->status == HIF_STATUS_RETRY_EXCEEDED &&
+                   arg->ack_failures)
                        dev_dbg(wvif->wdev->dev, "all retries were not consumed: %d != %d\n",
                                rate->count, tx_count);
                if (tx_count <= rate->count && tx_count &&
index 29faa5640516d1020c728d056a3b8fafd4d2c709..0fc388db62e0dd0343869ba32c08cadb5ea7c405 100644 (file)
@@ -39,9 +39,9 @@ struct wfx_link_entry {
 
 struct tx_policy {
        struct list_head link;
+       int usage_count;
        u8 rates[12];
-       u8 usage_count;
-       u8 uploaded;
+       bool uploaded;
 };
 
 struct tx_policy_cache {
@@ -61,6 +61,7 @@ struct wfx_tx_priv {
 } __packed;
 
 void wfx_tx_policy_init(struct wfx_vif *wvif);
+void wfx_tx_policy_upload_work(struct work_struct *work);
 
 void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
            struct sk_buff *skb);
index bb091e395ff5914f754facd9be9e476aaef7991e..9be74881c56c98c4f6010bc71e101fcbec843b46 100644 (file)
@@ -147,7 +147,6 @@ static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
        }
        if (!required)
                val.unpmf_allowed = 1;
-       cpu_to_le32s((u32 *) &val);
        return hif_write_mib(wvif->wdev, wvif->id,
                             HIF_MIB_ID_PROTECTED_MGMT_POLICY,
                             &val, sizeof(val));
index 986a2ef678b9341dfe27f34ae212bf1fe3d3d099..3b47b6c21ea13ff2697a63b0205c6ae480584c13 100644 (file)
@@ -289,7 +289,7 @@ struct wfx_dev *wfx_init_common(struct device *dev,
        hw->sta_data_size = sizeof(struct wfx_sta_priv);
        hw->queues = 4;
        hw->max_rates = 8;
-       hw->max_rate_tries = 15;
+       hw->max_rate_tries = 8;
        hw->extra_tx_headroom = sizeof(struct hif_sl_msg_hdr) +
                                sizeof(struct hif_msg)
                                + sizeof(struct hif_req_tx)
index c7ee90888f69c21c4e8e3e8157d6fc43202547d9..680fed31cefb02b6771d5b531f7080f1fb4b05c7 100644 (file)
@@ -422,6 +422,7 @@ static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
                break;
        case do_wep:
                wfx_tx_lock(wvif->wdev);
+               WARN_ON(wvif->wep_pending_skb);
                wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
                wvif->wep_pending_skb = skb;
                if (!schedule_work(&wvif->wep_key_work))
index 29848a202ab4e16ccff65afb48ea88623214756b..471dd15b227fe7a9b3107ff6b3102ed4e0d16869 100644 (file)
@@ -592,6 +592,7 @@ static void wfx_do_unjoin(struct wfx_vif *wvif)
        wfx_tx_flush(wvif->wdev);
        hif_keep_alive_period(wvif, 0);
        hif_reset(wvif, false);
+       wfx_tx_policy_init(wvif);
        hif_set_output_power(wvif, wvif->wdev->output_power * 10);
        wvif->dtim_period = 0;
        hif_set_macaddr(wvif, wvif->vif->addr);
@@ -880,8 +881,10 @@ static int wfx_update_beaconing(struct wfx_vif *wvif)
                if (wvif->state != WFX_STATE_AP ||
                    wvif->beacon_int != conf->beacon_int) {
                        wfx_tx_lock_flush(wvif->wdev);
-                       if (wvif->state != WFX_STATE_PASSIVE)
+                       if (wvif->state != WFX_STATE_PASSIVE) {
                                hif_reset(wvif, false);
+                               wfx_tx_policy_init(wvif);
+                       }
                        wvif->state = WFX_STATE_PASSIVE;
                        wfx_start_ap(wvif);
                        wfx_tx_unlock(wvif->wdev);
@@ -1567,6 +1570,7 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work);
        INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
 
+       INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
        mutex_unlock(&wdev->conf_mutex);
 
        hif_set_macaddr(wvif, vif->addr);
index ac136663fa8e55fb08ece4416b95f52fa30ebe65..082c16a31616e10256658cfeef87a271b4d328c1 100644 (file)
@@ -4,6 +4,7 @@ config PRISM2_USB
        depends on WLAN && USB && CFG80211
        select WIRELESS_EXT
        select WEXT_PRIV
+       select CRC32
        help
          This is the wlan-ng prism 2.5/3 USB driver for a wide range of
          old USB wireless devices.
index e877b917c15f59fd6121f1c4dcca420aa2092520..30ea37e1a3f5e11021967201bd3f197030259050 100644 (file)
@@ -708,7 +708,7 @@ static int __init cxgbit_init(void)
        pr_info("%s dcb enabled.\n", DRV_NAME);
        register_dcbevent_notifier(&cxgbit_dcbevent_nb);
 #endif
-       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+       BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
                     sizeof(union cxgbit_skb_cb));
        return 0;
 }
index 7251a87bb5769bdb969566aea0b95b21aa69ca80..b94ed4e30770688891a407d1c9dba88d3110f3b8 100644 (file)
@@ -4149,9 +4149,6 @@ int iscsit_close_connection(
        iscsit_stop_nopin_response_timer(conn);
        iscsit_stop_nopin_timer(conn);
 
-       if (conn->conn_transport->iscsit_wait_conn)
-               conn->conn_transport->iscsit_wait_conn(conn);
-
        /*
         * During Connection recovery drop unacknowledged out of order
         * commands for this connection, and prepare the other commands
@@ -4237,6 +4234,9 @@ int iscsit_close_connection(
        target_sess_cmd_list_set_waiting(sess->se_sess);
        target_wait_for_sess_cmds(sess->se_sess);
 
+       if (conn->conn_transport->iscsit_wait_conn)
+               conn->conn_transport->iscsit_wait_conn(conn);
+
        ahash_request_free(conn->conn_tx_hash);
        if (conn->conn_rx_hash) {
                struct crypto_ahash *tfm;
index 6949ea8bc387c7acac2571862d36f0c3f2e0ca04..51ffd5c002dee2da58281ce0e1c2032dac1ad9e2 100644 (file)
@@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
        }
 
        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
-       bip_set_seed(bip, bio->bi_iter.bi_sector);
+       /* virtual start sector must be in integrity interval units */
+       bip_set_seed(bip, bio->bi_iter.bi_sector >>
+                                 (bi->interval_exp - SECTOR_SHIFT));
 
        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);
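
The seed fix above is a unit conversion: bi_sector counts 512-byte sectors (SECTOR_SHIFT is 9) while the integrity seed counts protection intervals of 1 << interval_exp bytes. A worked example, assuming a 4096-byte protection interval:

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9                  /* 512-byte sectors */

    int main(void)
    {
            uint64_t bi_sector = 4096;      /* LBA in 512-byte units */
            int interval_exp = 12;          /* 4096-byte intervals */

            /* shift = 12 - 9 = 3, i.e. 8 sectors per interval */
            printf("seed = %llu\n", (unsigned long long)
                   (bi_sector >> (interval_exp - SECTOR_SHIFT)));
            return 0;                       /* prints: seed = 512 */
    }
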
index cf3fad2cb87140b810f094d390df69f51e8e3aff..c5b17dd8f587a9d83f4a99b79ec5afd7c1753ac8 100644 (file)
@@ -47,7 +47,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
        for (slot = 0; slot < tbus->num_tcslots; slot++) {
                slotaddr = tbus->slot_base + slot * slotsize;
                extslotaddr = tbus->ext_slot_base + slot * extslotsize;
-               module = ioremap_nocache(slotaddr, slotsize);
+               module = ioremap(slotaddr, slotsize);
                BUG_ON(!module);
 
                offset = TC_OLDCARD;
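
This is the first of many mechanical ioremap_nocache() to ioremap() conversions in this merge (cyclades, moxa, the 8250 and sh-sci serial drivers, the USB controller glue, and others follow). On the architectures touched here ioremap() already returns an uncached mapping suitable for device registers, so the _nocache spelling added nothing and the rename is behavior-preserving. A sketch of the unchanged calling convention:

    #include <linux/io.h>

    /* Sketch: map a device register window; pair with iounmap() on
     * teardown. ioremap() gives an uncached MMIO mapping, which is
     * what ioremap_nocache() also gave. */
    static void __iomem *map_regs(phys_addr_t base, size_t len)
    {
            return ioremap(base, len);      /* NULL on failure */
    }
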
index d1ad512e17089f3e524c03cbb9e783851304a202..3ca71e3812ed4bad4f83dd771af2c8ce2fbd7e85 100644 (file)
@@ -3,6 +3,7 @@
 config OPTEE
        tristate "OP-TEE"
        depends on HAVE_ARM_SMCCC
+       depends on MMU
        help
          This implements the OP-TEE Trusted Execution Environment (TEE)
          driver.
index 0332a5301d6136d83f6cd101fd7be666efc9dd17..d767eebf30bdd5625b10350904b019edbe8166ca 100644 (file)
@@ -28,9 +28,22 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
        shm->size = PAGE_SIZE << order;
 
        if (shm->flags & TEE_SHM_DMA_BUF) {
+               unsigned int nr_pages = 1 << order, i;
+               struct page **pages;
+
+               pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+               if (!pages)
+                       return -ENOMEM;
+
+               for (i = 0; i < nr_pages; i++) {
+                       pages[i] = page;
+                       page++;
+               }
+
                shm->flags |= TEE_SHM_REGISTER;
-               rc = optee_shm_register(shm->ctx, shm, &page, 1 << order,
+               rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
                                        (unsigned long)shm->kaddr);
+               kfree(pages);
        }
 
        return rc;
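
The OP-TEE hunk above expands one physically contiguous allocation into a struct page * array so optee_shm_register() sees one entry per page rather than only the head page. The allocation idiom in isolation; note kcalloc() takes the element size, hence sizeof(*pages):

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Sketch: build a per-page pointer array for 1 << order pages that
     * start at @head; pages of one allocation are physically adjacent,
     * so head + i is the i-th page. The caller kfree()s the array. */
    static struct page **pages_of_order(struct page *head, unsigned int order)
    {
            unsigned int i, nr_pages = 1 << order;
            struct page **pages = kcalloc(nr_pages, sizeof(*pages),
                                          GFP_KERNEL);

            if (!pages)
                    return NULL;
            for (i = 0; i < nr_pages; i++)
                    pages[i] = head + i;
            return pages;
    }
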
index 59b79fc48266e6fa087b897c1ea590462a33f7b3..79b27865c6f42c7342274904db052c138fa2e61f 100644 (file)
@@ -108,7 +108,7 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
 
 config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
        bool "power_allocator"
-       select THERMAL_GOV_POWER_ALLOCATOR
+       depends on THERMAL_GOV_POWER_ALLOCATOR
        help
          Select this if you want to control temperature based on
          system and device power allocation. This governor can only
index 3517883b5cdb923190edad2064ebbf7b4c7bd240..efae0c02d898b7488370e672af8c9c02650c2dac 100644 (file)
@@ -369,6 +369,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
 }
 
 static const struct acpi_device_id int3400_thermal_match[] = {
+       {"INT1040", 0},
        {"INT3400", 0},
        {}
 };
index a7bbd8584ae25222343768a0bbfedd0f3f97d5f4..aeece1e136a5b8a3b749e61ead84bda95a7c4307 100644 (file)
@@ -282,6 +282,7 @@ static int int3403_remove(struct platform_device *pdev)
 }
 
 static const struct acpi_device_id int3403_device_ids[] = {
+       {"INT1043", 0},
        {"INT3403", 0},
        {"", 0},
 };
index 015e7d2015985343edab8f139312706da4e29d1a..0e7cf52369326af5d814c9874c35faf207a98e57 100644 (file)
@@ -110,6 +110,9 @@ static int tsens_register(struct tsens_priv *priv)
        irq = platform_get_irq_byname(pdev, "uplow");
        if (irq < 0) {
                ret = irq;
+               /* For old DTs with no IRQ defined */
+               if (irq == -ENXIO)
+                       ret = 0;
                goto err_put_device;
        }
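
The tsens change above downgrades one specific failure: platform_get_irq_byname() returns a negative errno, and -ENXIO means the device tree simply does not describe that interrupt, which old DTs are allowed to omit. Restated as a condensed helper (a sketch, not the driver's exact code):

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    /* Sketch: tolerate a missing "uplow" IRQ (-ENXIO, old device trees),
     * fail on any other error, hand the IRQ back otherwise. */
    static int get_uplow_irq(struct platform_device *pdev, int *irq_out)
    {
            int irq = platform_get_irq_byname(pdev, "uplow");

            if (irq == -ENXIO)
                    return 0;               /* no IRQ described: carry on */
            if (irq < 0)
                    return irq;             /* real failure */
            *irq_out = irq;
            return 0;
    }
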
 
index 4562c8060d09e156f1b5e4444dd021593db5983e..a6aabfd6e2da40b9c4bdbac2398de68716c376ea 100644 (file)
@@ -3256,7 +3256,7 @@ static int __init cy_detect_isa(void)
                        return nboard;
 
                /* probe for CD1400... */
-               cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
+               cy_isa_address = ioremap(isa_address, CyISA_Ywin);
                if (cy_isa_address == NULL) {
                        printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
                                        "address\n");
@@ -3690,13 +3690,13 @@ static int cy_pci_probe(struct pci_dev *pdev,
                        device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) {
                card_name = "Cyclom-Y";
 
-               addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+               addr0 = ioremap(pci_resource_start(pdev, 0),
                                CyPCI_Yctl);
                if (addr0 == NULL) {
                        dev_err(&pdev->dev, "can't remap ctl region\n");
                        goto err_reg;
                }
-               addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+               addr2 = ioremap(pci_resource_start(pdev, 2),
                                CyPCI_Ywin);
                if (addr2 == NULL) {
                        dev_err(&pdev->dev, "can't remap base region\n");
@@ -3712,7 +3712,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
        } else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) {
                struct RUNTIME_9060 __iomem *ctl_addr;
 
-               ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0),
+               ctl_addr = addr0 = ioremap(pci_resource_start(pdev, 0),
                                CyPCI_Zctl);
                if (addr0 == NULL) {
                        dev_err(&pdev->dev, "can't remap ctl region\n");
@@ -3727,7 +3727,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
 
                mailbox = readl(&ctl_addr->mail_box_0);
 
-               addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
+               addr2 = ioremap(pci_resource_start(pdev, 2),
                                mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
                if (addr2 == NULL) {
                        dev_err(&pdev->dev, "can't remap base region\n");
index 4c1cd49ae95b28f24c9e0fb35e05b93a82319b1b..620d8488b83e35fd415b66a4f630b8f5cffca56a 100644 (file)
@@ -898,7 +898,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
        atomic_set(&priv->xmit_total, 0);
        raw_spin_lock_init(&priv->lock);
 
-       priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start,
+       priv->reg = devm_ioremap(priv->dev, dev->res.start,
                                         resource_size(&dev->res));
        if (!priv->reg) {
                dev_err(priv->dev, "ioremap failed for resource %pR\n",
index 3a1a5e0ee93f1d4bd4738fdade69b791376e6851..9f13f7d49dd78874283efbb3a0286a776b882dc3 100644 (file)
@@ -961,7 +961,7 @@ static int moxa_pci_probe(struct pci_dev *pdev,
                goto err;
        }
 
-       board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000);
+       board->basemem = ioremap(pci_resource_start(pdev, 2), 0x4000);
        if (board->basemem == NULL) {
                dev_err(&pdev->dev, "can't remap io space 2\n");
                retval = -ENOMEM;
@@ -1071,7 +1071,7 @@ static int __init moxa_init(void)
                        brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
                                        numports[i];
                        brd->busType = MOXA_BUS_TYPE_ISA;
-                       brd->basemem = ioremap_nocache(baseaddr[i], 0x4000);
+                       brd->basemem = ioremap(baseaddr[i], 0x4000);
                        if (!brd->basemem) {
                                printk(KERN_ERR "MOXA: can't remap %lx\n",
                                                baseaddr[i]);
index 226adeec2aedc0b231924a3ddaa5b743a5c850c8..ce5309d002805ef6f4f05220204c339a723965d3 100644 (file)
@@ -663,6 +663,12 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
        return AE_OK;
 }
 
+static const struct acpi_device_id serdev_acpi_devices_blacklist[] = {
+       { "INT3511", 0 },
+       { "INT3512", 0 },
+       { },
+};
+
 static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
                                          void *data, void **return_value)
 {
@@ -675,6 +681,10 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
        if (acpi_device_enumerated(adev))
                return AE_OK;
 
+       /* Skip if blacklisted */
+       if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist))
+               return AE_OK;
+
        if (acpi_serdev_check_resources(ctrl, adev))
                return AE_OK;
 
index 0809ae2aa9b141f4ef91b60518c7a566b09d8409..673cda3d011d0c64c0c71a19dfa3052447d21c4f 100644 (file)
@@ -55,7 +55,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
        uart.port.uartclk       = (dev->id.sversion != 0xad) ?
                                        7272727 : 1843200;
        uart.port.mapbase       = address;
-       uart.port.membase       = ioremap_nocache(address, 16);
+       uart.port.membase       = ioremap(address, 16);
        if (!uart.port.membase) {
                dev_warn(&dev->dev, "Failed to map memory\n");
                return -ENOMEM;
index 836e736ae188b82f01af5555e7c622854018b170..e603c66d6cc4656adda5eafd93161808e23bff50 100644 (file)
@@ -1147,7 +1147,7 @@ static int omap8250_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
-       membase = devm_ioremap_nocache(&pdev->dev, regs->start,
+       membase = devm_ioremap(&pdev->dev, regs->start,
                                       resource_size(regs));
        if (!membase)
                return -ENODEV;
index 022924d5ad545306699598bbb9655df16418641c..939685fed3969a35e2d7d414b9ad064ed8d054ed 100644 (file)
@@ -275,7 +275,7 @@ static int pci_plx9050_init(struct pci_dev *dev)
        /*
         * enable/disable interrupts
         */
-       p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+       p = ioremap(pci_resource_start(dev, 0), 0x80);
        if (p == NULL)
                return -ENOMEM;
        writel(irq_config, p + 0x4c);
@@ -299,7 +299,7 @@ static void pci_plx9050_exit(struct pci_dev *dev)
        /*
         * disable interrupts
         */
-       p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+       p = ioremap(pci_resource_start(dev, 0), 0x80);
        if (p != NULL) {
                writel(0, p + 0x4c);
 
@@ -475,7 +475,7 @@ static int pci_siig10x_init(struct pci_dev *dev)
                break;
        }
 
-       p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
+       p = ioremap(pci_resource_start(dev, 0), 0x80);
        if (p == NULL)
                return -ENOMEM;
 
index 90655910b0c768cb527aa97838cac9b2f4107e9b..9ff5dfad590a314ef5f650215fb8c2653d8fdbcf 100644 (file)
@@ -2766,7 +2766,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
                }
 
                if (port->flags & UPF_IOREMAP) {
-                       port->membase = ioremap_nocache(port->mapbase, size);
+                       port->membase = ioremap(port->mapbase, size);
                        if (!port->membase) {
                                release_mem_region(port->mapbase, size);
                                ret = -ENOMEM;
index a8dc8af83f39a354c1b2347a59033ef0dc0aaffd..1ba9bc667e13631328c590f484683188e7d74039 100644 (file)
@@ -2270,27 +2270,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
                mode |= ATMEL_US_USMODE_NORMAL;
        }
 
-       /* set the mode, clock divisor, parity, stop bits and data size */
-       atmel_uart_writel(port, ATMEL_US_MR, mode);
-
-       /*
-        * when switching the mode, set the RTS line state according to the
-        * new mode, otherwise keep the former state
-        */
-       if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
-               unsigned int rts_state;
-
-               if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
-                       /* let the hardware control the RTS line */
-                       rts_state = ATMEL_US_RTSDIS;
-               } else {
-                       /* force RTS line to low level */
-                       rts_state = ATMEL_US_RTSEN;
-               }
-
-               atmel_uart_writel(port, ATMEL_US_CR, rts_state);
-       }
-
        /*
         * Set the baud rate:
         * Fractional baudrate allows to setup output frequency more
@@ -2317,6 +2296,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
 
        if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
                atmel_uart_writel(port, ATMEL_US_BRGR, quot);
+
+       /* set the mode, clock divisor, parity, stop bits and data size */
+       atmel_uart_writel(port, ATMEL_US_MR, mode);
+
+       /*
+        * when switching the mode, set the RTS line state according to the
+        * new mode, otherwise keep the former state
+        */
+       if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
+               unsigned int rts_state;
+
+               if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+                       /* let the hardware control the RTS line */
+                       rts_state = ATMEL_US_RTSDIS;
+               } else {
+                       /* force RTS line to low level */
+                       rts_state = ATMEL_US_RTSEN;
+               }
+
+               atmel_uart_writel(port, ATMEL_US_CR, rts_state);
+       }
+
        atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
        atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
        atmel_port->tx_stopped = false;
index 7b57e840e255d35de7adfb25e49b63560b01a59a..730da413d8ed80cfc39155790c0250ef030ca6a2 100644 (file)
@@ -677,7 +677,7 @@ static void dz_release_port(struct uart_port *uport)
 static int dz_map_port(struct uart_port *uport)
 {
        if (!uport->membase)
-               uport->membase = ioremap_nocache(uport->mapbase,
+               uport->membase = ioremap(uport->mapbase,
                                                 dec_kn_slot_size);
        if (!uport->membase) {
                printk(KERN_ERR "dz: Cannot map MMIO\n");
index fcbea43dc3348b5ee68816e89a2658e5af01b23c..f67226df30d449ee0c05289c6affba1a29d8574a 100644 (file)
@@ -549,7 +549,7 @@ lqasc_request_port(struct uart_port *port)
        }
 
        if (port->flags & UPF_IOREMAP) {
-               port->membase = devm_ioremap_nocache(&pdev->dev,
+               port->membase = devm_ioremap(&pdev->dev,
                        port->mapbase, size);
                if (port->membase == NULL)
                        return -ENOMEM;
index fbc5bc022a39281b5ca0b09e2085d8f4848b652a..164b18372c0202d594c2116543991ac35e9582c5 100644 (file)
@@ -411,7 +411,7 @@ static int meson_uart_request_port(struct uart_port *port)
                return -EBUSY;
        }
 
-       port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+       port->membase = devm_ioremap(port->dev, port->mapbase,
                                             port->mapsize);
        if (!port->membase)
                return -ENOMEM;
index 1cbae0768b1fda8c3b07729b08c39a545a4a80e3..f6c45a796433de838949ccf8eb4ad5d0ae441ee6 100644 (file)
@@ -1580,6 +1580,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
        int num_newlines = 0;
        bool replaced = false;
        void __iomem *tf;
+       int locked = 1;
 
        if (is_uartdm)
                tf = port->membase + UARTDM_TF;
@@ -1592,7 +1593,13 @@ static void __msm_console_write(struct uart_port *port, const char *s,
                        num_newlines++;
        count += num_newlines;
 
-       spin_lock(&port->lock);
+       if (port->sysrq)
+               locked = 0;
+       else if (oops_in_progress)
+               locked = spin_trylock(&port->lock);
+       else
+               spin_lock(&port->lock);
+
        if (is_uartdm)
                msm_reset_dm_count(port, count);
 
@@ -1628,7 +1635,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
                iowrite32_rep(tf, buf, 1);
                i += num_chars;
        }
-       spin_unlock(&port->lock);
+
+       if (locked)
+               spin_unlock(&port->lock);
 }
 
 static void msm_console_write(struct console *co, const char *s,
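
The msm_serial change above adopts the standard oops-safe console locking idiom: if the CPU is already oopsing, the port lock may be held by the context that crashed, so the console writer must only try the lock instead of deadlocking, and the sysrq path skips locking because it already owns the port. The control flow, reduced to a sketch:

    #include <linux/kernel.h>
    #include <linux/serial_core.h>
    #include <linux/spinlock.h>

    /* Sketch of the idiom; the body that writes to the FIFO is elided. */
    static void console_write_skeleton(struct uart_port *port)
    {
            int locked = 1;

            if (port->sysrq)
                    locked = 0;             /* sysrq context holds the lock */
            else if (oops_in_progress)
                    locked = spin_trylock(&port->lock); /* never block */
            else
                    spin_lock(&port->lock);

            /* ... emit characters here ... */

            if (locked)
                    spin_unlock(&port->lock);
    }
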
index 00ce31e8d19ad852ed025f2928ef22ae603e4ea8..fc58a004bef489d03afb9348d89492b8f73cb069 100644 (file)
@@ -474,7 +474,7 @@ static int __init mux_probe(struct parisc_device *dev)
                port->iobase    = 0;
                port->mapbase   = dev->hpa.start + MUX_OFFSET +
                                                (i * MUX_LINE_OFFSET);
-               port->membase   = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET);
+               port->membase   = ioremap(port->mapbase, MUX_LINE_OFFSET);
                port->iotype    = UPIO_MEM;
                port->type      = PORT_MUX;
                port->irq       = 0;
index d2d8b34946851deee4c946f5efdf2abce291c8ed..42c8cc93b603b43aa4a206099dc696889008941d 100644 (file)
@@ -427,7 +427,7 @@ static int owl_uart_request_port(struct uart_port *port)
                return -EBUSY;
 
        if (port->flags & UPF_IOREMAP) {
-               port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+               port->membase = devm_ioremap(port->dev, port->mapbase,
                                resource_size(res));
                if (!port->membase)
                        return -EBUSY;
index 0bdf1687983f5a70e3f632703ecf9fba584bafd5..484b7e8d5381a145ca561571ebe23b5107ea7be5 100644 (file)
@@ -618,7 +618,7 @@ static int pic32_uart_request_port(struct uart_port *port)
                                "pic32_uart_mem"))
                return -EBUSY;
 
-       port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+       port->membase = devm_ioremap(port->dev, port->mapbase,
                                                resource_size(res_mem));
        if (!port->membase) {
                dev_err(port->dev, "Unable to map registers\n");
index ff9a27d48bca85b4abfa4197f9f7198ed158cedd..b5ef86ae2746e4b3b3810e26345cf8f4e1b264c5 100644 (file)
@@ -498,7 +498,7 @@ static int rda_uart_request_port(struct uart_port *port)
                return -EBUSY;
 
        if (port->flags & UPF_IOREMAP) {
-               port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+               port->membase = devm_ioremap(port->dev, port->mapbase,
                                                     resource_size(res));
                if (!port->membase)
                        return -EBUSY;
index 329aced26bd84e97c7145817944f80df1c89ab90..7c99340a3d663768b206e93062ff6c4dee4434a9 100644 (file)
@@ -668,7 +668,7 @@ static int sbd_map_port(struct uart_port *uport)
        struct sbd_duart *duart = sport->duart;
 
        if (!uport->membase)
-               uport->membase = ioremap_nocache(uport->mapbase,
+               uport->membase = ioremap(uport->mapbase,
                                                 DUART_CHANREG_SPACING);
        if (!uport->membase) {
                printk(err);
@@ -676,7 +676,7 @@ static int sbd_map_port(struct uart_port *uport)
        }
 
        if (!sport->memctrl)
-               sport->memctrl = ioremap_nocache(duart->mapctrl,
+               sport->memctrl = ioremap(duart->mapctrl,
                                                 DUART_CHANREG_SPACING);
        if (!sport->memctrl) {
                printk(err);
index b0a6eb106edb83d330e484ce9777d4b96fc350da..7c278278573634df5e0e82833ad6ff827ef6a13f 100644 (file)
@@ -2834,6 +2834,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
        if (uport->cons && uport->dev)
                of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
 
+       tty_port_link_device(port, drv->tty_driver, uport->line);
        uart_configure_port(drv, state, uport);
 
        port->console = uart_console(uport);
index 58bf9d496ba596dd59056a4ab4cb04863d75ac82..87ca6294de0e3a1a08be5e8719e4c80d6ab53128 100644 (file)
@@ -2680,7 +2680,7 @@ static int sci_remap_port(struct uart_port *port)
                return 0;
 
        if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
-               port->membase = ioremap_nocache(port->mapbase, sport->reg_size);
+               port->membase = ioremap(port->mapbase, sport->reg_size);
                if (unlikely(!port->membase)) {
                        dev_err(port->dev, "can't remap port#%d\n", port->line);
                        return -ENXIO;
index 31df23502562383ddfe2cbb0e0d7d98865ed5d21..f60a59d9bf271074dfde293a695ea0d8558e91c9 100644 (file)
@@ -679,6 +679,9 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
        if (ims & SPRD_IMSR_TIMEOUT)
                serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT);
 
+       if (ims & SPRD_IMSR_BREAK_DETECT)
+               serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT);
+
        if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT |
                   SPRD_IMSR_TIMEOUT))
                sprd_rx(port);
index b03d3e458ea2c5ffe48f006eea95891729bd9b8c..89154ac4c577e0a3c16958fa3c6acdf4656cbfbf 100644 (file)
@@ -992,7 +992,7 @@ static void zs_release_port(struct uart_port *uport)
 static int zs_map_port(struct uart_port *uport)
 {
        if (!uport->membase)
-               uport->membase = ioremap_nocache(uport->mapbase,
+               uport->membase = ioremap(uport->mapbase,
                                                 ZS_CHAN_IO_SIZE);
        if (!uport->membase) {
                printk(KERN_ERR "zs: Cannot map MMIO\n");
index 84f26e43b2292f4599ec79301c7591ddf017cc54..0ca13f889d84035aff574ebd83ddb9ed97df070a 100644 (file)
@@ -4054,7 +4054,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
                }
                info->lcr_mem_requested = true;
 
-               info->memory_base = ioremap_nocache(info->phys_memory_base,
+               info->memory_base = ioremap(info->phys_memory_base,
                                                                0x40000);
                if (!info->memory_base) {
                        printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
@@ -4068,7 +4068,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
                        goto errout;
                }
                
-               info->lcr_base = ioremap_nocache(info->phys_lcr_base,
+               info->lcr_base = ioremap(info->phys_lcr_base,
                                                                PAGE_SIZE);
                if (!info->lcr_base) {
                        printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
index e8a9047de45168b207e14802c6e0c5e764f1c896..e506fc489d484754966192d0f270afb8572866ae 100644 (file)
@@ -3450,7 +3450,7 @@ static int claim_resources(struct slgt_info *info)
        else
                info->reg_addr_requested = true;
 
-       info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
+       info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
        if (!info->reg_addr) {
                DBGERR(("%s can't map device registers, addr=%08X\n",
                        info->device_name, info->phys_reg_addr));
index fcb91bf7a15ba30347a2cf0273aa6cf8b5d0a360..b9d974474b6443c8cbc1688ac5075a9aeb6b512d 100644 (file)
@@ -3559,7 +3559,7 @@ static int claim_resources(SLMP_INFO *info)
        else
                info->sca_statctrl_requested = true;
 
-       info->memory_base = ioremap_nocache(info->phys_memory_base,
+       info->memory_base = ioremap(info->phys_memory_base,
                                                                SCA_MEM_SIZE);
        if (!info->memory_base) {
                printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n",
@@ -3568,7 +3568,7 @@ static int claim_resources(SLMP_INFO *info)
                goto errout;
        }
 
-       info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
+       info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE);
        if (!info->lcr_base) {
                printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n",
                        __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
@@ -3577,7 +3577,7 @@ static int claim_resources(SLMP_INFO *info)
        }
        info->lcr_base += info->lcr_offset;
 
-       info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
+       info->sca_base = ioremap(info->phys_sca_base, PAGE_SIZE);
        if (!info->sca_base) {
                printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n",
                        __FILE__,__LINE__,info->device_name, info->phys_sca_base );
@@ -3586,7 +3586,7 @@ static int claim_resources(SLMP_INFO *info)
        }
        info->sca_base += info->sca_offset;
 
-       info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
+       info->statctrl_base = ioremap(info->phys_statctrl_base,
                                                                PAGE_SIZE);
        if (!info->statctrl_base) {
                printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n",
index 8b0ea8c70d73040bae8a09f85e7f086ce6afc0ad..635cf0466b5921c103bcf1b1c5bd657cf32e7e71 100644 (file)
@@ -2124,10 +2124,11 @@ resubmit:
 /*
  * Start the modem : init the data and start kernel thread
  */
-static int uea_boot(struct uea_softc *sc)
+static int uea_boot(struct uea_softc *sc, struct usb_interface *intf)
 {
-       int ret, size;
        struct intr_pkt *intr;
+       int ret = -ENOMEM;
+       int size;
 
        uea_enters(INS_TO_USBDEV(sc));
 
@@ -2152,6 +2153,11 @@ static int uea_boot(struct uea_softc *sc)
        if (UEA_CHIP_VERSION(sc) == ADI930)
                load_XILINX_firmware(sc);
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+               ret = -ENODEV;
+               goto err0;
+       }
+
        intr = kmalloc(size, GFP_KERNEL);
        if (!intr)
                goto err0;
@@ -2163,8 +2169,7 @@ static int uea_boot(struct uea_softc *sc)
        usb_fill_int_urb(sc->urb_int, sc->usb_dev,
                         usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE),
                         intr, size, uea_intr, sc,
-                        sc->usb_dev->actconfig->interface[0]->altsetting[0].
-                        endpoint[0].desc.bInterval);
+                        intf->cur_altsetting->endpoint[0].desc.bInterval);
 
        ret = usb_submit_urb(sc->urb_int, GFP_KERNEL);
        if (ret < 0) {
@@ -2179,6 +2184,7 @@ static int uea_boot(struct uea_softc *sc)
        sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm");
        if (IS_ERR(sc->kthread)) {
                uea_err(INS_TO_USBDEV(sc), "failed to create thread\n");
+               ret = PTR_ERR(sc->kthread);
                goto err2;
        }
 
@@ -2193,7 +2199,7 @@ err1:
        kfree(intr);
 err0:
        uea_leaves(INS_TO_USBDEV(sc));
-       return -ENOMEM;
+       return ret;
 }
 
 /*
@@ -2548,7 +2554,7 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
                }
        }
 
-       ret = uea_boot(sc);
+       ret = uea_boot(sc, intf);
        if (ret < 0)
                goto error;
 
index dbea28495e1ddb49193c9acae8b15a8f08cfb0c6..4e12a32ca392d3954489c9d4a441ad0bcdcb7729 100644 (file)
@@ -1275,7 +1275,7 @@ EXPORT_SYMBOL_GPL(usbatm_usb_disconnect);
 
 static int __init usbatm_usb_init(void)
 {
-       if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) {
+       if (sizeof(struct usbatm_control) > sizeof_field(struct sk_buff, cb)) {
                printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name);
                return -EIO;
        }
index 4c1e755093039d0582ca30922d620009441aed0c..02f6ca2cb1ba1c99d26efd2e225889b0f9c3908d 100644 (file)
@@ -1375,13 +1375,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
  */
 static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
 {
-       struct cdns3_device *priv_dev;
-       struct cdns3 *cdns = data;
+       struct cdns3_device *priv_dev = data;
        irqreturn_t ret = IRQ_NONE;
        u32 reg;
 
-       priv_dev = cdns->gadget_dev;
-
        /* check USB device interrupt */
        reg = readl(&priv_dev->regs->usb_ists);
        if (reg) {
@@ -1419,14 +1416,12 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
  */
 static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
 {
-       struct cdns3_device *priv_dev;
-       struct cdns3 *cdns = data;
+       struct cdns3_device *priv_dev = data;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;
        int bit;
        u32 reg;
 
-       priv_dev = cdns->gadget_dev;
        spin_lock_irqsave(&priv_dev->lock, flags);
 
        reg = readl(&priv_dev->regs->usb_ists);
@@ -2539,7 +2534,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
 
        priv_dev = cdns->gadget_dev;
 
-       devm_free_irq(cdns->dev, cdns->dev_irq, cdns);
+       devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
 
        pm_runtime_mark_last_busy(cdns->dev);
        pm_runtime_put_autosuspend(cdns->dev);
@@ -2710,7 +2705,8 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
        ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
                                        cdns3_device_irq_handler,
                                        cdns3_device_thread_irq_handler,
-                                       IRQF_SHARED, dev_name(cdns->dev), cdns);
+                                       IRQF_SHARED, dev_name(cdns->dev),
+                                       cdns->gadget_dev);
 
        if (ret)
                goto err0;
index b45ceb91c735e71a6f4db81e5dd24aa3a77819e8..48e4a5ca183591fd577b9e3cc9e0174a3b739beb 100644 (file)
@@ -26,6 +26,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd);
 
 struct ehci_ci_priv {
        struct regulator *reg_vbus;
+       bool enabled;
 };
 
 static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
@@ -37,7 +38,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
        int ret = 0;
        int port = HCS_N_PORTS(ehci->hcs_params);
 
-       if (priv->reg_vbus) {
+       if (priv->reg_vbus && enable != priv->enabled) {
                if (port > 1) {
                        dev_warn(dev,
                                "Not support multi-port regulator control\n");
@@ -53,6 +54,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
                                enable ? "enable" : "disable", ret);
                        return ret;
                }
+               priv->enabled = enable;
        }
 
        if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
index 87338f9eb5bec73279acc5e679290875cd4a0072..ed204cbb63ea19719c4a72c13dd28987dcc26f1c 100644 (file)
@@ -156,7 +156,8 @@ static int usb_conn_probe(struct platform_device *pdev)
 
        info->vbus = devm_regulator_get(dev, "vbus");
        if (IS_ERR(info->vbus)) {
-               dev_err(dev, "failed to get vbus\n");
+               if (PTR_ERR(info->vbus) != -EPROBE_DEFER)
+                       dev_err(dev, "failed to get vbus\n");
                return PTR_ERR(info->vbus);
        }
 
index 5f40117e68e76e07949d95182abfe39af7936a21..26bc05e48d8a7414121dd348e7dfc06e6916cedc 100644 (file)
@@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
        [USB_ENDPOINT_XFER_INT] = 1024,
 };
 
-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
-    int asnum, struct usb_host_interface *ifp, int num_ep,
-    unsigned char *buffer, int size)
+static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
+               struct usb_endpoint_descriptor *e2)
+{
+       if (e1->bEndpointAddress == e2->bEndpointAddress)
+               return true;
+
+       if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
+               if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * Check for duplicate endpoint addresses in other interfaces and in the
+ * altsetting currently being parsed.
+ */
+static bool config_endpoint_is_duplicate(struct usb_host_config *config,
+               int inum, int asnum, struct usb_endpoint_descriptor *d)
+{
+       struct usb_endpoint_descriptor *epd;
+       struct usb_interface_cache *intfc;
+       struct usb_host_interface *alt;
+       int i, j, k;
+
+       for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+               intfc = config->intf_cache[i];
+
+               for (j = 0; j < intfc->num_altsetting; ++j) {
+                       alt = &intfc->altsetting[j];
+
+                       if (alt->desc.bInterfaceNumber == inum &&
+                                       alt->desc.bAlternateSetting != asnum)
+                               continue;
+
+                       for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
+                               epd = &alt->endpoint[k].desc;
+
+                               if (endpoint_is_duplicate(epd, d))
+                                       return true;
+                       }
+               }
+       }
+
+       return false;
+}
+
+static int usb_parse_endpoint(struct device *ddev, int cfgno,
+               struct usb_host_config *config, int inum, int asnum,
+               struct usb_host_interface *ifp, int num_ep,
+               unsigned char *buffer, int size)
 {
        unsigned char *buffer0 = buffer;
        struct usb_endpoint_descriptor *d;
@@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                goto skip_to_next_endpoint_or_interface_descriptor;
 
        /* Check for duplicate endpoint addresses */
-       for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
-               if (ifp->endpoint[i].desc.bEndpointAddress ==
-                   d->bEndpointAddress) {
-                       dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
-                           cfgno, inum, asnum, d->bEndpointAddress);
-                       goto skip_to_next_endpoint_or_interface_descriptor;
-               }
+       if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+               dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+                               cfgno, inum, asnum, d->bEndpointAddress);
+               goto skip_to_next_endpoint_or_interface_descriptor;
        }
 
        endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
@@ -346,12 +392,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                        endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
        }
 
-       /* Validate the wMaxPacketSize field */
+       /*
+        * Validate the wMaxPacketSize field.
+        * Some devices have isochronous endpoints in altsetting 0;
+        * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+        * (see the end of section 5.6.3), so don't warn about them.
+        */
        maxp = usb_endpoint_maxp(&endpoint->desc);
-       if (maxp == 0) {
-               dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+       if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+               dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
                    cfgno, inum, asnum, d->bEndpointAddress);
-               goto skip_to_next_endpoint_or_interface_descriptor;
        }
 
        /* Find the highest legal maxpacket size for this endpoint */
@@ -522,8 +572,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
                if (((struct usb_descriptor_header *) buffer)->bDescriptorType
                     == USB_DT_INTERFACE)
                        break;
-               retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
-                   num_ep, buffer, size);
+               retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
+                               alt, num_ep, buffer, size);
                if (retval < 0)
                        return retval;
                ++n;
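
The new checks above widen duplicate-endpoint detection from "same altsetting" to "any interface in the whole configuration", and treat control endpoints specially: a control endpoint is bidirectional, so a collision on the endpoint number alone already makes it a duplicate. A userspace restatement of the predicate (bit 7 of bEndpointAddress is the direction, bits 0-3 the number):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool is_dup(uint8_t a1, bool ctrl1, uint8_t a2, bool ctrl2)
    {
            if (a1 == a2)
                    return true;            /* same address and direction */
            if ((ctrl1 || ctrl2) && (a1 & 0x0f) == (a2 & 0x0f))
                    return true;            /* control: number collision */
            return false;
    }

    int main(void)
    {
            printf("%d\n", is_dup(0x81, false, 0x01, false)); /* 0: IN 1 vs OUT 1 */
            printf("%d\n", is_dup(0x81, true,  0x01, false)); /* 1: ep1 is control */
            return 0;
    }
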
index 9ae2a7a93df20168720413db81e2c882d6e54dda..f0a259937da8e3a3ca9838cd07feb9abe37979ee 100644 (file)
@@ -222,7 +222,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        retval = -EBUSY;
                        goto put_hcd;
                }
-               hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+               hcd->regs = devm_ioremap(&dev->dev, hcd->rsrc_start,
                                hcd->rsrc_len);
                if (hcd->regs == NULL) {
                        dev_dbg(&dev->dev, "error mapping memory\n");
index 281568d464f97d2ffed2618207b3e8737f7e413e..aa45840d82730ab290f656ff6f9c46a278470972 100644 (file)
@@ -1409,7 +1409,17 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                if (hcd->self.uses_pio_for_control)
                        return ret;
-               if (hcd_uses_dma(hcd)) {
+               if (hcd->localmem_pool) {
+                       ret = hcd_alloc_coherent(
+                                       urb->dev->bus, mem_flags,
+                                       &urb->setup_dma,
+                                       (void **)&urb->setup_packet,
+                                       sizeof(struct usb_ctrlrequest),
+                                       DMA_TO_DEVICE);
+                       if (ret)
+                               return ret;
+                       urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
+               } else if (hcd_uses_dma(hcd)) {
                        if (object_is_on_stack(urb->setup_packet)) {
                                WARN_ONCE(1, "setup packet is on stack\n");
                                return -EAGAIN;
@@ -1424,23 +1434,22 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
                                                urb->setup_dma))
                                return -EAGAIN;
                        urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
-               } else if (hcd->localmem_pool) {
-                       ret = hcd_alloc_coherent(
-                                       urb->dev->bus, mem_flags,
-                                       &urb->setup_dma,
-                                       (void **)&urb->setup_packet,
-                                       sizeof(struct usb_ctrlrequest),
-                                       DMA_TO_DEVICE);
-                       if (ret)
-                               return ret;
-                       urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
                }
        }
 
        dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        if (urb->transfer_buffer_length != 0
            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
-               if (hcd_uses_dma(hcd)) {
+               if (hcd->localmem_pool) {
+                       ret = hcd_alloc_coherent(
+                                       urb->dev->bus, mem_flags,
+                                       &urb->transfer_dma,
+                                       &urb->transfer_buffer,
+                                       urb->transfer_buffer_length,
+                                       dir);
+                       if (ret == 0)
+                               urb->transfer_flags |= URB_MAP_LOCAL;
+               } else if (hcd_uses_dma(hcd)) {
                        if (urb->num_sgs) {
                                int n;
 
@@ -1491,15 +1500,6 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
                                else
                                        urb->transfer_flags |= URB_DMA_MAP_SINGLE;
                        }
-               } else if (hcd->localmem_pool) {
-                       ret = hcd_alloc_coherent(
-                                       urb->dev->bus, mem_flags,
-                                       &urb->transfer_dma,
-                                       &urb->transfer_buffer,
-                                       urb->transfer_buffer_length,
-                                       dir);
-                       if (ret == 0)
-                               urb->transfer_flags |= URB_MAP_LOCAL;
                }
                if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
                                URB_SETUP_MAP_LOCAL)))
index f229ad6952c0a941a9952fe619d847cf5ccab58b..3405b146edc94f3e6fa153a769fcccccb7ee4465 100644 (file)
@@ -1192,6 +1192,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                         * PORT_OVER_CURRENT is not. So check for any of them.
                         */
                        if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+                           (portchange & USB_PORT_STAT_C_CONNECTION) ||
                            (portstatus & USB_PORT_STAT_OVERCURRENT) ||
                            (portchange & USB_PORT_STAT_C_OVERCURRENT))
                                set_bit(port1, hub->change_bits);
@@ -2692,7 +2693,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 #define SET_ADDRESS_TRIES      2
 #define GET_DESCRIPTOR_TRIES   2
 #define SET_CONFIG_TRIES       (2 * (use_both_schemes + 1))
-#define USE_NEW_SCHEME(i, scheme)      ((i) / 2 == (int)scheme)
+#define USE_NEW_SCHEME(i, scheme)      ((i) / 2 == (int)(scheme))
 
 #define HUB_ROOT_RESET_TIME    60      /* times are in msec */
 #define HUB_SHORT_RESET_TIME   10
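
The USE_NEW_SCHEME() change is macro hygiene: without parentheses around scheme, an argument containing a low-precedence operator regroups around the comparison. The callers in hub.c pass simple values, so the fix is defensive, but the hazard is easy to demonstrate:

    #include <stdio.h>

    #define USE_OLD(i, scheme)  ((i) / 2 == (int)scheme)    /* before */
    #define USE_NEW(i, scheme)  ((i) / 2 == (int)(scheme))  /* after  */

    int main(void)
    {
            /* pass a conditional expression as the scheme argument */
            printf("%d\n", USE_OLD(0, 0 ? 9 : 1));
            /* 9: parses as ((0 / 2 == (int)0) ? 9 : 1) */
            printf("%d\n", USE_NEW(0, 0 ? 9 : 1));
            /* 0: (0 / 2 == 1), the intended comparison */
            return 0;
    }
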
index 0eab79f82ce41bd107de6e2997543a0aabfaf3a6..da923ec176122210705bb67cda9c81cecf83b3f9 100644 (file)
@@ -45,6 +45,7 @@ void usb_init_urb(struct urb *urb)
        if (urb) {
                memset(urb, 0, sizeof(*urb));
                kref_init(&urb->kref);
+               INIT_LIST_HEAD(&urb->urb_list);
                INIT_LIST_HEAD(&urb->anchor_list);
        }
 }
index 023f0357efd77eae3182aaf596120b8a8847fa8b..294276f7deb9e3451e191d769b960d3d143a5b3b 100644 (file)
@@ -29,7 +29,8 @@
 #define PCI_DEVICE_ID_INTEL_BXT_M              0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL                        0x5aaa
 #define PCI_DEVICE_ID_INTEL_KBP                        0xa2b0
-#define PCI_DEVICE_ID_INTEL_CMLH               0x02ee
+#define PCI_DEVICE_ID_INTEL_CMLLP              0x02ee
+#define PCI_DEVICE_ID_INTEL_CMLH               0x06ee
 #define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
@@ -308,6 +309,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
          (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP),
+         (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
          (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
index 3996b9c4ff8d41fd0f46ebec5ab293661fc89dba..fd1b100d2927eecb57c2fb86f1d52ee24bfaa57b 100644 (file)
@@ -1117,6 +1117,9 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 void dwc3_ep0_interrupt(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
 {
+       struct dwc3_ep  *dep = dwc->eps[event->endpoint_number];
+       u8              cmd;
+
        switch (event->endpoint_event) {
        case DWC3_DEPEVT_XFERCOMPLETE:
                dwc3_ep0_xfer_complete(dwc, event);
@@ -1129,7 +1132,12 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc,
        case DWC3_DEPEVT_XFERINPROGRESS:
        case DWC3_DEPEVT_RXTXFIFOEVT:
        case DWC3_DEPEVT_STREAMEVT:
+               break;
        case DWC3_DEPEVT_EPCMDCMPLT:
+               cmd = DEPEVT_PARAMETER_CMD(event->parameters);
+
+               if (cmd == DWC3_DEPCMD_ENDTRANSFER)
+                       dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
                break;
        }
 }
index a9aba716bf80be70c3a8df033f8e468f59477560..154f3f3e8cff849754904721e2f0730f25fff7cd 100644 (file)
@@ -2467,6 +2467,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
 
 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
 {
+       /*
+        * For OUT direction, host may send less than the setup
+        * length. Return true for all OUT requests.
+        */
+       if (!req->direction)
+               return true;
+
        return req->request.actual == req->request.length;
 }
 
@@ -2491,7 +2498,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
 
        req->request.actual = req->request.length - req->remaining;
 
-       if (!dwc3_gadget_ep_request_completed(req) &&
+       if (!dwc3_gadget_ep_request_completed(req) ||
                        req->num_pending_sgs) {
                __dwc3_gadget_kick_transfer(dep);
                goto out;
@@ -2719,6 +2726,9 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
        WARN_ON_ONCE(ret);
        dep->resource_index = 0;
 
+       if (!interrupt)
+               dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+
        if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
                udelay(100);
 }
index 5567ed2cddbec278a1565216c07ac0a6fff475bc..fa252870c926f1834d8ba6ed29724d1deb32cf7b 100644 (file)
@@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc)
        memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
 
        if (dwc->usb3_lpm_capable)
-               props[prop_idx++].name = "usb3-lpm-capable";
+               props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
 
        if (dwc->usb2_lpm_disable)
-               props[prop_idx++].name = "usb2-lpm-disable";
+               props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
 
        /**
         * WORKAROUND: dwc3 revisions <=3.00a have a limitation
@@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc)
         * This following flag tells XHCI to do just that.
         */
        if (dwc->revision <= DWC3_REVISION_300A)
-               props[prop_idx++].name = "quirk-broken-port-ped";
+               props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
 
        if (prop_idx) {
                ret = platform_device_add_properties(xhci, props);
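
The dwc3 host fix above stops hand-assigning only the .name field of a property_entry and uses the initializer macro instead, so the entry's type and length fields are filled in consistently. A sketch of the pattern, assuming only that PROPERTY_ENTRY_BOOL() from linux/property.h is used as in the hunk:

    #include <linux/property.h>

    /* Sketch: a well-formed boolean property list with a sentinel,
     * suitable for platform_device_add_properties(). */
    static const struct property_entry demo_props[] = {
            PROPERTY_ENTRY_BOOL("usb3-lpm-capable"),
            { /* sentinel */ },
    };
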
index cac991173ac042b2fea6f0aca07bba52c642fc15..971c6b92484add56e477f22f600c7aa974bde664 100644 (file)
@@ -971,7 +971,7 @@ static int __init xdbc_init(void)
                goto free_and_quit;
        }
 
-       base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length);
+       base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
        if (!base) {
                xdbc_trace("failed to remap the io address\n");
                ret = -ENOMEM;
index 6ce044008cf6c4197dda79df4f5f1aab74b846b9..460d5d7c984f50c66ff6bc821cea526986008d2a 100644 (file)
@@ -621,8 +621,12 @@ static void ecm_disable(struct usb_function *f)
 
        DBG(cdev, "ecm deactivated\n");
 
-       if (ecm->port.in_ep->enabled)
+       if (ecm->port.in_ep->enabled) {
                gether_disconnect(&ecm->port);
+       } else {
+               ecm->port.in_ep->desc = NULL;
+               ecm->port.out_ep->desc = NULL;
+       }
 
        usb_ep_disable(ecm->notify);
        ecm->notify->desc = NULL;
index ce1d0235969c369ce02bd2c5fa6dfbfb36a8a549..0bbccac94d6c5eea78050d67bbf42dcf5f028842 100644 (file)
@@ -3509,7 +3509,7 @@ static void ffs_free_inst(struct usb_function_instance *f)
 
 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
 {
-       if (strlen(name) >= FIELD_SIZEOF(struct ffs_dev, name))
+       if (strlen(name) >= sizeof_field(struct ffs_dev, name))
                return -ENAMETOOLONG;
        return ffs_name_dev(to_f_fs_opts(fi)->dev, name);
 }
index d48df36622b74b565b037d5ce56d95dd1b150162..0d8e4a364ca6e0d54d01fc644c4907287b88646c 100644 (file)
@@ -618,6 +618,7 @@ static void rndis_disable(struct usb_function *f)
        gether_disconnect(&rndis->port);
 
        usb_ep_disable(rndis->notify);
+       rndis->notify->desc = NULL;
 }
 
 /*-------------------------------------------------------------------------*/
index ae70ce29d5e42d8e3a4e280de8a9b5ecc969adc9..797d6ace89943a0015a3814bef336b7dde724524 100644 (file)
@@ -445,6 +445,7 @@ config USB_TEGRA_XUDC
        tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
        depends on ARCH_TEGRA || COMPILE_TEST
        depends on PHY_TEGRA_XUSB
+       select USB_ROLE_SWITCH
        help
         Enables NVIDIA Tegra USB 3.0 device mode controller driver.
 
index 57b6f66331cfaf5739952fd173f1997ec8b6b399..bfd1c9e80a1f79722157caab0de4a79bcb0f971c 100644 (file)
@@ -116,7 +116,7 @@ static int udc_pci_probe(
                goto err_memreg;
        }
 
-       dev->virt_addr = ioremap_nocache(resource, len);
+       dev->virt_addr = ioremap(resource, len);
        if (!dev->virt_addr) {
                dev_dbg(&pdev->dev, "start address cannot be mapped\n");
                retval = -EFAULT;
index c3721225b61ed29632e4bcb55ab6a3364da2b9bf..4a46f661d0e41fe7c1745cfeb42cd487b31bea87 100644 (file)
@@ -1782,7 +1782,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        dev->got_region = 1;
 
-       base = ioremap_nocache(resource, len);
+       base = ioremap(resource, len);
        if (base == NULL) {
                DBG(dev, "can't map memory\n");
                retval = -EFAULT;
index 247de0faaeb7f22df485969648438d8cf601ec7b..a8273b589456b930be62c403d79a683025ed2c81 100644 (file)
@@ -2323,7 +2323,7 @@ net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
                        goto err;
                }
 
-               mem_mapped_addr[i] = ioremap_nocache(resource, len);
+               mem_mapped_addr[i] = ioremap(resource, len);
                if (mem_mapped_addr[i] == NULL) {
                        release_mem_region(resource, len);
                        dev_dbg(dev->dev, "can't map memory\n");
@@ -2401,7 +2401,7 @@ net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
                        goto err;
                }
 
-               mem_mapped_addr[i] = ioremap_nocache(resource, len);
+               mem_mapped_addr[i] = ioremap(resource, len);
                if (mem_mapped_addr[i] == NULL) {
                        release_mem_region(resource, len);
                        dev_dbg(dev->dev, "can't map memory\n");
@@ -2625,7 +2625,7 @@ net2272_plat_probe(struct platform_device *pdev)
                ret = -EBUSY;
                goto err;
        }
-       dev->base_addr = ioremap_nocache(base, len);
+       dev->base_addr = ioremap(base, len);
        if (!dev->base_addr) {
                dev_dbg(dev->dev, "can't map memory\n");
                ret = -EFAULT;
index 51efee21915ff12586dfe43c54ec8e682ec5cacc..1fd1b9186e46eab97be02e73b75be42b0da65cdf 100644 (file)
@@ -3659,7 +3659,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         * 8051 code into the chip, e.g. to turn on PCI PM.
         */
 
-       base = ioremap_nocache(resource, len);
+       base = ioremap(resource, len);
        if (base == NULL) {
                ep_dbg(dev, "can't map memory\n");
                retval = -EFAULT;
index a2b610dbedfc60911dd6b8be40ef0ecde561fed1..2d462fbbe0a6fbd870c992e105d303fad6049977 100644 (file)
@@ -107,7 +107,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
        if (!request_mem_region(res->start, res_len, "mab regs"))
                return -EBUSY;
 
-       dev->mab_regs = ioremap_nocache(res->start, res_len);
+       dev->mab_regs = ioremap(res->start, res_len);
        if (dev->mab_regs == NULL) {
                retval = -ENOMEM;
                goto err1;
@@ -124,7 +124,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
                retval = -EBUSY;
                goto err2;
        }
-       dev->usbid_regs = ioremap_nocache(res->start, res_len);
+       dev->usbid_regs = ioremap(res->start, res_len);
        if (dev->usbid_regs == NULL) {
                retval = -ENOMEM;
                goto err3;
@@ -178,7 +178,7 @@ int usb_hcd_msp_probe(const struct hc_driver *driver,
                retval = -EBUSY;
                goto err1;
        }
-       hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+       hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
        if (!hcd->regs) {
                pr_debug("ioremap failed");
                retval = -ENOMEM;
index aa2f77f1506ddadead56753697b7d7c8a494c592..8a5c9b3ebe1e74e982ff49060e48f1938049bf1b 100644 (file)
 
 /*-------------------------------------------------------------------------*/
 
+/* PID Codes that are used here, from EHCI specification, Table 3-16. */
+#define PID_CODE_IN    1
+#define PID_CODE_SETUP 2
+
 /* fill a qtd, returning how much of the buffer we were able to queue up */
 
 static int
@@ -190,7 +194,7 @@ static int qtd_copy_status (
        int     status = -EINPROGRESS;
 
        /* count IN/OUT bytes, not SETUP (even short packets) */
-       if (likely (QTD_PID (token) != 2))
+       if (likely(QTD_PID(token) != PID_CODE_SETUP))
                urb->actual_length += length - QTD_LENGTH (token);
 
        /* don't modify error codes */
@@ -206,6 +210,13 @@ static int qtd_copy_status (
                if (token & QTD_STS_BABBLE) {
                        /* FIXME "must" disable babbling device's port too */
                        status = -EOVERFLOW;
+               /*
+                * When MMF is active and PID Code is IN, queue is halted.
+                * EHCI Specification, Table 4-13.
+                */
+               } else if ((token & QTD_STS_MMF) &&
+                                       (QTD_PID(token) == PID_CODE_IN)) {
+                       status = -EPROTO;
                /* CERR nonzero + halt --> stall */
                } else if (QTD_CERR(token)) {
                        status = -EPIPE;
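
For context on the qtd_copy_status() change above: both the PID code and the status bits live in the qTD token dword (EHCI spec, Tables 3-16 and 4-13). A rough sketch of the decoding this hunk relies on, assuming the usual ehci.h encodings; the bit positions below are from the spec, not copied from this diff:

#include <linux/types.h>

/* qTD token fields (EHCI spec, section 3.5.3); sketch, not from this diff */
#define EX_QTD_STS_MMF	(1 << 2)		/* missed micro-frame */
#define EX_QTD_PID(tok)	(((tok) >> 8) & 0x3)	/* 0=OUT, 1=IN, 2=SETUP */

static bool example_mmf_halts_queue(u32 token)
{
	/* MMF together with an IN PID halts the queue (EHCI Table 4-13) */
	return (token & EX_QTD_STS_MMF) && EX_QTD_PID(token) == 1;
}
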
index 38183ac438c673871b08ecb485bcf6b20893bb18..1371b0c249ece0d4985222e74f91ea6caf637128 100644 (file)
@@ -415,13 +415,17 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
        }
 
        da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
-       if (IS_ERR(da8xx_ohci->oc_gpio))
+       if (IS_ERR(da8xx_ohci->oc_gpio)) {
+               error = PTR_ERR(da8xx_ohci->oc_gpio);
                goto err;
+       }
 
        if (da8xx_ohci->oc_gpio) {
                oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio);
-               if (oc_irq < 0)
+               if (oc_irq < 0) {
+                       error = oc_irq;
                        goto err;
+               }
 
                error = devm_request_threaded_irq(dev, oc_irq, NULL,
                                ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
index 6c7f0a876b96ba3c4901d2a2934741fff398fe2b..beb2efa71341f6b38bec258160165154ef4ceb2d 100644 (file)
@@ -1150,7 +1150,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
        if (!mmio_resource_enabled(pdev, 0))
                return;
 
-       base = ioremap_nocache(pci_resource_start(pdev, 0), len);
+       base = ioremap(pci_resource_start(pdev, 0), len);
        if (base == NULL)
                return;
 
index b7d23c4387569e290a7482389169aa932312be7f..7a3a29e5e9d29d33bec4e9ff45e50f26986dbf96 100644 (file)
@@ -806,7 +806,7 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
 
 static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
                                             u32 *status, u32 portsc,
-                                            unsigned long flags)
+                                            unsigned long *flags)
 {
        struct xhci_bus_state *bus_state;
        struct xhci_hcd *xhci;
@@ -860,11 +860,11 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
                xhci_test_and_clear_bit(xhci, port, PORT_PLC);
                xhci_set_link_state(xhci, port, XDEV_U0);
 
-               spin_unlock_irqrestore(&xhci->lock, flags);
+               spin_unlock_irqrestore(&xhci->lock, *flags);
                time_left = wait_for_completion_timeout(
                        &bus_state->rexit_done[wIndex],
                        msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS));
-               spin_lock_irqsave(&xhci->lock, flags);
+               spin_lock_irqsave(&xhci->lock, *flags);
 
                if (time_left) {
                        slot_id = xhci_find_slot_id_by_port(hcd, xhci,
@@ -920,11 +920,13 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
 {
        struct xhci_bus_state *bus_state;
        struct xhci_hcd *xhci;
+       struct usb_hcd *hcd;
        u32 link_state;
        u32 portnum;
 
        bus_state = &port->rhub->bus_state;
        xhci = hcd_to_xhci(port->rhub->hcd);
+       hcd = port->rhub->hcd;
        link_state = portsc & PORT_PLS_MASK;
        portnum = port->hcd_portnum;
 
@@ -952,12 +954,20 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
                        bus_state->suspended_ports &= ~(1 << portnum);
        }
 
+       /* remote wake resume signaling complete */
+       if (bus_state->port_remote_wakeup & (1 << portnum) &&
+           link_state != XDEV_RESUME &&
+           link_state != XDEV_RECOVERY) {
+               bus_state->port_remote_wakeup &= ~(1 << portnum);
+               usb_hcd_end_port_resume(&hcd->self, portnum);
+       }
+
        xhci_hub_report_usb3_link_state(xhci, status, portsc);
        xhci_del_comp_mod_timer(xhci, portsc, portnum);
 }
 
 static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
-                                     u32 portsc, unsigned long flags)
+                                     u32 portsc, unsigned long *flags)
 {
        struct xhci_bus_state *bus_state;
        u32 link_state;
@@ -1007,7 +1017,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
 static u32 xhci_get_port_status(struct usb_hcd *hcd,
                struct xhci_bus_state *bus_state,
        u16 wIndex, u32 raw_port_status,
-               unsigned long flags)
+               unsigned long *flags)
        __releases(&xhci->lock)
        __acquires(&xhci->lock)
 {
@@ -1130,7 +1140,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                }
                trace_xhci_get_port_status(wIndex, temp);
                status = xhci_get_port_status(hcd, bus_state, wIndex, temp,
-                                             flags);
+                                             &flags);
                if (status == 0xffffffff)
                        goto error;
 
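
The flags-to-pointer conversion in the xhci-hub.c hunks above matters because spin_lock_irqsave() stores the saved IRQ state in its second argument: a callee that drops and retakes the lock must update the caller's copy, not a by-value duplicate that is thrown away on return. A generic sketch of the pattern, with illustrative names:

#include <linux/spinlock.h>

/*
 * A callee that temporarily drops the caller's lock must take flags by
 * pointer, so the IRQ state saved by its own spin_lock_irqsave() is the
 * one the caller eventually restores.
 */
static void example_wait_unlocked(spinlock_t *lock, unsigned long *flags)
{
	spin_unlock_irqrestore(lock, *flags);
	/* ... sleep, wait_for_completion_timeout(), etc. ... */
	spin_lock_irqsave(lock, *flags);
}
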
index e16eda6e2b8b22b768387a5c0c2a5091c4573059..3b1388fa2f36e74093e2dc2abc0083cd650266c5 100644 (file)
@@ -1909,13 +1909,17 @@ no_bw:
        xhci->usb3_rhub.num_ports = 0;
        xhci->num_active_eps = 0;
        kfree(xhci->usb2_rhub.ports);
+       kfree(xhci->usb2_rhub.psi);
        kfree(xhci->usb3_rhub.ports);
+       kfree(xhci->usb3_rhub.psi);
        kfree(xhci->hw_ports);
        kfree(xhci->rh_bw);
        kfree(xhci->ext_caps);
 
        xhci->usb2_rhub.ports = NULL;
+       xhci->usb2_rhub.psi = NULL;
        xhci->usb3_rhub.ports = NULL;
+       xhci->usb3_rhub.psi = NULL;
        xhci->hw_ports = NULL;
        xhci->rh_bw = NULL;
        xhci->ext_caps = NULL;
index a0025d23b25735b86dbf12eb1838130627319e61..4917c5b033faccd0f6b9fe903dbf99cb8a47d62c 100644 (file)
@@ -519,6 +519,18 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
        retval = xhci_resume(xhci, hibernated);
        return retval;
 }
+
+static void xhci_pci_shutdown(struct usb_hcd *hcd)
+{
+       struct xhci_hcd         *xhci = hcd_to_xhci(hcd);
+       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
+
+       xhci_shutdown(hcd);
+
+       /* Yet another workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               pci_set_power_state(pdev, PCI_D3hot);
+}
 #endif /* CONFIG_PM */
 
 /*-------------------------------------------------------------------------*/
@@ -556,6 +568,7 @@ static int __init xhci_pci_init(void)
 #ifdef CONFIG_PM
        xhci_pci_hc_driver.pci_suspend = xhci_pci_suspend;
        xhci_pci_hc_driver.pci_resume = xhci_pci_resume;
+       xhci_pci_hc_driver.shutdown = xhci_pci_shutdown;
 #endif
        return pci_register_driver(&xhci_pci_driver);
 }
index 6475c3d3b43b660a6bc7cc06ad994fcf8f46fc40..d23f7408c81f1e4409389a5b51b3b05ff93275a0 100644 (file)
@@ -1628,7 +1628,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
                slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
                if (slot_id && xhci->devs[slot_id])
                        xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
-               bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
        }
 
        if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1648,6 +1647,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                         */
                        bus_state->port_remote_wakeup |= 1 << hcd_portnum;
                        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+                       usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
                        xhci_set_link_state(xhci, port, XDEV_U0);
                        /* Need to wait until the next link state change
                         * indicates the device is actually in U0.
@@ -1688,7 +1688,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
                if (slot_id && xhci->devs[slot_id])
                        xhci_ring_device(xhci, slot_id);
                if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
-                       bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
                        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
                        usb_wakeup_notification(hcd->self.root_hub,
                                        hcd_portnum + 1);
@@ -2382,7 +2381,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        case COMP_SUCCESS:
                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
                        break;
-               if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+               if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
+                   ep_ring->last_td_was_short)
                        trb_comp_code = COMP_SHORT_PACKET;
                else
                        xhci_warn_ratelimited(xhci,
index 6721d059f58a1a849d1acaf983e47647bba7698a..dbac0fa9748d5fcea4bbf44ce610c4ecd1d5b5c7 100644 (file)
@@ -770,7 +770,7 @@ static void xhci_stop(struct usb_hcd *hcd)
  *
  * This will only ever be called with the main usb_hcd (the USB3 roothub).
  */
-static void xhci_shutdown(struct usb_hcd *hcd)
+void xhci_shutdown(struct usb_hcd *hcd)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
@@ -789,11 +789,8 @@ static void xhci_shutdown(struct usb_hcd *hcd)
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_shutdown completed - status = %x",
                        readl(&xhci->op_regs->status));
-
-       /* Yet another workaround for spurious wakeups at shutdown with HSW */
-       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
-               pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
 }
+EXPORT_SYMBOL_GPL(xhci_shutdown);
 
 #ifdef CONFIG_PM
 static void xhci_save_registers(struct xhci_hcd *xhci)
@@ -973,7 +970,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci)
 int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 {
        int                     rc = 0;
-       unsigned int            delay = XHCI_MAX_HALT_USEC;
+       unsigned int            delay = XHCI_MAX_HALT_USEC * 2;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        u32                     command;
        u32                     res;
index dc6f62a4b1979e8037422fae9163e4edcd344185..13d8838cd552be01b145e0d2f78dc815c3ffad0b 100644 (file)
@@ -2050,6 +2050,7 @@ int xhci_start(struct xhci_hcd *xhci);
 int xhci_reset(struct xhci_hcd *xhci);
 int xhci_run(struct usb_hcd *hcd);
 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
+void xhci_shutdown(struct usb_hcd *hcd);
 void xhci_init_driver(struct hc_driver *drv,
                      const struct xhci_driver_overrides *over);
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
index 07cc82ff327c986b045e413f5f268a4663a2a8d4..ccd30f83588846a0bfd1ffff60bb98fd42d28c41 100644 (file)
@@ -50,7 +50,7 @@ static int isp1761_pci_init(struct pci_dev *dev)
        }
 
        /* map available memory */
-       iobase = ioremap_nocache(mem_start, mem_length);
+       iobase = ioremap(mem_start, mem_length);
        if (!iobase) {
                printk(KERN_ERR "Error ioremap failed\n");
                release_mem_region(mem_start, mem_length);
@@ -101,7 +101,7 @@ static int isp1761_pci_init(struct pci_dev *dev)
                return -EBUSY;
        }
 
-       iobase = ioremap_nocache(mem_start, mem_length);
+       iobase = ioremap(mem_start, mem_length);
        if (!iobase) {
                printk(KERN_ERR "ioremap #1\n");
                release_mem_region(mem_start, mem_length);
index 6f5edb9fc61e024cc9549497c7d2629230b585e0..d8d157c4c271d32e44eb923bccef4dba6cd4192f 100644 (file)
@@ -669,7 +669,7 @@ static int adu_probe(struct usb_interface *interface,
        init_waitqueue_head(&dev->read_wait);
        init_waitqueue_head(&dev->write_wait);
 
-       res = usb_find_common_endpoints_reverse(&interface->altsetting[0],
+       res = usb_find_common_endpoints_reverse(interface->cur_altsetting,
                        NULL, NULL,
                        &dev->interrupt_in_endpoint,
                        &dev->interrupt_out_endpoint);
index 4afb5ddfd361f29b7506272bc270b02592aaff5f..e9437a176518a5f137a4df32d393b483e6d3b23c 100644 (file)
@@ -322,7 +322,7 @@ static int idmouse_probe(struct usb_interface *interface,
        int result;
 
        /* check if we have gotten the data or the hid interface */
-       iface_desc = &interface->altsetting[0];
+       iface_desc = interface->cur_altsetting;
        if (iface_desc->desc.bInterfaceClass != 0x0A)
                return -ENODEV;
 
index ac2b4fcc265f65c10a128bcf092de3a134319dc8..f48a23adbc35ddbbc66c5227d8c59e3413791b05 100644 (file)
@@ -1039,12 +1039,18 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
 
                mutex_lock(&rp->fetch_lock);
                spin_lock_irqsave(&rp->b_lock, flags);
-               mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
-               kfree(rp->b_vec);
-               rp->b_vec  = vec;
-               rp->b_size = size;
-               rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
-               rp->cnt_lost = 0;
+               if (rp->mmap_active) {
+                       mon_free_buff(vec, size/CHUNK_SIZE);
+                       kfree(vec);
+                       ret = -EBUSY;
+               } else {
+                       mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
+                       kfree(rp->b_vec);
+                       rp->b_vec  = vec;
+                       rp->b_size = size;
+                       rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
+                       rp->cnt_lost = 0;
+               }
                spin_unlock_irqrestore(&rp->b_lock, flags);
                mutex_unlock(&rp->fetch_lock);
                }
@@ -1216,13 +1222,21 @@ mon_bin_poll(struct file *file, struct poll_table_struct *wait)
 static void mon_bin_vma_open(struct vm_area_struct *vma)
 {
        struct mon_reader_bin *rp = vma->vm_private_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rp->b_lock, flags);
        rp->mmap_active++;
+       spin_unlock_irqrestore(&rp->b_lock, flags);
 }
 
 static void mon_bin_vma_close(struct vm_area_struct *vma)
 {
+       unsigned long flags;
+
        struct mon_reader_bin *rp = vma->vm_private_data;
+       spin_lock_irqsave(&rp->b_lock, flags);
        rp->mmap_active--;
+       spin_unlock_irqrestore(&rp->b_lock, flags);
 }
 
 /*
@@ -1234,16 +1248,12 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
        unsigned long offset, chunk_idx;
        struct page *pageptr;
 
-       mutex_lock(&rp->fetch_lock);
        offset = vmf->pgoff << PAGE_SHIFT;
-       if (offset >= rp->b_size) {
-               mutex_unlock(&rp->fetch_lock);
+       if (offset >= rp->b_size)
                return VM_FAULT_SIGBUS;
-       }
        chunk_idx = offset / CHUNK_SIZE;
        pageptr = rp->b_vec[chunk_idx].pg;
        get_page(pageptr);
-       mutex_unlock(&rp->fetch_lock);
        vmf->page = pageptr;
        return 0;
 }
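
The usbmon hunks above close a race between mmap() and the SETB resize ioctl: mmap_active is now read and written only under b_lock, a resize is refused with -EBUSY while any mapping exists, and the fault handler can therefore walk b_vec without taking fetch_lock. A condensed sketch of the guard, with hypothetical names:

#include <linux/errno.h>
#include <linux/spinlock.h>

struct example_ring {
	spinlock_t lock;
	int mmap_active;	/* mappings currently pinning the buffer */
	void *buf;
};

/* Refuse to swap the buffer out from under an active mapping. */
static int example_replace_buf(struct example_ring *r, void *newbuf)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&r->lock, flags);
	if (r->mmap_active)
		ret = -EBUSY;		/* caller frees newbuf */
	else
		r->buf = newbuf;	/* caller frees the old buffer */
	spin_unlock_irqrestore(&r->lock, flags);
	return ret;
}
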
index 5261f8dfedecd4c6fa4b7da20b43edce50cd0d62..e3b8c84ccdb80c6087e14dae2e3fca5e3c0122fd 100644 (file)
@@ -75,14 +75,17 @@ static struct musb_hdrc_platform_data jz4740_musb_platform_data = {
 static int jz4740_musb_init(struct musb *musb)
 {
        struct device *dev = musb->controller->parent;
+       int err;
 
        if (dev->of_node)
                musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
        else
                musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
        if (IS_ERR(musb->xceiv)) {
-               dev_err(dev, "No transceiver configured\n");
-               return PTR_ERR(musb->xceiv);
+               err = PTR_ERR(musb->xceiv);
+               if (err != -EPROBE_DEFER)
+                       dev_err(dev, "No transceiver configured: %d", err);
+               return err;
        }
 
        /* Silicon does not implement ConfigData register.
index 15cca912c53e3e5849ac3f54b9f4ab8d57340d6a..5ebf30bd61bd1f4da6b45e0935d988f261a09f83 100644 (file)
@@ -1840,6 +1840,9 @@ ATTRIBUTE_GROUPS(musb);
 #define MUSB_QUIRK_B_INVALID_VBUS_91   (MUSB_DEVCTL_BDEVICE | \
                                         (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
                                         MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_B_DISCONNECT_99     (MUSB_DEVCTL_BDEVICE | \
+                                        (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+                                        MUSB_DEVCTL_SESSION)
 #define MUSB_QUIRK_A_DISCONNECT_19     ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
                                         MUSB_DEVCTL_SESSION)
 
@@ -1862,6 +1865,11 @@ static void musb_pm_runtime_check_session(struct musb *musb)
        s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
                MUSB_DEVCTL_HR;
        switch (devctl & ~s) {
+       case MUSB_QUIRK_B_DISCONNECT_99:
+               musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+               schedule_delayed_work(&musb->irq_work,
+                                     msecs_to_jiffies(1000));
+               break;
        case MUSB_QUIRK_B_INVALID_VBUS_91:
                if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
@@ -2310,6 +2318,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb_disable_interrupts(musb);
        musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 
+       /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
+       musb_writeb(musb->mregs, MUSB_POWER, 0);
+
        /* Init IRQ workqueue before request_irq */
        INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
        INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
index 5fc6825745f21bd6246d71b99fd98d7313396f01..2d3751d885b429cb008e46dbfab5a295b815b794 100644 (file)
@@ -425,7 +425,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
        controller->controller.channel_abort = dma_channel_abort;
 
        if (request_irq(irq, dma_controller_irq, 0,
-                       dev_name(musb->controller), &controller->controller)) {
+                       dev_name(musb->controller), controller)) {
                dev_err(dev, "request_irq %d failed!\n", irq);
                musb_dma_controller_destroy(&controller->controller);
 
index 8273126ffdf4bb03edfae115973ca3562a3a7104..63a00ff26655e3e4614ce9e745b40d0ee4d03b39 100644 (file)
@@ -169,8 +169,8 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
 void usb_role_switch_put(struct usb_role_switch *sw)
 {
        if (!IS_ERR_OR_NULL(sw)) {
-               put_device(&sw->dev);
                module_put(sw->dev.parent->driver->owner);
+               put_device(&sw->dev);
        }
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_put);
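
The reordering in usb_role_switch_put() above is a use-after-free fix: put_device() can drop the last reference and free the switch, after which sw->dev.parent->driver->owner dereferences freed memory, so the module reference has to be released while sw is still valid. The general rule, sketched here with illustrative names and a hypothetical example_put() helper, is to stop touching an object before dropping the reference that keeps it alive:

#include <linux/module.h>

struct example_obj {
	struct module *owner;
	/* ... refcounted; example_put() may free it ... */
};

void example_put(struct example_obj *obj);	/* hypothetical ref drop */

static void example_release(struct example_obj *obj)
{
	/*
	 * Wrong order: example_put(obj); module_put(obj->owner);
	 * obj->owner is a use-after-free if the last reference just dropped.
	 */
	module_put(obj->owner);
	example_put(obj);	/* obj must not be touched after this */
}
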
index 409851306e99295337836db5d0f4f9085213350f..80d6559bbcb2832a27c712aed701a9eb074f4482 100644 (file)
@@ -161,7 +161,7 @@ static int intel_xhci_usb_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;
-       data->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       data->base = devm_ioremap(dev, res->start, resource_size(res));
        if (!data->base)
                return -ENOMEM;
 
index df582fe855f06b727aa517487703177ccfbf8d79..d3f420f3a083570d4d3372514c39430caa9fc8d8 100644 (file)
@@ -642,9 +642,13 @@ static int ch341_tiocmget(struct tty_struct *tty)
 static int ch341_reset_resume(struct usb_serial *serial)
 {
        struct usb_serial_port *port = serial->port[0];
-       struct ch341_private *priv = usb_get_serial_port_data(port);
+       struct ch341_private *priv;
        int ret;
 
+       priv = usb_get_serial_port_data(port);
+       if (!priv)
+               return 0;
+
        /* reconfigure ch341 serial port after bus-reset */
        ch341_configure(serial->dev, priv);
 
index 48a439298a68fa8533df50f6e65f0aa7809c1b18..5737add6a2a436b440792a14112288c4c4801c02 100644 (file)
@@ -716,7 +716,7 @@ static void edge_interrupt_callback(struct urb *urb)
                        if (txCredits) {
                                port = edge_serial->serial->port[portNumber];
                                edge_port = usb_get_serial_port_data(port);
-                               if (edge_port->open) {
+                               if (edge_port && edge_port->open) {
                                        spin_lock_irqsave(&edge_port->ep_lock,
                                                          flags);
                                        edge_port->txCredits += txCredits;
@@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state)
 static void process_rcvd_data(struct edgeport_serial *edge_serial,
                                unsigned char *buffer, __u16 bufferLength)
 {
-       struct device *dev = &edge_serial->serial->dev->dev;
+       struct usb_serial *serial = edge_serial->serial;
+       struct device *dev = &serial->dev->dev;
        struct usb_serial_port *port;
        struct edgeport_port *edge_port;
        __u16 lastBufferLength;
@@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
 
                        /* spit this data back into the tty driver if this
                           port is open */
-                       if (rxLen) {
-                               port = edge_serial->serial->port[
-                                                       edge_serial->rxPort];
+                       if (rxLen && edge_serial->rxPort < serial->num_ports) {
+                               port = serial->port[edge_serial->rxPort];
                                edge_port = usb_get_serial_port_data(port);
-                               if (edge_port->open) {
+                               if (edge_port && edge_port->open) {
                                        dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
                                                __func__, rxLen,
                                                edge_serial->rxPort);
@@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
                                                        rxLen);
                                        edge_port->port->icount.rx += rxLen;
                                }
-                               buffer += rxLen;
                        }
+                       buffer += rxLen;
                        break;
 
                case EXPECT_HDR3:       /* Expect 3rd byte of status header */
@@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
        __u8 code = edge_serial->rxStatusCode;
 
        /* switch the port pointer to the one being currently talked about */
+       if (edge_serial->rxPort >= edge_serial->serial->num_ports)
+               return;
        port = edge_serial->serial->port[edge_serial->rxPort];
        edge_port = usb_get_serial_port_data(port);
        if (edge_port == NULL) {
@@ -2901,16 +2903,18 @@ static int edge_startup(struct usb_serial *serial)
        response = 0;
 
        if (edge_serial->is_epic) {
+               struct usb_host_interface *alt;
+
+               alt = serial->interface->cur_altsetting;
+
                /* EPIC thing, set up our interrupt polling now and our read
                 * urb, so that the device knows it really is connected. */
                interrupt_in_found = bulk_in_found = bulk_out_found = false;
-               for (i = 0; i < serial->interface->altsetting[0]
-                                               .desc.bNumEndpoints; ++i) {
+               for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
                        struct usb_endpoint_descriptor *endpoint;
                        int buffer_size;
 
-                       endpoint = &serial->interface->altsetting[0].
-                                                       endpoint[i].desc;
+                       endpoint = &alt->endpoint[i].desc;
                        buffer_size = usb_endpoint_maxp(endpoint);
                        if (!interrupt_in_found &&
                            (usb_endpoint_is_int_in(endpoint))) {
index e66a59ef43a1cb59cfa0d41b8af8c7316ededa69..aa3dbce22cfbe857af446804f28440c691729cb7 100644 (file)
@@ -1058,6 +1058,8 @@ static void       usa49_glocont_callback(struct urb *urb)
        for (i = 0; i < serial->num_ports; ++i) {
                port = serial->port[i];
                p_priv = usb_get_serial_port_data(port);
+               if (!p_priv)
+                       continue;
 
                if (p_priv->resend_cont) {
                        dev_dbg(&port->dev, "%s - sending setup\n", __func__);
@@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb)
        for (i = 0; i < serial->num_ports; ++i) {
                port = serial->port[i];
                p_priv = usb_get_serial_port_data(port);
+               if (!p_priv)
+                       continue;
 
                if (p_priv->resend_cont) {
                        dev_dbg(&port->dev, "%s - sending setup\n", __func__);
index cb7aac9cd9e72aadac7bc474ffecc4b57e88e828..ed2b4e6dca385bf4d6ae234e83bf11dd3e76df85 100644 (file)
@@ -113,7 +113,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
        retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
                                requesttype,
                                USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
-                               0, 0, buffer, 1, 0);
+                               0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT);
        kfree(buffer);
 
        if (retval < 0)
index e9491d400a24fae06c90387f585a3c88e38a71bd..084cc2fff3ae318f4819bc495f865c7952d79983 100644 (file)
@@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
 #define QUECTEL_PRODUCT_EM12                   0x0512
+#define QUECTEL_PRODUCT_RM500Q                 0x0800
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -567,6 +568,9 @@ static void option_instat_callback(struct urb *urb);
 /* Interface must have two endpoints */
 #define NUMEP2         BIT(16)
 
+/* Device needs ZLP */
+#define ZLP            BIT(17)
+
 
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1101,6 +1105,11 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+         .driver_info = ZLP },
+
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1172,6 +1181,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),    /* Telit ME910 (ECM) */
          .driver_info = NCTRL(0) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff),    /* Telit ME910G1 */
+         .driver_info = NCTRL(0) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1196,6 +1207,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
          .driver_info = NCTRL(0) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x9010),                          /* Telit SBL FN980 flashing device */
+         .driver_info = NCTRL(0) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) },
@@ -2097,6 +2110,9 @@ static int option_attach(struct usb_serial *serial)
        if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
                data->use_send_setup = 1;
 
+       if (device_flags & ZLP)
+               data->use_zlp = 1;
+
        spin_lock_init(&data->susp_lock);
 
        usb_set_serial_data(serial, data);
index a62981ca7a73557bbc788a994ed99df82d669a34..f93b81a297d6798f6b10da299945e60533c8d56f 100644 (file)
@@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
        u8 newMSR = (u8) *ch;
        unsigned long flags;
 
+       /* May be called from qt2_process_read_urb() for an unbound port. */
        port_priv = usb_get_serial_port_data(port);
+       if (!port_priv)
+               return;
 
        spin_lock_irqsave(&port_priv->lock, flags);
        port_priv->shadowMSR = newMSR;
@@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch)
        unsigned long flags;
        u8 newLSR = (u8) *ch;
 
+       /* May be called from qt2_process_read_urb() for an unbound port. */
        port_priv = usb_get_serial_port_data(port);
+       if (!port_priv)
+               return;
 
        if (newLSR & UART_LSR_BI)
                newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI);
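
The qt2 hunks above, like the keyspan and io_edgeport ones earlier, harden URB callbacks against ports whose private data was never allocated, for example a port that failed to bind or a status message naming an out-of-range port. The defensive pattern, sketched with a hypothetical private struct:

#include <linux/usb/serial.h>

struct example_port_priv {
	u8 shadow_msr;
};

static void example_update_msr(struct usb_serial_port *port, u8 msr)
{
	struct example_port_priv *priv;

	/* Callbacks can fire for an unbound port; bail out early. */
	priv = usb_get_serial_port_data(port);
	if (!priv)
		return;

	priv->shadow_msr = msr;
}
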
index edbbb13d6de6ee39285fef25268be3d08f4e3b0e..bd23a7cb1be2bceaa8422e88bc3cdb5255dd566c 100644 (file)
@@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS);
 #define MOTOROLA_TETRA_IDS()                   \
        { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
        { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+       { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \
+       { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \
        { USB_DEVICE(0x0cad, 0x9016) }  /* TPG2200 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
index 8f066bb55d7d33eb18b8f43ba845c632ed100e2a..dc7a65b9ec982113f5b40dc1fb23c0179b655dac 100644 (file)
@@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
                return -EINVAL;
        }
 
+       /* Prevent individual ports from being unbound. */
+       driver->driver.suppress_bind_attrs = true;
+
        usb_serial_operations_init(driver);
 
        /* Add this device to our list of devices */
index 1c120eaf4091c05c4f279d5e703177a906074965..934e9361cf6bb47b0c5f2bcf327597c321589c20 100644 (file)
@@ -38,6 +38,7 @@ struct usb_wwan_intf_private {
        spinlock_t susp_lock;
        unsigned int suspended:1;
        unsigned int use_send_setup:1;
+       unsigned int use_zlp:1;
        int in_flight;
        unsigned int open_ports;
        void *private;
index 7e855c87e4f7bb1a1364923cacf9a5f6ae970285..13be21aad2f40c7eefd5588a105364d38eeed4e9 100644 (file)
@@ -461,6 +461,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
                                      void (*callback) (struct urb *))
 {
        struct usb_serial *serial = port->serial;
+       struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
        struct urb *urb;
 
        urb = usb_alloc_urb(0, GFP_KERNEL);     /* No ISO */
@@ -471,6 +472,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
                          usb_sndbulkpipe(serial->dev, endpoint) | dir,
                          buf, len, callback, ctx);
 
+       if (intfdata->use_zlp && dir == USB_DIR_OUT)
+               urb->transfer_flags |= URB_ZERO_PACKET;
+
        return urb;
 }
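
The ZLP flag plumbed through option.c and usb_wwan above makes the host terminate a bulk-OUT transfer whose length is an exact multiple of the endpoint's wMaxPacketSize with a zero-length packet; some modems (here certain Quectel and Telit devices) otherwise keep waiting for more data. In sketch form the flag is simply set on the outgoing URB, assuming the usual usb_fill_bulk_urb() setup and illustrative names:

#include <linux/usb.h>

static void example_fill_out_urb(struct urb *urb, struct usb_device *udev,
				 unsigned int ep, void *buf, int len,
				 usb_complete_t done, void *ctx)
{
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep),
			  buf, len, done, ctx);
	/* Terminate max-packet-multiple transfers with a zero-length packet */
	urb->transfer_flags |= URB_ZERO_PACKET;
}
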
 
index 66a4dcbbb1fc9de2c5f160497926848ace8bb37a..f4c2359abb1b0015af491daff990b5dd9d83846c 100644 (file)
@@ -135,7 +135,8 @@ static int slave_configure(struct scsi_device *sdev)
         * For such controllers we need to make sure the block layer sets
         * up bounce buffers in addressable memory.
         */
-       if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)))
+       if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
+                       (bus_to_hcd(us->pusb_dev->bus)->localmem_pool != NULL))
                blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);
 
        /*
index 7ece6ca6e690b23e88348e55e4c38b710dd5515d..91d62276b56fe4d883fafb00587c2a8d76d2ef98 100644 (file)
@@ -1612,14 +1612,16 @@ struct typec_port *typec_register_port(struct device *parent,
 
        port->sw = typec_switch_get(&port->dev);
        if (IS_ERR(port->sw)) {
+               ret = PTR_ERR(port->sw);
                put_device(&port->dev);
-               return ERR_CAST(port->sw);
+               return ERR_PTR(ret);
        }
 
        port->mux = typec_mux_get(&port->dev, NULL);
        if (IS_ERR(port->mux)) {
+               ret = PTR_ERR(port->mux);
                put_device(&port->dev);
-               return ERR_CAST(port->mux);
+               return ERR_PTR(ret);
        }
 
        ret = device_add(&port->dev);
index 72481bbb2af39a79cda097ae164bb5e96111471b..5b986d6c801d9413b6bd40381b55ae12c3b97bbb 100644 (file)
@@ -32,6 +32,7 @@ endif # TYPEC_TCPCI
 config TYPEC_FUSB302
        tristate "Fairchild FUSB302 Type-C chip driver"
        depends on I2C
+       depends on EXTCON || !EXTCON
        help
          The Fairchild FUSB302 Type-C chip driver that works with
          Type-C Port Controller Manager to provide USB PD and USB
index c1f7073a56de7329adafc115655a96be7740ff88..8b4ff9fff340c9a3a4f06c33a6446dfb778c6a53 100644 (file)
@@ -432,20 +432,30 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
 
        if (status & TCPC_ALERT_RX_STATUS) {
                struct pd_message msg;
-               unsigned int cnt;
+               unsigned int cnt, payload_cnt;
                u16 header;
 
                regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
+               /*
+                * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
+                * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
+                * defined in table 4-36 as one greater than the number of
+                * bytes received. And that number includes the header. So:
+                */
+               if (cnt > 3)
+                       payload_cnt = cnt - (1 + sizeof(msg.header));
+               else
+                       payload_cnt = 0;
 
                tcpci_read16(tcpci, TCPC_RX_HDR, &header);
                msg.header = cpu_to_le16(header);
 
-               if (WARN_ON(cnt > sizeof(msg.payload)))
-                       cnt = sizeof(msg.payload);
+               if (WARN_ON(payload_cnt > sizeof(msg.payload)))
+                       payload_cnt = sizeof(msg.payload);
 
-               if (cnt > 0)
+               if (payload_cnt > 0)
                        regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
-                                       &msg.payload, cnt);
+                                       &msg.payload, payload_cnt);
 
                /* Read complete, clear RX status alert bit */
                tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
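
To make the tcpci_irq() arithmetic above concrete: READABLE_BYTE_COUNT is one greater than the number of received bytes, and those bytes include the 2-byte PD message header, so a message with N payload bytes reads back as N + 3. A worked check, assuming sizeof(msg.header) == 2 as in struct pd_message:

/* READABLE_BYTE_COUNT semantics (TCPCI Rev 2.0, table 4-36): */
unsigned int payload_cnt = (cnt > 3) ? cnt - 3 : 0;	/* 1 + 2-byte header */

/*
 * Example: a Source_Capabilities message with four 4-byte PDOs has a
 * 16-byte payload, so cnt reads 19 and payload_cnt = 19 - 3 = 16.
 * A header-only GoodCRC has cnt = 3 and payload_cnt = 0.
 */
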
index 8569bbd3762fdfde81e28a2b7cf7a36388f19fe0..831c9470bdc1f21227a662e06782c36ffd5c8afa 100644 (file)
@@ -94,15 +94,15 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
 #define UCSI_ENABLE_NTFY_CMD_COMPLETE          BIT(16)
 #define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE    BIT(17)
 #define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE     BIT(18)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE            BIT(19)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE      BIT(20)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE     BIT(21)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE            BIT(22)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE     BIT(23)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE                BIT(24)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE                BIT(25)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE      BIT(26)
-#define UCSI_ENABLE_NTFY_ERROR                 BIT(27)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE            BIT(21)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE      BIT(22)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE     BIT(23)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE            BIT(24)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE     BIT(25)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE                BIT(27)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE                BIT(28)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE      BIT(30)
+#define UCSI_ENABLE_NTFY_ERROR                 BIT(31)
 #define UCSI_ENABLE_NTFY_ALL                   0xdbe70000
 
 /* SET_UOR command bits */
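
A quick consistency check on the corrected UCSI notification bits above: OR-ing the full set of enable bits, CMD_COMPLETE(16), EXT_PWR_SRC(17), PWR_OPMODE(18), CAP(21), PWR_LEVEL(22), PD_RESET(23), CAM(24), BAT_STATUS(25), PARTNER(27), PWR_DIR(28), CONNECTOR(30) and ERROR(31), yields exactly the pre-existing UCSI_ENABLE_NTFY_ALL mask, which the old contiguous 19..27 numbering did not:

/*
 *   BIT(16)|BIT(17)|BIT(18)                  = 0x00070000
 *   BIT(21)|BIT(22)|BIT(23)|BIT(24)|BIT(25)  = 0x03e00000
 *   BIT(27)|BIT(28)                          = 0x18000000
 *   BIT(30)|BIT(31)                          = 0xc0000000
 *                                       OR   = 0xdbe70000
 * which matches UCSI_ENABLE_NTFY_ALL above.
 */
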
index 3f1786170098593b7699c20a1c873c9406930140..9fc4f338e8700ec069d9154cf77ee8ef9880201f 100644 (file)
@@ -127,7 +127,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       /* This will make sure we can use ioremap_nocache() */
+       /* This will make sure we can use ioremap() */
        status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
        if (ACPI_FAILURE(status))
                return -ENOMEM;
index 6532d68e8808d03ad2ef4126466bdb547c4ffc66..e4b96674c40526c75726c9b5c53a81d427f83d14 100644 (file)
@@ -727,6 +727,9 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
 
                        copy -= recv;
                        ret += recv;
+
+                       if (!copy)
+                               break;
                }
 
                if (ret != size)
index 33f8972ba842feedb7837854a78d281a73c9cce5..00fc98741c5d1176bed0cc399a5e1e6e39a8cab5 100644 (file)
@@ -77,16 +77,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
        usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);
 
        /* recv transfer buffer */
-       if (usbip_recv_xbuff(ud, urb) < 0)
-               return;
+       if (usbip_recv_xbuff(ud, urb) < 0) {
+               urb->status = -EPROTO;
+               goto error;
+       }
 
        /* recv iso_packet_descriptor */
-       if (usbip_recv_iso(ud, urb) < 0)
-               return;
+       if (usbip_recv_iso(ud, urb) < 0) {
+               urb->status = -EPROTO;
+               goto error;
+       }
 
        /* restore the padding in iso packets */
        usbip_pad_iso(ud, urb);
 
+error:
        if (usbip_dbg_flag_vhci_rx)
                usbip_dump_urb(urb);
 
index 0120d8324a402490f489bae6468fbe6f8b65af4d..a87992892a9f5ad07cf2c1a42b3ed31ace9a0598 100644 (file)
@@ -230,7 +230,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
        switch ((u32)pos) {
        case 0xa0000 ... 0xbffff:
                count = min(count, (size_t)(0xc0000 - pos));
-               iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
+               iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1);
                off = pos - 0xa0000;
                rsrc = VGA_RSRC_LEGACY_MEM;
                is_ioport = false;
index 2d2babe21b2f2deca76e8582a9d40214d2d93042..40d4fb9276baa2881d3e14fdcd134f8e5c1646d2 100644 (file)
@@ -54,13 +54,13 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
 
        if (!xgmac_regs->ioaddr) {
                xgmac_regs->ioaddr =
-                       ioremap_nocache(xgmac_regs->addr, xgmac_regs->size);
+                       ioremap(xgmac_regs->addr, xgmac_regs->size);
                if (!xgmac_regs->ioaddr)
                        return -ENOMEM;
        }
        if (!xpcs_regs->ioaddr) {
                xpcs_regs->ioaddr =
-                       ioremap_nocache(xpcs_regs->addr, xpcs_regs->size);
+                       ioremap(xpcs_regs->addr, xpcs_regs->size);
                if (!xpcs_regs->ioaddr)
                        return -ENOMEM;
        }
index 16165a62b86de01d7a11d6daa45b8ea6cb3afce5..96064ef8f629d97a4282b4a938dcf51a1a9e8cfe 100644 (file)
@@ -82,7 +82,7 @@ static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev)
 
        /* Map FlexRM ring registers if not mapped */
        if (!reg->ioaddr) {
-               reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
+               reg->ioaddr = ioremap(reg->addr, reg->size);
                if (!reg->ioaddr)
                        return -ENOMEM;
        }
index f67bab5475013223670a6376f30eb2113223f20d..09a9453b75c5592539dc23118ab365d507ccac6f 100644 (file)
@@ -52,7 +52,7 @@ static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
 
        if (!reg->ioaddr) {
                reg->ioaddr =
-                       ioremap_nocache(reg->addr, reg->size);
+                       ioremap(reg->addr, reg->size);
                if (!reg->ioaddr)
                        return -ENOMEM;
        }
index e8f2bdbe05428c79c9ebb9d0ab2575f08cfe247e..c0771a9567fb5dd7d4af1ec1b92b665c38cfa728 100644 (file)
@@ -409,7 +409,7 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
 
        if (!reg->ioaddr) {
                reg->ioaddr =
-                       ioremap_nocache(reg->addr, reg->size);
+                       ioremap(reg->addr, reg->size);
 
                if (!reg->ioaddr)
                        return -ENOMEM;
@@ -486,7 +486,7 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
 
        if (!reg->ioaddr) {
                reg->ioaddr =
-                       ioremap_nocache(reg->addr, reg->size);
+                       ioremap(reg->addr, reg->size);
 
                if (!reg->ioaddr)
                        return -ENOMEM;
index 9f3be0258623714789de2bf215e7b8d1cda820e7..27ba2ed4138aae04354b865e8148da12596cd79f 100644 (file)
@@ -633,7 +633,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
                ret = -EBUSY;
                goto err_free_hw;
        }
-       hw->v_regs = ioremap_nocache(carminefb_fix.mmio_start,
+       hw->v_regs = ioremap(carminefb_fix.mmio_start,
                        carminefb_fix.mmio_len);
        if (!hw->v_regs) {
                printk(KERN_ERR "carminefb: Can't remap %s register.\n",
@@ -664,7 +664,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
                goto err_unmap_vregs;
        }
 
-       hw->screen_mem = ioremap_nocache(carminefb_fix.smem_start,
+       hw->screen_mem = ioremap(carminefb_fix.smem_start,
                        carminefb_fix.smem_len);
        if (!hw->screen_mem) {
                printk(KERN_ERR "carmine: Can't ioremap smem area.\n");
index d18f7b31932c832d3a00b6e0475c8512b2db80a0..aa7583d963ac99153f9848c463e7f6591bbb65c2 100644 (file)
@@ -1883,7 +1883,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
        }
        par->res_flags |= MMIO_REQ;
 
-       par->mmio_start_virtual = ioremap_nocache(par->mmio_start_phys, 
+       par->mmio_start_virtual = ioremap(par->mmio_start_phys, 
                                                  MMIO_SIZE);
        if (!par->mmio_start_virtual) {
                printk("i810fb_init: cannot remap mmio region\n");
index a76c61512c6088efa61f5aacdc3bdb4c0aa74896..a09fc2eaa40d955dc1e2cb9bb604a1b71fd71660 100644 (file)
@@ -654,7 +654,7 @@ static int intelfb_pci_register(struct pci_dev *pdev,
        }
 
        dinfo->mmio_base =
-               (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
+               (u8 __iomem *)ioremap(dinfo->mmio_base_phys,
                                              INTEL_REG_SIZE);
        if (!dinfo->mmio_base) {
                ERR_MSG("Cannot remap MMIO region.\n");
index a7bd9f25911b534d10e23ca1eae7ccac6ba99288..a8660926924b029b08975f290bdb6d345eae9322 100644 (file)
@@ -683,7 +683,7 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        kyro_fix.mmio_len   = pci_resource_len(pdev, 1);
 
        currentpar->regbase = deviceInfo.pSTGReg =
-               ioremap_nocache(kyro_fix.mmio_start, kyro_fix.mmio_len);
+               ioremap(kyro_fix.mmio_start, kyro_fix.mmio_len);
        if (!currentpar->regbase)
                goto out_free_fb;
 
index 1a555f70923a867204aeca83c2c397c485bd5f5e..36cc718b96aea0f7d3975bfa0cc0b3b57fdb7c15 100644 (file)
@@ -1710,7 +1710,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
                memsize = mem;
        err = -ENOMEM;
 
-       minfo->mmio.vbase.vaddr = ioremap_nocache(ctrlptr_phys, 16384);
+       minfo->mmio.vbase.vaddr = ioremap(ctrlptr_phys, 16384);
        if (!minfo->mmio.vbase.vaddr) {
                printk(KERN_ERR "matroxfb: cannot ioremap(%lX, 16384), matroxfb disabled\n", ctrlptr_phys);
                goto failVideoMR;
index 50935252b50b6602a720460988cca0e71b06f4c3..3de4b3ed990ae8bcd7e30797002d02bfe976d201 100644 (file)
@@ -938,7 +938,7 @@ static int mbxfb_probe(struct platform_device *dev)
        }
        mfbi->reg_phys_addr = mfbi->reg_res->start;
 
-       mfbi->reg_virt_addr = devm_ioremap_nocache(&dev->dev,
+       mfbi->reg_virt_addr = devm_ioremap(&dev->dev,
                                                   mfbi->reg_phys_addr,
                                                   res_size(mfbi->reg_req));
        if (!mfbi->reg_virt_addr) {
@@ -948,7 +948,7 @@ static int mbxfb_probe(struct platform_device *dev)
        }
        virt_base_2700 = mfbi->reg_virt_addr;
 
-       mfbi->fb_virt_addr = devm_ioremap_nocache(&dev->dev, mfbi->fb_phys_addr,
+       mfbi->fb_virt_addr = devm_ioremap(&dev->dev, mfbi->fb_phys_addr,
                                                  res_size(mfbi->fb_req));
        if (!mfbi->fb_virt_addr) {
                dev_err(&dev->dev, "failed to ioremap frame buffer\n");
index 17174cd7a5bba5e2046198e29feec88b7133ba8c..974e4c28b08bed1300715c03050fa32c7e262d1c 100644 (file)
@@ -485,7 +485,7 @@ static int mmphw_probe(struct platform_device *pdev)
                goto failed;
        }
 
-       ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
+       ctrl->reg_base = devm_ioremap(ctrl->dev,
                        res->start, resource_size(res));
        if (ctrl->reg_base == NULL) {
                dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
index 1dcf02e12af4f3ee676bd7be8e4f59b71f864c21..7cc1216b1389ec4d6b376355a893d2d88b8e1d06 100644 (file)
@@ -1563,7 +1563,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_exit_neither;
        }
        default_par->v_regs =
-               ioremap_nocache(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
+               ioremap(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
        if (!default_par->v_regs) {
                printk(KERN_WARNING "pm2fb: Can't remap %s register area.\n",
                       pm2fb_fix.id);
index 6130aa56a1e93239dbad34558ed6273ce155d4e2..2fa46607e0fcc736b2f76ec0896be80121cc9092 100644 (file)
@@ -1236,7 +1236,7 @@ static unsigned long pm3fb_size_memory(struct pm3_par *par)
                return 0;
        }
        screen_mem =
-               ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
+               ioremap(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
        if (!screen_mem) {
                printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n");
                release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
@@ -1347,7 +1347,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
                goto err_exit_neither;
        }
        par->v_regs =
-               ioremap_nocache(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
+               ioremap(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len);
        if (!par->v_regs) {
                printk(KERN_WARNING "pm3fb: Can't remap %s register area.\n",
                        pm3fb_fix.id);
index d1e78ce3a9c2d255dad9a07b493a221e1c99eb54..d5bf185fc3762bc9100ac3d058f2dd1c8e8da0eb 100644 (file)
@@ -188,7 +188,7 @@ static int pmagaafb_probe(struct device *dev)
 
        /* MMIO mapping setup. */
        info->fix.mmio_start = start + PMAG_AA_BT455_OFFSET;
-       par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+       par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
        if (!par->mmio) {
                printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
                err = -ENOMEM;
@@ -199,7 +199,7 @@ static int pmagaafb_probe(struct device *dev)
 
        /* Frame buffer mapping setup. */
        info->fix.smem_start = start + PMAG_AA_ONBOARD_FBMEM_OFFSET;
-       info->screen_base = ioremap_nocache(info->fix.smem_start,
+       info->screen_base = ioremap(info->fix.smem_start,
                                            info->fix.smem_len);
        if (!info->screen_base) {
                printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
index 56b912bb28de622bb8de31cb2b6c84206ade1b8d..2ddcdf7919a229cd29635a2cac4cac0a356d014f 100644 (file)
@@ -180,7 +180,7 @@ static int pmagbafb_probe(struct device *dev)
 
        /* MMIO mapping setup.  */
        info->fix.mmio_start = start;
-       par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+       par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
        if (!par->mmio) {
                printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
                err = -ENOMEM;
@@ -190,7 +190,7 @@ static int pmagbafb_probe(struct device *dev)
 
        /* Frame buffer mapping setup.  */
        info->fix.smem_start = start + PMAG_BA_FBMEM;
-       info->screen_base = ioremap_nocache(info->fix.smem_start,
+       info->screen_base = ioremap(info->fix.smem_start,
                                            info->fix.smem_len);
        if (!info->screen_base) {
                printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
index 2822b2225924e29f8e38c16f9c88e9676c24cf6d..90d2b04feb428408b6221a2c6e06cbea05362da2 100644 (file)
@@ -287,7 +287,7 @@ static int pmagbbfb_probe(struct device *dev)
 
        /* MMIO mapping setup.  */
        info->fix.mmio_start = start;
-       par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+       par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len);
        if (!par->mmio) {
                printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
                err = -ENOMEM;
@@ -298,7 +298,7 @@ static int pmagbbfb_probe(struct device *dev)
 
        /* Frame buffer mapping setup.  */
        info->fix.smem_start = start + PMAGB_B_FBMEM;
-       par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
+       par->smem = ioremap(info->fix.smem_start, info->fix.smem_len);
        if (!par->smem) {
                printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
                err = -ENOMEM;
index 0a3b2b7c789125a63855354fb2200f427e39416e..c680b3e651cb3e6a3e4af960c61dcdab114d654e 100644 (file)
@@ -770,7 +770,7 @@ static int __maybe_unused pvr2fb_common_init(void)
        struct pvr2fb_par *par = currentpar;
        unsigned long modememused, rev;
 
-       fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start,
+       fb_info->screen_base = ioremap(pvr2_fix.smem_start,
                                               pvr2_fix.smem_len);
 
        if (!fb_info->screen_base) {
@@ -778,7 +778,7 @@ static int __maybe_unused pvr2fb_common_init(void)
                goto out_err;
        }
 
-       par->mmio_base = ioremap_nocache(pvr2_fix.mmio_start,
+       par->mmio_base = ioremap(pvr2_fix.mmio_start,
                                         pvr2_fix.mmio_len);
        if (!par->mmio_base) {
                printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n");
index 1410f476e135d99aa1d5a16277e31c42908a3f85..5615054a0cad5497ae44cc2d26480eda23b0d644 100644 (file)
@@ -665,7 +665,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
        /*
         * Map LCD controller registers.
         */
-       fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start,
+       fbi->reg_base = devm_ioremap(&pdev->dev, res->start,
                                             resource_size(res));
        if (fbi->reg_base == NULL) {
                ret = -ENOMEM;
index e04efb567b5c1cc0f34d71284311e02703d1b678..8048499e398dd328731187d6bebe8e2ac417ed76 100644 (file)
@@ -809,7 +809,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, info);
        default_par = info->par;
-       default_par->regs = ioremap_nocache(pdev->resource[1].start,
+       default_par->regs = ioremap(pdev->resource[1].start,
                        pdev->resource[1].end - pdev->resource[1].start +1);
        if (!default_par->regs) {
                printk(KERN_ERR PFX "unable to map registers\n");
@@ -818,7 +818,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
        }
        info->pseudo_palette = default_par->pseudo_palette;
 
-       info->screen_base = ioremap_nocache(pdev->resource[0].start,
+       info->screen_base = ioremap(pdev->resource[0].start,
                        pdev->resource[0].end - pdev->resource[0].start +1);
 
        if (!info->screen_base) {
index ab8fe838c7763fac194884767c072b2e08b59b76..f72b0359471910db231e16b37132a52ed691ca5a 100644 (file)
@@ -463,7 +463,7 @@ static int sh7760fb_probe(struct platform_device *pdev)
                goto out_fb;
        }
 
-       par->base = ioremap_nocache(res->start, resource_size(res));
+       par->base = ioremap(res->start, resource_size(res));
        if (!par->base) {
                dev_err(&pdev->dev, "cannot remap\n");
                ret = -ENODEV;
index c249763dbf0bf527f584fbe86fea3f1f83838495..54ee7e02a244981ce1f55720b0f1aad4ce1ff5ab 100644 (file)
@@ -2588,7 +2588,7 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
        if (num_channels == 2)
                priv->forced_fourcc = pdata->ch[0].fourcc;
 
-       priv->base = ioremap_nocache(res->start, resource_size(res));
+       priv->base = ioremap(res->start, resource_size(res));
        if (!priv->base) {
                error = -ENOMEM;
                goto err1;
index 4e22ae383c87ede1f0918d238229b6198ef3f337..1f171a52717411cd31adc1c360f98d6ee2703f60 100644 (file)
@@ -1363,14 +1363,14 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto fail_fb_mem;
        }
 
-       par->mmio_vbase = ioremap_nocache(fix->mmio_start,
+       par->mmio_vbase = ioremap(fix->mmio_start,
                                        fix->mmio_len);
        if (!par->mmio_vbase) {
                printk(KERN_ERR "sstfb: cannot remap register area %#lx\n",
                        fix->mmio_start);
                goto fail_mmio_remap;
        }
-       info->screen_base = ioremap_nocache(fix->smem_start, 0x400000);
+       info->screen_base = ioremap(fix->smem_start, 0x400000);
        if (!info->screen_base) {
                printk(KERN_ERR "sstfb: cannot remap framebuffer %#lx\n",
                        fix->smem_start);
index 9e88e3f594c29c4d4a0c7362500b494fbf0ca2db..46709443a82f978bef7c6f77cded05772a234d35 100644 (file)
@@ -1198,7 +1198,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
        case S9000_ID_TOMCAT:   /* Dual CRX, behaves else like a CRX */
                /* FIXME: TomCat supports two heads:
                 * fb.iobase = REGION_BASE(fb_info,3);
-                * fb.screen_base = ioremap_nocache(REGION_BASE(fb_info,2),xxx);
+                * fb.screen_base = ioremap(REGION_BASE(fb_info,2),xxx);
                 * for now we only support the left one ! */
                xres = fb->ngle_rom.x_size_visible;
                yres = fb->ngle_rom.y_size_visible;
@@ -1291,7 +1291,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
 
        strcpy(fix->id, "stifb");
        info->fbops = &stifb_ops;
-       info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
+       info->screen_base = ioremap(REGION_BASE(fb,1), fix->smem_len);
        if (!info->screen_base) {
                printk(KERN_ERR "stifb: failed to map memory\n");
                goto out_err0;
index fdbb1ea66e6cd43c4a37cb8a2ee7fc2f8053d367..0337d1a1a70be29caa24e6f8522b26f92501ffb9 100644 (file)
@@ -1417,7 +1417,7 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        default_par->regbase_virt =
-               ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+               ioremap(info->fix.mmio_start, info->fix.mmio_len);
        if (!default_par->regbase_virt) {
                printk(KERN_ERR "fb: Can't remap %s register area.\n",
                                info->fix.id);
index 286b2371c7dd3bb09d6c88dbc6788cee92f1614a..1966f1d70899a1fea21ca03b67a1e586d515b4c0 100644 (file)
@@ -1438,7 +1438,7 @@ static int tgafb_register(struct device *dev)
        }
 
        /* Map the framebuffer.  */
-       mem_base = ioremap_nocache(bar0_start, bar0_len);
+       mem_base = ioremap(bar0_start, bar0_len);
        if (!mem_base) {
                printk(KERN_ERR "tgafb: Cannot map MMIO\n");
                goto err1;
index da74bf6c5996390b84dd5b70a02f439dfcab0ee5..91b2f6ca2607307cf903aaad7d0e6baa82f7f098 100644 (file)
@@ -1556,7 +1556,7 @@ static int trident_pci_probe(struct pci_dev *dev,
                return -1;
        }
 
-       default_par->io_virt = ioremap_nocache(tridentfb_fix.mmio_start,
+       default_par->io_virt = ioremap(tridentfb_fix.mmio_start,
                                               tridentfb_fix.mmio_len);
 
        if (!default_par->io_virt) {
@@ -1579,7 +1579,7 @@ static int trident_pci_probe(struct pci_dev *dev,
                goto out_unmap1;
        }
 
-       info->screen_base = ioremap_nocache(tridentfb_fix.smem_start,
+       info->screen_base = ioremap(tridentfb_fix.smem_start,
                                            tridentfb_fix.smem_len);
 
        if (!info->screen_base) {
index e04fde9c1fcdeaca9300c365d8d39a76cb463071..97a59b5a45708f540161742a25a7b24d9c55850a 100644 (file)
@@ -356,7 +356,7 @@ int __init valkyriefb_init(void)
        p->total_vram = 0x100000;
        p->frame_buffer_phys = frame_buffer_phys;
 #ifdef CONFIG_MAC
-       p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram);
+       p->frame_buffer = ioremap(frame_buffer_phys, p->total_vram);
 #else
        p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram);
 #endif
index c1e3738e67890a0f6e4ab136ffd4411114223255..79d42b23d8501711b636b062f569892e007fafbf 100644 (file)
@@ -159,7 +159,7 @@ static int __init cr_pll_init(void)
        pci_read_config_dword(mch_dev, CRVML_REG_MCHBAR,
                              &mch_bar);
        mch_regs_base =
-           ioremap_nocache(mch_bar, CRVML_MCHMAP_SIZE);
+           ioremap(mch_bar, CRVML_MCHMAP_SIZE);
        if (!mch_regs_base) {
                printk(KERN_ERR
                       "Carillo Ranch MCH device was not enabled.\n");
index 498038a964ee3b2d6e436abcec03fc2b299806ef..ff61605b8764fc7262fd90d85f6360c93276d374 100644 (file)
@@ -317,7 +317,7 @@ static int vmlfb_enable_mmio(struct vml_par *par)
                       ": Could not claim display controller MMIO.\n");
                return -EBUSY;
        }
-       par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size);
+       par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size);
        if (par->vdc_mem == NULL) {
                printk(KERN_ERR MODULE_NAME
                       ": Could not map display controller MMIO.\n");
@@ -332,7 +332,7 @@ static int vmlfb_enable_mmio(struct vml_par *par)
                err = -EBUSY;
                goto out_err_1;
        }
-       par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size);
+       par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size);
        if (par->gpu_mem == NULL) {
                printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
                err = -ENOMEM;
index ffa2ca2d3f5e53c9712a8484c39fda820d19495f..703ddee9a24438495b7c9ec158c03788c23a2f80 100644 (file)
@@ -442,7 +442,7 @@ static int via_pci_setup_mmio(struct viafb_dev *vdev)
         */
        vdev->engine_start = pci_resource_start(vdev->pdev, 1);
        vdev->engine_len = pci_resource_len(vdev->pdev, 1);
-       vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
+       vdev->engine_mmio = ioremap(vdev->engine_start,
                        vdev->engine_len);
        if (vdev->engine_mmio == NULL)
                dev_err(&vdev->pdev->dev,
index 3be07807edcdfeeb75e667ea17d8db8fbe0e7c57..0796b1d909811b06d1e2284a9821e32315d737f8 100644 (file)
@@ -648,12 +648,12 @@ int w100fb_probe(struct platform_device *pdev)
                return -EINVAL;
 
        /* Remap the chip base address */
-       remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN);
+       remapped_base = ioremap(mem->start+W100_CFG_BASE, W100_CFG_LEN);
        if (remapped_base == NULL)
                goto out;
 
        /* Map the register space */
-       remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN);
+       remapped_regs = ioremap(mem->start+W100_REG_BASE, W100_REG_LEN);
        if (remapped_regs == NULL)
                goto out;
 
@@ -672,7 +672,7 @@ int w100fb_probe(struct platform_device *pdev)
        printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE);
 
        /* Remap the framebuffer */
-       remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
+       remapped_fbuf = ioremap(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
        if (remapped_fbuf == NULL)
                goto out;
 
index 2307b0329aec4cfbd030adff45d44b3f6c12cca9..d823d558c0c4315a8aaae146c623a721861f6846 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/sizes.h>
index 43c391626a000d98bb6461d274cd2317d98e2913..50920b6fc3199c90d3d048409cf50fbf0cdeeb93 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
index e05679c478e2b3b22b3d9048d0c5f866ef2ac5ed..93f995f6cf3645bab73cfe6def29c83a54b2e1d7 100644 (file)
 #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
                                             __GFP_NOMEMALLOC)
 /* The order of free page blocks to report to host */
-#define VIRTIO_BALLOON_FREE_PAGE_ORDER (MAX_ORDER - 1)
+#define VIRTIO_BALLOON_HINT_BLOCK_ORDER (MAX_ORDER - 1)
 /* The size of a free page block in bytes */
-#define VIRTIO_BALLOON_FREE_PAGE_SIZE \
-       (1 << (VIRTIO_BALLOON_FREE_PAGE_ORDER + PAGE_SHIFT))
+#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
+       (1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
+#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
 
 #ifdef CONFIG_BALLOON_COMPACTION
 static struct vfsmount *balloon_mnt;
@@ -380,7 +381,7 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
                if (!page)
                        break;
                free_pages((unsigned long)page_address(page),
-                          VIRTIO_BALLOON_FREE_PAGE_ORDER);
+                          VIRTIO_BALLOON_HINT_BLOCK_ORDER);
        }
        vb->num_free_page_blocks -= num_returned;
        spin_unlock_irq(&vb->free_page_list_lock);
@@ -582,7 +583,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
                ;
 
        page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
-                          VIRTIO_BALLOON_FREE_PAGE_ORDER);
+                          VIRTIO_BALLOON_HINT_BLOCK_ORDER);
        /*
         * When the allocation returns NULL, it indicates that we have got all
         * the possible free pages, so return -EINTR to stop.
@@ -591,13 +592,13 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
                return -EINTR;
 
        p = page_address(page);
-       sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE);
+       sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES);
        /* There is always 1 entry reserved for the cmd id to use. */
        if (vq->num_free > 1) {
                err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
                if (unlikely(err)) {
                        free_pages((unsigned long)p,
-                                  VIRTIO_BALLOON_FREE_PAGE_ORDER);
+                                  VIRTIO_BALLOON_HINT_BLOCK_ORDER);
                        return err;
                }
                virtqueue_kick(vq);
@@ -610,7 +611,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
                 * The vq has no available entry to add this page block, so
                 * just free it.
                 */
-               free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER);
+               free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
        }
 
        return 0;
@@ -721,6 +722,17 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
 
        get_page(newpage); /* balloon reference */
 
+       /*
+        * When we migrate a page to a different zone and have adjusted the
+        * managed page count when inflating, we have to fix up the counts of
+        * both involved zones.
+        */
+       if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
+           page_zone(page) != page_zone(newpage)) {
+               adjust_managed_page_count(page, 1);
+               adjust_managed_page_count(newpage, -1);
+       }
+
        /* balloon's page migration 1st step  -- inflate "newpage" */
        spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
        balloon_page_insert(vb_dev_info, newpage);
@@ -765,11 +777,11 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
        unsigned long blocks_to_free, blocks_freed;
 
        pages_to_free = round_up(pages_to_free,
-                                1 << VIRTIO_BALLOON_FREE_PAGE_ORDER);
-       blocks_to_free = pages_to_free >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+                                VIRTIO_BALLOON_HINT_BLOCK_PAGES);
+       blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES;
        blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);
 
-       return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
+       return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
 }
 
 static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
@@ -826,7 +838,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
        unsigned long count;
 
        count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
-       count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
+       count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
 
        return count;
 }
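
The virtio_balloon hunks above rename the free-page-hinting constants so each name states its unit: a hint block is 2^(MAX_ORDER - 1) pages, and with an explicit _PAGES constant the block/page conversions become ordinary multiplication and division rather than shifts by an order. A sketch of the relationships, assuming only the kernel's MAX_ORDER and PAGE_SHIFT (the EX_ names are illustrative):

#define EX_HINT_BLOCK_ORDER     (MAX_ORDER - 1)
#define EX_HINT_BLOCK_PAGES     (1 << EX_HINT_BLOCK_ORDER)
#define EX_HINT_BLOCK_BYTES     (EX_HINT_BLOCK_PAGES << PAGE_SHIFT)

static unsigned long ex_hint_blocks_to_pages(unsigned long blocks)
{
        /* was: blocks << EX_HINT_BLOCK_ORDER; same value, clearer intent */
        return blocks * EX_HINT_BLOCK_PAGES;
}

The same section also fixes page-migration accounting: when a ballooned page moves between zones and the managed page count was adjusted at inflate time, both zones' counts must be corrected, which the added adjust_managed_page_count() pair does.
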
index 1b6e42e5e8fdad63eee8cf4f399d8dc1e40bad33..51e056bae943e0d0610370c753645d590f986314 100644 (file)
@@ -55,7 +55,7 @@ static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        /* Map registers in BAR 0 */
-       vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
+       vmic_base = ioremap(pci_resource_start(pdev, 0), 16);
        if (!vmic_base) {
                dev_err(&pdev->dev, "Unable to remap CRG region\n");
                retval = -EIO;
index 1edb8a5de873f1f7fe99d4073571b61520fd9c9a..ea938dc29c5e314e756e37940bc711630f1277c4 100644 (file)
@@ -554,7 +554,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
                goto err_resource;
        }
 
-       image->kern_base = ioremap_nocache(
+       image->kern_base = ioremap(
                image->bus_resource.start, size);
        if (!image->kern_base) {
                dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
@@ -1638,7 +1638,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        /* map registers in BAR 0 */
-       ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+       ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0),
                4096);
        if (!ca91cx42_device->base) {
                dev_err(&pdev->dev, "Unable to remap CRG region\n");
index 7e079d39bd76f4f2a7180dbffc4765eb12372e2b..50ae26977a0277596e51ebd55480f0bf58596126 100644 (file)
@@ -770,7 +770,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
                goto err_resource;
        }
 
-       image->kern_base = ioremap_nocache(
+       image->kern_base = ioremap(
                image->bus_resource.start, size);
        if (!image->kern_base) {
                dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
@@ -2317,7 +2317,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        /* map registers in BAR 0 */
-       tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+       tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
                4096);
        if (!tsi148_device->base) {
                dev_err(&pdev->dev, "Unable to remap CRG region\n");
index 3110791a2f1cdd6831190c42b5caf5d1d956e981..ee716c7157100415a71ad28396e4dec6b9185b92 100644 (file)
@@ -139,7 +139,7 @@ static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent
 
        dev->phys_addr = pci_resource_start(pdev, 1);
 
-       dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384);
+       dev->virt_addr = ioremap(dev->phys_addr, 16384);
        if (!dev->virt_addr) {
                dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n",
                        __func__, dev->phys_addr, 16384);
index 1679e0dc869b049b1282a2db04a31ad946bc9426..cec868f8db3f9645ac9bd7741f7f60e28c86bbb6 100644 (file)
@@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG
 config MAX77620_WATCHDOG
        tristate "Maxim Max77620 Watchdog Timer"
        depends on MFD_MAX77620 || COMPILE_TEST
+       select WATCHDOG_CORE
        help
         This is the driver for the Max77620 watchdog timer.
         Say 'Y' here to enable the watchdog timer support for
@@ -1444,6 +1445,7 @@ config SMSC37B787_WDT
 config TQMX86_WDT
        tristate "TQ-Systems TQMX86 Watchdog Timer"
        depends on X86
+       select WATCHDOG_CORE
        help
        This is the driver for the hardware watchdog timer in the TQMX86 IO
        controller found on some of their ComExpress Modules.
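
Both watchdog Kconfig hunks fix the same omission: a driver that registers with the watchdog core (watchdog_register_device() and friends) must select WATCHDOG_CORE, otherwise a built-in driver combined with a modular core fails to link. A hypothetical entry showing the pattern:

config EXAMPLE_WDT
        tristate "Example watchdog timer"
        depends on ARCH_EXAMPLE || COMPILE_TEST
        select WATCHDOG_CORE
        help
          Hypothetical entry; selecting WATCHDOG_CORE pulls in the
          framework code the driver links against.
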
index 8a043b52aa2f9786ac3bb0f9615dfe363bfa050e..7cdb25363ea072f27573368b1ed3fdfb12d3b1ab 100644 (file)
@@ -246,7 +246,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       bcm63xx_wdt_device.regs = devm_ioremap_nocache(&pdev->dev, r->start,
+       bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start,
                                                        resource_size(r));
        if (!bcm63xx_wdt_device.regs) {
                dev_err(&pdev->dev, "failed to remap I/O resources\n");
index 0a87c6f4bab222928106b9091639cf0b55d84831..11b9e7c6b7f596a3b5a28c4bfe7acf0e02bece85 100644 (file)
@@ -112,7 +112,7 @@ static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
 {
        struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
 
-       imx7ulp_wdt_enable(wdt->base, true);
+       imx7ulp_wdt_enable(wdog, true);
        imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
 
        /* wait for wdog to fire */
index 6ad5bf3451ec0416c2b26e1fc85f1a5ffdac2ebf..804e35940983e4a41d8275239ff56e55a49527d3 100644 (file)
@@ -463,7 +463,7 @@ static int __init intel_scu_watchdog_init(void)
                return -ENODEV;
        }
 
-       tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+       tmp_addr = ioremap(watchdog_device.timer_tbl_ptr->phys_addr,
                        20);
 
        if (tmp_addr == NULL) {
index 1cccf8eb1c5d4ed4cdad590027cc181b89dfb146..8e6dfe76f9c9d48affaa01a1f927eb25308acd2b 100644 (file)
@@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
                set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
 
        /* Request the IRQ only after the watchdog is disabled */
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        if (irq > 0) {
                /*
                 * Not all supported platforms specify an interrupt for the
@@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
        }
 
        /* Optional 2nd interrupt for pretimeout */
-       irq = platform_get_irq(pdev, 1);
+       irq = platform_get_irq_optional(pdev, 1);
        if (irq > 0) {
                orion_wdt_info.options |= WDIOF_PRETIMEOUT;
                ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
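
The orion_wdt change swaps platform_get_irq() for platform_get_irq_optional(): both return the IRQ number or a negative error, but the plain variant also logs an "IRQ index not found" error, which is noise for interrupts that are genuinely absent on many supported boards. A hedged sketch of the idiom; example_isr and priv stand in for a real handler and driver data:

        int irq = platform_get_irq_optional(pdev, 0);  /* silent if absent */

        if (irq > 0) {
                ret = devm_request_irq(&pdev->dev, irq, example_isr, 0,
                                       pdev->name, priv);
                if (ret)
                        return ret;
        }
        /* irq <= 0: the interrupt does not exist on this board; carry on */
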
index 1dfede0abf18e93a46a8965b00f1e236ae838823..aee3c2efd565e7fa46ecc13816551d0d9f4c8a80 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/platform_device.h>     /* For platform_driver framework */
 #include <linux/spinlock.h>            /* For spin_lock/spin_unlock/... */
 #include <linux/uaccess.h>             /* For copy_to_user/put_user/... */
-#include <linux/io.h>                  /* For devm_ioremap_nocache */
+#include <linux/io.h>                  /* For devm_ioremap */
 
 #include <asm/mach-rc32434/integ.h>    /* For the Watchdog registers */
 
@@ -267,7 +267,7 @@ static int rc32434_wdt_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       wdt_reg = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
+       wdt_reg = devm_ioremap(&pdev->dev, r->start, resource_size(r));
        if (!wdt_reg) {
                pr_err("failed to remap I/O resources\n");
                return -ENXIO;
index 2348760474317e6fd8a33104bab713bbc5c09d05..6e524c8e26a8f06a39ac0bf18aa2019c03c88fb2 100644 (file)
@@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = {
 
 module_platform_driver(rn5t618_wdt_driver);
 
+MODULE_ALIAS("platform:rn5t618-wdt");
 MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
 MODULE_DESCRIPTION("RN5T618 watchdog driver");
 MODULE_LICENSE("GPL v2");
index fdf533fe0bb21d8c72a00cee7d40110686d458ba..56a4a4030ca9650b902f8ac55511b3c71ff4c6a5 100644 (file)
@@ -420,7 +420,7 @@ static int wdt_find(int addr)
                cr_wdt_csr = NCT6102D_WDT_CSR;
                break;
        case NCT6116_ID:
-               ret = nct6102;
+               ret = nct6116;
                cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
                cr_wdt_control = NCT6102D_WDT_CONTROL;
                cr_wdt_csr = NCT6102D_WDT_CSR;
index 4f2e78a5e4dbee31147978a228646b30e6ed8d28..0c142bcab79d61d50e67a97862aecabe7c0542a2 100644 (file)
@@ -394,7 +394,8 @@ static struct notifier_block xen_memory_nb = {
 #else
 static enum bp_state reserve_additional_memory(void)
 {
-       balloon_stats.target_pages = balloon_stats.current_pages;
+       balloon_stats.target_pages = balloon_stats.current_pages +
+                                    balloon_stats.target_unpopulated;
        return BP_ECANCELED;
 }
 #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
index 49b381e104efaf64469c75e35668e10efac4ba4d..7b36b51cdb9f978657ec1e6fa00c5d6ee2b13b6b 100644 (file)
@@ -664,7 +664,6 @@ static int grow_gnttab_list(unsigned int more_frames)
        unsigned int nr_glist_frames, new_nr_glist_frames;
        unsigned int grefs_per_frame;
 
-       BUG_ON(gnttab_interface == NULL);
        grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 
        new_nr_grant_frames = nr_grant_frames + more_frames;
@@ -1160,7 +1159,6 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
 
 static unsigned int nr_status_frames(unsigned int nr_grant_frames)
 {
-       BUG_ON(gnttab_interface == NULL);
        return gnttab_frames(nr_grant_frames, SPP);
 }
 
@@ -1388,7 +1386,6 @@ static int gnttab_expand(unsigned int req_entries)
        int rc;
        unsigned int cur, extra;
 
-       BUG_ON(gnttab_interface == NULL);
        cur = nr_grant_frames;
        extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
                 gnttab_interface->grefs_per_grant_frame);
@@ -1423,7 +1420,6 @@ int gnttab_init(void)
        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
-       BUG_ON(gnttab_interface == NULL);
        max_nr_glist_frames = (max_nr_grant_frames *
                               gnttab_interface->grefs_per_grant_frame / RPP);
 
index 8b9919c26095dfbb70ad648f53874312d87c4283..70650b248de5d43dae2b4ae01d1fbebfaa6d4c10 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/sched.h>
 #include <xen/xen-ops.h>
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 
 /*
  * Some hypercalls issued by the toolstack can take many 10s of
@@ -37,4 +37,4 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
                __this_cpu_write(xen_in_preemptible_hcall, true);
        }
 }
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
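
CONFIG_PREEMPTION is selected by both CONFIG_PREEMPT and the PREEMPT_RT model, so a guard asking "can the scheduler preempt this code on its own?" must test PREEMPTION, not PREEMPT alone; the preemptible-hypercall machinery above is only needed when it cannot. A minimal sketch of the guard, assuming nothing beyond the config symbols:

#include <linux/sched.h>

#ifndef CONFIG_PREEMPTION
/* Non-preemptible kernel: long-running paths must yield explicitly. */
static inline void example_relax(void)
{
        cond_resched();
}
#else
/* PREEMPT or PREEMPT_RT: the scheduler can step in at any safe point. */
static inline void example_relax(void) { }
#endif /* CONFIG_PREEMPTION */
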
index d75a2385b37c773773beec66b16541b8cb0acfe1..5f5b8a7d5b80b998425dfcd5cc900d5ba4855c2e 100644 (file)
@@ -116,8 +116,6 @@ int xenbus_probe_devices(struct xen_bus_type *bus);
 
 void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
 
-void xenbus_dev_shutdown(struct device *_dev);
-
 int xenbus_dev_suspend(struct device *dev);
 int xenbus_dev_resume(struct device *dev);
 int xenbus_dev_cancel(struct device *dev);
index c21be6e9d38a6c91ae75fb4e08a28588c1164df6..378486b79f96aeec8e308f5630f9ffc168ae16a6 100644 (file)
@@ -255,7 +255,6 @@ fail_put:
        module_put(drv->driver.owner);
 fail:
        xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
-       xenbus_switch_state(dev, XenbusStateClosed);
        return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_probe);
@@ -276,34 +275,20 @@ int xenbus_dev_remove(struct device *_dev)
 
        free_otherend_details(dev);
 
-       xenbus_switch_state(dev, XenbusStateClosed);
+       /*
+        * If the toolstack has forced the device state to closing then set
+        * the state to closed now to allow it to be cleaned up.
+        * Similarly, if the driver does not support re-bind, set the
+        * state to closed.
+        */
+       if (!drv->allow_rebind ||
+           xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
+               xenbus_switch_state(dev, XenbusStateClosed);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_remove);
 
-void xenbus_dev_shutdown(struct device *_dev)
-{
-       struct xenbus_device *dev = to_xenbus_device(_dev);
-       unsigned long timeout = 5*HZ;
-
-       DPRINTK("%s", dev->nodename);
-
-       get_device(&dev->dev);
-       if (dev->state != XenbusStateConnected) {
-               pr_info("%s: %s: %s != Connected, skipping\n",
-                       __func__, dev->nodename, xenbus_strstate(dev->state));
-               goto out;
-       }
-       xenbus_switch_state(dev, XenbusStateClosing);
-       timeout = wait_for_completion_timeout(&dev->down, timeout);
-       if (!timeout)
-               pr_info("%s: %s timeout closing device\n",
-                       __func__, dev->nodename);
- out:
-       put_device(&dev->dev);
-}
-EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
-
 int xenbus_register_driver_common(struct xenbus_driver *drv,
                                  struct xen_bus_type *bus,
                                  struct module *owner, const char *mod_name)
index b0bed4faf44cc85a918a4fdb8a3929b846c13ec8..14876faff3b03ed33c7855db863d9d904e94a329 100644 (file)
@@ -198,7 +198,6 @@ static struct xen_bus_type xenbus_backend = {
                .uevent         = xenbus_uevent_backend,
                .probe          = xenbus_dev_probe,
                .remove         = xenbus_dev_remove,
-               .shutdown       = xenbus_dev_shutdown,
                .dev_groups     = xenbus_dev_groups,
        },
 };
index a7d90a719cea6727259dad81af433bf92779aed4..8a1650bbe18ffc426acc88b65397b99f1c714d63 100644 (file)
@@ -126,6 +126,28 @@ static int xenbus_frontend_dev_probe(struct device *dev)
        return xenbus_dev_probe(dev);
 }
 
+static void xenbus_frontend_dev_shutdown(struct device *_dev)
+{
+       struct xenbus_device *dev = to_xenbus_device(_dev);
+       unsigned long timeout = 5*HZ;
+
+       DPRINTK("%s", dev->nodename);
+
+       get_device(&dev->dev);
+       if (dev->state != XenbusStateConnected) {
+               pr_info("%s: %s: %s != Connected, skipping\n",
+                       __func__, dev->nodename, xenbus_strstate(dev->state));
+               goto out;
+       }
+       xenbus_switch_state(dev, XenbusStateClosing);
+       timeout = wait_for_completion_timeout(&dev->down, timeout);
+       if (!timeout)
+               pr_info("%s: %s timeout closing device\n",
+                       __func__, dev->nodename);
+ out:
+       put_device(&dev->dev);
+}
+
 static const struct dev_pm_ops xenbus_pm_ops = {
        .suspend        = xenbus_dev_suspend,
        .resume         = xenbus_frontend_dev_resume,
@@ -146,7 +168,7 @@ static struct xen_bus_type xenbus_frontend = {
                .uevent         = xenbus_uevent_frontend,
                .probe          = xenbus_frontend_dev_probe,
                .remove         = xenbus_dev_remove,
-               .shutdown       = xenbus_dev_shutdown,
+               .shutdown       = xenbus_frontend_dev_shutdown,
                .dev_groups     = xenbus_dev_groups,
 
                .pm             = &xenbus_pm_ops,
index fd5133e26a38b01af89ef488be709eed7987a7c5..78ba5f9322879ebdb7449806fbdc1cf2c2595887 100644 (file)
@@ -134,8 +134,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                _leave(" = -ENAMETOOLONG");
                return ERR_PTR(-ENAMETOOLONG);
        }
-       if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
+
+       /* Prohibit cell names that contain unprintable chars, '/' and '@', or
+        * that begin with a dot.  This also precludes "@cell".
+        */
+       if (name[0] == '.')
                return ERR_PTR(-EINVAL);
+       for (i = 0; i < namelen; i++) {
+               char ch = name[i];
+               if (!isprint(ch) || ch == '/' || ch == '@')
+                       return ERR_PTR(-EINVAL);
+       }
 
        _enter("%*.*s,%s", namelen, namelen, name, addresses);
 
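
The added validation defines a syntactically acceptable AFS cell name: non-empty, no leading dot, and no unprintable characters, '/' or '@' anywhere, which also rejects the old "@cell" special case without a separate memcmp(). The same rule as a self-contained userspace sketch:

#include <ctype.h>
#include <stdbool.h>
#include <stddef.h>

static bool cell_name_valid(const char *name, size_t len)
{
        if (len == 0 || name[0] == '.')
                return false;
        for (size_t i = 0; i < len; i++) {
                unsigned char ch = (unsigned char)name[i];

                if (!isprint(ch) || ch == '/' || ch == '@')
                        return false;
        }
        return true;
}
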
index 497f979018c2f5f38ee5d69c6a9f999acfa6a1e5..5c794f4b051afcc953f97ceff3b562402a31b4e0 100644 (file)
@@ -908,6 +908,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
                                 unsigned int flags)
 {
        struct afs_vnode *dvnode = AFS_FS_I(dir);
+       struct afs_fid fid = {};
        struct inode *inode;
        struct dentry *d;
        struct key *key;
@@ -951,21 +952,18 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
        afs_stat_v(dvnode, n_lookup);
        inode = afs_do_lookup(dir, dentry, key);
        key_put(key);
-       if (inode == ERR_PTR(-ENOENT)) {
+       if (inode == ERR_PTR(-ENOENT))
                inode = afs_try_auto_mntpt(dentry, dir);
-       } else {
-               dentry->d_fsdata =
-                       (void *)(unsigned long)dvnode->status.data_version;
-       }
+
+       if (!IS_ERR_OR_NULL(inode))
+               fid = AFS_FS_I(inode)->fid;
+
        d = d_splice_alias(inode, dentry);
        if (!IS_ERR_OR_NULL(d)) {
                d->d_fsdata = dentry->d_fsdata;
-               trace_afs_lookup(dvnode, &d->d_name,
-                                inode ? AFS_FS_I(inode) : NULL);
+               trace_afs_lookup(dvnode, &d->d_name, &fid);
        } else {
-               trace_afs_lookup(dvnode, &dentry->d_name,
-                                IS_ERR_OR_NULL(inode) ? NULL
-                                : AFS_FS_I(inode));
+               trace_afs_lookup(dvnode, &dentry->d_name, &fid);
        }
        return d;
 }
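
The afs_lookup() hunk is a capture-before-consume fix: d_splice_alias() takes over the inode reference and may drop or replace it, so the fid wanted by the tracepoint is copied out while the inode pointer is still known to be valid, instead of being dereferenced afterwards. The pattern in isolation, using the names from the hunk:

        struct afs_fid fid = {};

        if (!IS_ERR_OR_NULL(inode))
                fid = AFS_FS_I(inode)->fid;     /* sample while inode is valid */

        d = d_splice_alias(inode, dentry);      /* may consume or replace inode */
        trace_afs_lookup(dvnode, &dentry->d_name, &fid); /* safe: uses the copy */
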
index 4150280509fff56775318ec3007a4f03c5b95454..7503899c0a1b52e2806dc7c76aebbeb4b6e667ee 100644 (file)
@@ -136,6 +136,9 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
 
        ASSERTCMP(d_inode(dentry), ==, NULL);
 
+       if (flags & LOOKUP_CREATE)
+               return ERR_PTR(-EOPNOTSUPP);
+
        if (dentry->d_name.len >= AFSNAMEMAX) {
                _leave(" = -ENAMETOOLONG");
                return ERR_PTR(-ENAMETOOLONG);
index f532d6d3bd28c176b69fd42ad431bdea579adfad..79bc5f1338edfb51e98f67ca47e167c32c903f18 100644 (file)
@@ -126,7 +126,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
                if (src_as->cell)
                        ctx->cell = afs_get_cell(src_as->cell);
 
-               if (size > PAGE_SIZE - 1)
+               if (size < 2 || size > PAGE_SIZE - 1)
                        return -EINVAL;
 
                page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
@@ -140,7 +140,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
                }
 
                buf = kmap(page);
-               ret = vfs_parse_fs_string(fc, "source", buf, size);
+               ret = -EINVAL;
+               if (buf[size - 1] == '.')
+                       ret = vfs_parse_fs_string(fc, "source", buf, size - 1);
                kunmap(page);
                put_page(page);
                if (ret < 0)
index fba2ec3a3a9c904a8d592c3a4141e9f9214eb385..468e1713bce13944719bdae1cc13611d0693c343 100644 (file)
@@ -213,13 +213,14 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
 
        /* Display header on line 1 */
        if (v == &cell->proc_volumes) {
-               seq_puts(m, "USE VID      TY\n");
+               seq_puts(m, "USE VID      TY NAME\n");
                return 0;
        }
 
-       seq_printf(m, "%3d %08llx %s\n",
+       seq_printf(m, "%3d %08llx %s %s\n",
                   atomic_read(&vol->usage), vol->vid,
-                  afs_vol_types[vol->type]);
+                  afs_vol_types[vol->type],
+                  vol->name);
 
        return 0;
 }
index 1686bf188ccd056c7453db63557efddceaf08833..b7f3cb2130caee38a6458d3d81422f83637698f7 100644 (file)
@@ -32,18 +32,11 @@ static void afs_dec_servers_outstanding(struct afs_net *net)
 struct afs_server *afs_find_server(struct afs_net *net,
                                   const struct sockaddr_rxrpc *srx)
 {
-       const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
        const struct afs_addr_list *alist;
        struct afs_server *server = NULL;
        unsigned int i;
-       bool ipv6 = true;
        int seq = 0, diff;
 
-       if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 ||
-           srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 ||
-           srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff))
-               ipv6 = false;
-
        rcu_read_lock();
 
        do {
@@ -52,7 +45,8 @@ struct afs_server *afs_find_server(struct afs_net *net,
                server = NULL;
                read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
 
-               if (ipv6) {
+               if (srx->transport.family == AF_INET6) {
+                       const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
                        hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
                                alist = rcu_dereference(server->addresses);
                                for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
@@ -68,15 +62,16 @@ struct afs_server *afs_find_server(struct afs_net *net,
                                }
                        }
                } else {
+                       const struct sockaddr_in *a = &srx->transport.sin, *b;
                        hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
                                alist = rcu_dereference(server->addresses);
                                for (i = 0; i < alist->nr_ipv4; i++) {
-                                       b = &alist->addrs[i].transport.sin6;
-                                       diff = ((u16 __force)a->sin6_port -
-                                               (u16 __force)b->sin6_port);
+                                       b = &alist->addrs[i].transport.sin;
+                                       diff = ((u16 __force)a->sin_port -
+                                               (u16 __force)b->sin_port);
                                        if (diff == 0)
-                                               diff = ((u32 __force)a->sin6_addr.s6_addr32[3] -
-                                                       (u32 __force)b->sin6_addr.s6_addr32[3]);
+                                               diff = ((u32 __force)a->sin_addr.s_addr -
+                                                       (u32 __force)b->sin_addr.s_addr);
                                        if (diff == 0)
                                                goto found;
                                }
index 488641b1a418d1b0f27aa3d17925b9e5a1a2cff2..7f8a9b3137bff33f24ff4eb058a5f77cf01c4244 100644 (file)
@@ -404,6 +404,7 @@ static int afs_test_super(struct super_block *sb, struct fs_context *fc)
        return (as->net_ns == fc->net_ns &&
                as->volume &&
                as->volume->vid == ctx->volume->vid &&
+               as->cell == ctx->cell &&
                !as->dyn_root);
 }
 
@@ -448,7 +449,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
        /* allocate the root inode and dentry */
        if (as->dyn_root) {
                inode = afs_iget_pseudo_dir(sb, true);
-               sb->s_flags     |= SB_RDONLY;
        } else {
                sprintf(sb->s_id, "%llu", as->volume->vid);
                afs_activate_volume(as->volume);
index 75b6d10c984560c437a91ebed744b9f3d5510c4b..575636f6491ef6d887180f9fc3c36740d410083c 100644 (file)
@@ -7,6 +7,7 @@ config BTRFS_FS
        select LIBCRC32C
        select CRYPTO_XXHASH
        select CRYPTO_SHA256
+       select CRYPTO_BLAKE2B
        select ZLIB_INFLATE
        select ZLIB_DEFLATE
        select LZO_COMPRESS
index ee834ef7beb4a375884f91c260c69246412f012e..43e1660f450f01ba8b6326d54e1bfdc6162a8080 100644 (file)
@@ -447,7 +447,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
        if (blkcg_css) {
                bio->bi_opf |= REQ_CGROUP_PUNT;
-               bio_associate_blkg_from_css(bio, blkcg_css);
+               kthread_associate_blkcg(blkcg_css);
        }
        refcount_set(&cb->pending_bios, 1);
 
@@ -491,6 +491,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                        bio->bi_opf = REQ_OP_WRITE | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
+                       if (blkcg_css)
+                               bio->bi_opf |= REQ_CGROUP_PUNT;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
                }
                if (bytes_left < PAGE_SIZE) {
@@ -517,6 +519,9 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                bio_endio(bio);
        }
 
+       if (blkcg_css)
+               kthread_associate_blkcg(NULL);
+
        return 0;
 }
 
index 5b6e86aaf2e1a42f468c0dbb94e8231d12973d1e..24658b5a578708f8b649a634e0503545e3930881 100644 (file)
@@ -379,7 +379,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
        for (node = rb_first(tm_root); node; node = next) {
                next = rb_next(node);
                tm = rb_entry(node, struct tree_mod_elem, node);
-               if (tm->seq > min_seq)
+               if (tm->seq >= min_seq)
                        continue;
                rb_erase(node, tm_root);
                kfree(tm);
index b2e8fd8a8e59922e81b5d27bbb04633e8d9ef297..54efb21c27272685c11dc70185ef0a48c5146dd1 100644 (file)
@@ -2787,7 +2787,7 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
 /* file-item.c */
 struct btrfs_dio_private;
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
-                   struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
+                   struct btrfs_root *root, u64 bytenr, u64 len);
 blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
                                   u8 *dst);
 blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
index f639dde2a679036d6d150264158d46986b6981ec..ba4d8f375b3c1cdcbcc64ded7491fa5749503ae6 100644 (file)
@@ -500,11 +500,8 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
                              &dev_replace->scrub_progress, 0, 1);
 
        ret = btrfs_dev_replace_finishing(fs_info, ret);
-       if (ret == -EINPROGRESS) {
+       if (ret == -EINPROGRESS)
                ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
-       } else if (ret != -ECANCELED) {
-               WARN_ON(ret);
-       }
 
        return ret;
 
index 153f71a5bba91975fc17e528769b8992016819d9..274318e9114eee4cdd95108d6fb309c16fdd4678 100644 (file)
@@ -1869,8 +1869,8 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
                btrfs_pin_extent(fs_info, head->bytenr,
                                 head->num_bytes, 1);
                if (head->is_data) {
-                       ret = btrfs_del_csums(trans, fs_info, head->bytenr,
-                                             head->num_bytes);
+                       ret = btrfs_del_csums(trans, fs_info->csum_root,
+                                             head->bytenr, head->num_bytes);
                }
        }
 
@@ -3175,7 +3175,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                btrfs_release_path(path);
 
                if (is_data) {
-                       ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
+                       ret = btrfs_del_csums(trans, info->csum_root, bytenr,
+                                             num_bytes);
                        if (ret) {
                                btrfs_abort_transaction(trans, ret);
                                goto out;
@@ -3799,6 +3800,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
                                u64 flags, int delalloc)
 {
        int ret = 0;
+       int cache_block_group_error = 0;
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group *block_group = NULL;
        struct find_free_extent_ctl ffe_ctl = {0};
@@ -3958,7 +3960,20 @@ have_block_group:
                if (unlikely(!ffe_ctl.cached)) {
                        ffe_ctl.have_caching_bg = true;
                        ret = btrfs_cache_block_group(block_group, 0);
-                       BUG_ON(ret < 0);
+
+                       /*
+                        * If we get ENOMEM here or something else we want to
+                        * try other block groups, because it may not be fatal.
+                        * However if we can't find anything else we need to
+                        * save our return here so that we return the actual
+                        * error that caused problems, not ENOSPC.
+                        */
+                       if (ret < 0) {
+                               if (!cache_block_group_error)
+                                       cache_block_group_error = ret;
+                               ret = 0;
+                               goto loop;
+                       }
                        ret = 0;
                }
 
@@ -4045,7 +4060,7 @@ loop:
        if (ret > 0)
                goto search;
 
-       if (ret == -ENOSPC) {
+       if (ret == -ENOSPC && !cache_block_group_error) {
                /*
                 * Use ffe_ctl->total_free_space as fallback if we can't find
                 * any contiguous hole.
@@ -4056,6 +4071,8 @@ loop:
                space_info->max_extent_size = ffe_ctl.max_extent_size;
                spin_unlock(&space_info->lock);
                ins->offset = ffe_ctl.max_extent_size;
+       } else if (ret == -ENOSPC) {
+               ret = cache_block_group_error;
        }
        return ret;
 }
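
The find_free_extent() hunks replace a BUG_ON() with a remember-the-first-error pattern: a caching failure in one block group is not fatal while other groups remain, but if the whole search comes up empty the caller should see the original error rather than a misleading -ENOSPC. The pattern in a self-contained sketch; try_one is a hypothetical per-candidate operation:

#include <errno.h>

static int try_candidates(int (*try_one)(int), int nr)
{
        int first_err = 0;

        for (int i = 0; i < nr; i++) {
                int ret = try_one(i);

                if (ret == 0)
                        return 0;               /* first success wins */
                if (ret < 0 && !first_err)
                        first_err = ret;        /* remember the root cause */
        }
        return first_err ? first_err : -ENOSPC;
}
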
index eb8bd0258360f95d7d62148b122cd07eadfcbc90..2f4802f405a246e71d9ac2602b98ef494a535f0e 100644 (file)
@@ -5074,12 +5074,14 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
                return eb;
        eb = alloc_dummy_extent_buffer(fs_info, start);
        if (!eb)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        eb->fs_info = fs_info;
 again:
        ret = radix_tree_preload(GFP_NOFS);
-       if (ret)
+       if (ret) {
+               exists = ERR_PTR(ret);
                goto free_eb;
+       }
        spin_lock(&fs_info->buffer_lock);
        ret = radix_tree_insert(&fs_info->buffer_radix,
                                start >> PAGE_SHIFT, eb);
index 3270a40b0777bf3c0e68b917f25155ac09d1eb8f..b1bfdc5c1387aeaf2a449d702698da14206e3b72 100644 (file)
@@ -590,9 +590,9 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
  * range of bytes.
  */
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
-                   struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
+                   struct btrfs_root *root, u64 bytenr, u64 len)
 {
-       struct btrfs_root *root = fs_info->csum_root;
+       struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_path *path;
        struct btrfs_key key;
        u64 end_byte = bytenr + len;
@@ -602,6 +602,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int blocksize_bits = fs_info->sb->s_blocksize_bits;
 
+       ASSERT(root == fs_info->csum_root ||
+              root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
index 0cb43b6827897b6569a6d9f7c1d555bf8afa0347..8d47c76b7bd1073dea37b7d945211f311c59642a 100644 (file)
@@ -2599,8 +2599,8 @@ int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path,
                        }
                }
 
-               if (clone_info) {
-                       u64 clone_len = drop_end - cur_offset;
+               if (clone_info && drop_end > clone_info->file_offset) {
+                       u64 clone_len = drop_end - clone_info->file_offset;
 
                        ret = btrfs_insert_clone_extent(trans, inode, path,
                                                        clone_info, clone_len);
index 56032c518b267a5b3ce3875b0e4081e1bcbd761c..c70baafb2a3920ced47005c3d60b11c07c89af88 100644 (file)
@@ -1479,10 +1479,10 @@ next_slot:
                        disk_num_bytes =
                                btrfs_file_extent_disk_num_bytes(leaf, fi);
                        /*
-                        * If extent we got ends before our range starts, skip
-                        * to next extent
+                        * If the extent we got ends before our current offset,
+                        * skip to the next extent.
                         */
-                       if (extent_end <= start) {
+                       if (extent_end <= cur_offset) {
                                path->slots[0]++;
                                goto next_slot;
                        }
@@ -4238,18 +4238,30 @@ out:
 }
 
 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
-                              struct inode *dir, u64 objectid,
-                              const char *name, int name_len)
+                              struct inode *dir, struct dentry *dentry)
 {
        struct btrfs_root *root = BTRFS_I(dir)->root;
+       struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dir_item *di;
        struct btrfs_key key;
+       const char *name = dentry->d_name.name;
+       int name_len = dentry->d_name.len;
        u64 index;
        int ret;
+       u64 objectid;
        u64 dir_ino = btrfs_ino(BTRFS_I(dir));
 
+       if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
+               objectid = inode->root->root_key.objectid;
+       } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
+               objectid = inode->location.objectid;
+       } else {
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -4271,13 +4283,16 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
        }
        btrfs_release_path(path);
 
-       ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid,
-                                dir_ino, &index, name, name_len);
-       if (ret < 0) {
-               if (ret != -ENOENT) {
-                       btrfs_abort_transaction(trans, ret);
-                       goto out;
-               }
+       /*
+        * This is a placeholder inode for a subvolume we didn't have a
+        * reference to at the time of the snapshot creation.  In the meantime
+        * we could have renamed the real subvol link into our snapshot, so
+        * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
+        * Instead simply look up the dir_index_item for this entry so we can
+        * remove it.  Otherwise we know we have a ref to the root and we can
+        * call btrfs_del_root_ref, and it _shouldn't_ fail.
+        */
+       if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
                di = btrfs_search_dir_index_item(root, path, dir_ino,
                                                 name, name_len);
                if (IS_ERR_OR_NULL(di)) {
@@ -4292,8 +4307,16 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                index = key.offset;
+               btrfs_release_path(path);
+       } else {
+               ret = btrfs_del_root_ref(trans, objectid,
+                                        root->root_key.objectid, dir_ino,
+                                        &index, name, name_len);
+               if (ret) {
+                       btrfs_abort_transaction(trans, ret);
+                       goto out;
+               }
        }
-       btrfs_release_path(path);
 
        ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
        if (ret) {
@@ -4487,8 +4510,7 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
 
        btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
 
-       ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid,
-                                 dentry->d_name.name, dentry->d_name.len);
+       ret = btrfs_unlink_subvol(trans, dir, dentry);
        if (ret) {
                err = ret;
                btrfs_abort_transaction(trans, ret);
@@ -4583,10 +4605,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
                return PTR_ERR(trans);
 
        if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
-               err = btrfs_unlink_subvol(trans, dir,
-                                         BTRFS_I(inode)->location.objectid,
-                                         dentry->d_name.name,
-                                         dentry->d_name.len);
+               err = btrfs_unlink_subvol(trans, dir, dentry);
                goto out;
        }
 
@@ -5728,7 +5747,6 @@ static void inode_tree_add(struct inode *inode)
 
 static void inode_tree_del(struct inode *inode)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int empty = 0;
 
@@ -5741,7 +5759,6 @@ static void inode_tree_del(struct inode *inode)
        spin_unlock(&root->inode_lock);
 
        if (empty && btrfs_root_refs(&root->root_item) == 0) {
-               synchronize_srcu(&fs_info->subvol_srcu);
                spin_lock(&root->inode_lock);
                empty = RB_EMPTY_ROOT(&root->inode_tree);
                spin_unlock(&root->inode_lock);
@@ -9538,7 +9555,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
        u64 old_idx = 0;
        u64 new_idx = 0;
-       u64 root_objectid;
        int ret;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
@@ -9556,9 +9572,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        btrfs_init_log_ctx(&ctx_dest, new_inode);
 
        /* close the race window with snapshot create/destroy ioctl */
-       if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
-               down_read(&fs_info->subvol_sem);
-       if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
+       if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
+           new_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&fs_info->subvol_sem);
 
        /*
@@ -9645,10 +9660,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
        /* src is a subvolume */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
-               root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
-               ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
-                                         old_dentry->d_name.name,
-                                         old_dentry->d_name.len);
+               ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
        } else { /* src is an inode */
                ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
                                           BTRFS_I(old_dentry->d_inode),
@@ -9664,10 +9676,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
        /* dest is a subvolume */
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
-               root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
-               ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
-                                         new_dentry->d_name.name,
-                                         new_dentry->d_name.len);
+               ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
        } else { /* dest is an inode */
                ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
                                           BTRFS_I(new_dentry->d_inode),
@@ -9792,9 +9801,8 @@ out_fail:
                ret = ret ? ret : ret2;
        }
 out_notrans:
-       if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
-               up_read(&fs_info->subvol_sem);
-       if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
+       if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
+           old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
 
        ASSERT(list_empty(&ctx_root.list));
@@ -9866,7 +9874,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct inode *new_inode = d_inode(new_dentry);
        struct inode *old_inode = d_inode(old_dentry);
        u64 index = 0;
-       u64 root_objectid;
        int ret;
        u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
        bool log_pinned = false;
@@ -9974,10 +9981,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                                BTRFS_I(old_inode), 1);
 
        if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
-               root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
-               ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
-                                       old_dentry->d_name.name,
-                                       old_dentry->d_name.len);
+               ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
        } else {
                ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
                                        BTRFS_I(d_inode(old_dentry)),
@@ -9996,10 +10000,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_inode->i_ctime = current_time(new_inode);
                if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
                             BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
-                       root_objectid = BTRFS_I(new_inode)->location.objectid;
-                       ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
-                                               new_dentry->d_name.name,
-                                               new_dentry->d_name.len);
+                       ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
                        BUG_ON(new_inode->i_nlink == 0);
                } else {
                        ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
index a1ee0b775e652b16f9abe3134e3ae2226ba3ab9c..12ae31e1813e4e0a02079faa48daa7be86ab8dea 100644 (file)
@@ -704,11 +704,17 @@ static noinline int create_subvol(struct inode *dir,
 
        btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
        ret = btrfs_update_inode(trans, root, dir);
-       BUG_ON(ret);
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               goto fail;
+       }
 
        ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
                                 btrfs_ino(BTRFS_I(dir)), index, name, namelen);
-       BUG_ON(ret);
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               goto fail;
+       }
 
        ret = btrfs_uuid_tree_add(trans, root_item->uuid,
                                  BTRFS_UUID_KEY_SUBVOL, objectid);
@@ -3720,24 +3726,18 @@ process_slot:
        ret = 0;
 
        if (last_dest_end < destoff + len) {
-               struct btrfs_clone_extent_info clone_info = { 0 };
                /*
-                * We have an implicit hole (NO_HOLES feature is enabled) that
-                * fully or partially overlaps our cloning range at its end.
+                * We have an implicit hole that fully or partially overlaps our
+                * cloning range at its end. This means that we either have the
+                * NO_HOLES feature enabled or the implicit hole happened due to
+                * mixing buffered and direct IO writes against this file.
                 */
                btrfs_release_path(path);
                path->leave_spinning = 0;
 
-               /*
-                * We are dealing with a hole and our clone_info already has a
-                * disk_offset of 0, we only need to fill the data length and
-                * file offset.
-                */
-               clone_info.data_len = destoff + len - last_dest_end;
-               clone_info.file_offset = last_dest_end;
                ret = btrfs_punch_hole_range(inode, path,
                                             last_dest_end, destoff + len - 1,
-                                            &clone_info, &trans);
+                                            NULL, &trans);
                if (ret)
                        goto out;
 
@@ -4252,7 +4252,19 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
                              &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
                              0);
 
-       if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
+       /*
+        * Copy scrub args to user space even if btrfs_scrub_dev() returned an
+        * error. This is important as it allows user space to know how much
+        * progress scrub has done. For example, if scrub is canceled we get
+        * -ECANCELED from btrfs_scrub_dev() and return that error back to user
+        * space. Later user space can inspect the progress from the structure
+        * btrfs_ioctl_scrub_args and resume scrub from where it left off
+        * previously (btrfs-progs does this).
+        * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
+        * then return -EFAULT to signal the structure was not copied or it may
+        * be corrupt and unreliable due to a partial copy.
+        */
+       if (copy_to_user(arg, sa, sizeof(*sa)))
                ret = -EFAULT;
 
        if (!(sa->flags & BTRFS_SCRUB_READONLY))
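
The scrub ioctl change encodes a convention worth noting: the args structure is copied back to userspace even when the operation failed (for example with -ECANCELED), because btrfs-progs reads the recorded progress to resume a scrub later; only a failure of the copy itself overrides the return value with -EFAULT. A hedged sketch with hypothetical names:

#include <linux/types.h>
#include <linux/uaccess.h>

struct example_args { __u64 progress; };

static long example_do_work(struct example_args *sa); /* hypothetical */

static long example_ioctl(struct example_args __user *uarg,
                          struct example_args *sa)
{
        long ret = example_do_work(sa); /* may fail with progress recorded */

        /* Always report progress; a copy failure trumps the work result. */
        if (copy_to_user(uarg, sa, sizeof(*sa)))
                ret = -EFAULT;
        return ret;
}
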
index 93aeb2e539a4cf6758202b4137a4b595115707a5..39fc8c3d3a75df2387e43c7dec3aae9d90e59350 100644 (file)
@@ -2423,8 +2423,12 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
        u64 nr_old_roots = 0;
        int ret = 0;
 
+       /*
+        * If quotas get disabled in the meantime, the resources need to be
+        * freed and we can't just exit here.
+        */
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
-               return 0;
+               goto out_free;
 
        if (new_roots) {
                if (!maybe_fs_roots(new_roots))
@@ -3232,12 +3236,12 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                if (!(fs_info->qgroup_flags &
                      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
                        btrfs_warn(fs_info,
-                       "qgroup rescan init failed, qgroup is not enabled");
+                       "qgroup rescan init failed, qgroup rescan is not queued");
                        ret = -EINVAL;
                } else if (!(fs_info->qgroup_flags &
                             BTRFS_QGROUP_STATUS_FLAG_ON)) {
                        btrfs_warn(fs_info,
-                       "qgroup rescan init failed, qgroup rescan is not queued");
+                       "qgroup rescan init failed, qgroup is not enabled");
                        ret = -EINVAL;
                }
 
index d897a8e5e430e5c35702bdb05f227ba49846ab0b..da5abd62db2232d48e128e6b37123f4bbfbd9072 100644 (file)
@@ -517,6 +517,34 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
        return 1;
 }
 
+static bool reloc_root_is_dead(struct btrfs_root *root)
+{
+       /*
+        * Pairs with the set_bit/clear_bit in btrfs_update_reloc_root and
+        * clean_dirty_subvols. We need to see the updated bit before trying
+        * to access reloc_root.
+        */
+       smp_rmb();
+       if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
+               return true;
+       return false;
+}
+
+/*
+ * Check if this subvolume tree has a valid reloc tree.
+ *
+ * A reloc tree after swap is considered dead, thus not considered valid.
+ * This is enough for most callers, as they don't distinguish a dead reloc
+ * root from no reloc root.  But should_ignore_root() below is a special case.
+ */
+static bool have_reloc_root(struct btrfs_root *root)
+{
+       if (reloc_root_is_dead(root))
+               return false;
+       if (!root->reloc_root)
+               return false;
+       return true;
+}
 
 static int should_ignore_root(struct btrfs_root *root)
 {
@@ -525,6 +553,10 @@ static int should_ignore_root(struct btrfs_root *root)
        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return 0;
 
+       /* This root has been merged with its reloc tree, we can ignore it */
+       if (reloc_root_is_dead(root))
+               return 1;
+
        reloc_root = root->reloc_root;
        if (!reloc_root)
                return 0;
@@ -1439,7 +1471,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
         * The subvolume has a reloc tree but the swap is finished, no need to
         * create/update the dead reloc tree.
         */
-       if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
+       if (reloc_root_is_dead(root))
                return 0;
 
        if (root->reloc_root) {
@@ -1478,8 +1510,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
        struct btrfs_root_item *root_item;
        int ret;
 
-       if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) ||
-           !root->reloc_root)
+       if (!have_reloc_root(root))
                goto out;
 
        reloc_root = root->reloc_root;
@@ -1489,6 +1520,11 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
        if (fs_info->reloc_ctl->merge_reloc_tree &&
            btrfs_root_refs(root_item) == 0) {
                set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
+               /*
+                * Mark the tree as dead before we change reloc_root so
+                * have_reloc_root will not touch it from now on.
+                */
+               smp_wmb();
                __del_reloc_root(reloc_root);
        }
 
@@ -2201,6 +2237,11 @@ static int clean_dirty_subvols(struct reloc_control *rc)
                                if (ret2 < 0 && !ret)
                                        ret = ret2;
                        }
+                       /*
+                        * Need barrier to ensure clear_bit() only happens after
+                        * root->reloc_root = NULL. Pairs with have_reloc_root.
+                        */
+                       smp_wmb();
                        clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
                        btrfs_put_fs_root(root);
                } else {
@@ -4552,6 +4593,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
                fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
                if (IS_ERR(fs_root)) {
                        err = PTR_ERR(fs_root);
+                       list_add_tail(&reloc_root->root_list, &reloc_roots);
                        goto out_free;
                }
 
@@ -4717,7 +4759,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
        struct btrfs_root *root = pending->root;
        struct reloc_control *rc = root->fs_info->reloc_ctl;
 
-       if (!root->reloc_root || !rc)
+       if (!rc || !have_reloc_root(root))
                return;
 
        if (!rc->merge_reloc_tree)
@@ -4751,7 +4793,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
        struct reloc_control *rc = root->fs_info->reloc_ctl;
        int ret;
 
-       if (!root->reloc_root || !rc)
+       if (!rc || !have_reloc_root(root))
                return 0;
 
        rc = root->fs_info->reloc_ctl;
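The barrier pairing introduced above is spread across three functions, so a
compressed analogue may help. Below is a userspace sketch of the ordering
only, using C11 fences in place of smp_wmb()/smp_rmb(); the types and names
are invented, not the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct root_state {
            atomic_bool dead;               /* ~ BTRFS_ROOT_DEAD_RELOC_TREE */
            void *_Atomic reloc_root;
    };

    /* Writer (cf. btrfs_update_reloc_root): publish 'dead' before teardown. */
    static void retire_reloc_root(struct root_state *r)
    {
            atomic_store_explicit(&r->dead, true, memory_order_relaxed);
            atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
            atomic_store_explicit(&r->reloc_root, NULL, memory_order_relaxed);
    }

    /* Reader (cf. have_reloc_root): check 'dead' before touching the pointer. */
    static bool have_reloc_root(struct root_state *r)
    {
            atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
            if (atomic_load_explicit(&r->dead, memory_order_relaxed))
                    return false;
            return atomic_load_explicit(&r->reloc_root,
                                        memory_order_relaxed) != NULL;
    }

    int main(void)
    {
            struct root_state r = { .reloc_root = &r };
            retire_reloc_root(&r);
            return have_reloc_root(&r) ? 1 : 0;     /* dead: no reloc root */
    }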
index 3b17b647d002f2c5cb747ade373500a0921f08b4..612411c74550f5d1776c318269249abb0b7953fc 100644 (file)
@@ -376,11 +376,13 @@ again:
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_root_ref);
-
-               WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
-               WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
                ptr = (unsigned long)(ref + 1);
-               WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
+               if ((btrfs_root_ref_dirid(leaf, ref) != dirid) ||
+                   (btrfs_root_ref_name_len(leaf, ref) != name_len) ||
+                   memcmp_extent_buffer(leaf, name, ptr, name_len)) {
+                       err = -ENOENT;
+                       goto out;
+               }
                *sequence = btrfs_root_ref_sequence(leaf, ref);
 
                ret = btrfs_del_item(trans, tree_root, path);
index 21de630b073024c343d75835c3a61ba0922aac7b..fd266a2d15ec84959189521312827350b6795ffe 100644 (file)
@@ -3577,17 +3577,27 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 * This can easily boost the number of SYSTEM chunks if the
                 * cleaner thread can't be triggered fast enough, and use up
                 * all the space of btrfs_super_block::sys_chunk_array
+                *
+                * While for dev-replace, we need to try our best to mark the
+                * block group RO, to prevent a race between:
+                * - Write duplication
+                *   Contains the latest data
+                * - Scrub copy
+                *   Contains data from the commit tree
+                *
+                * If the target block group is not marked RO, nocow writes
+                * can be overwritten by the scrub copy, causing data
+                * corruption. So for dev-replace, it's not allowed to
+                * continue if a block group is not RO.
                 */
-               ret = btrfs_inc_block_group_ro(cache, false);
-               scrub_pause_off(fs_info);
-
+               ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
                if (ret == 0) {
                        ro_set = 1;
-               } else if (ret == -ENOSPC) {
+               } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
                        /*
                         * btrfs_inc_block_group_ro() returns -ENOSPC when it
                         * fails to create a new chunk for metadata.
-                        * It is not a problem for scrub/replace, because
+                        * It is not a problem for scrub, because
                         * metadata is always COWed, and our scrub paused
                         * commit_transactions.
                         */
@@ -3596,9 +3606,22 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                        btrfs_warn(fs_info,
                                   "failed setting block group ro: %d", ret);
                        btrfs_put_block_group(cache);
+                       scrub_pause_off(fs_info);
                        break;
                }
 
+               /*
+                * Now the target block group is marked RO, wait for nocow
+                * writes to finish before dev-replace.
+                * COW is fine, as COW never overwrites extents in the commit
+                * tree.
+                */
+               if (sctx->is_dev_replace) {
+                       btrfs_wait_nocow_writers(cache);
+                       btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
+                                       cache->length);
+               }
+
+               scrub_pause_off(fs_info);
                down_write(&dev_replace->rwsem);
                dev_replace->cursor_right = found_key.offset + length;
                dev_replace->cursor_left = found_key.offset;
index ae2db5eb15499f54b986584f5218e1e257eceb47..091e5bc8c7ea93544bd58cefcb8318aa2fe43a44 100644 (file)
@@ -7083,12 +7083,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
        send_root->send_in_progress++;
        spin_unlock(&send_root->root_item_lock);
 
-       /*
-        * This is done when we lookup the root, it should already be complete
-        * by the time we get here.
-        */
-       WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
-
        /*
         * Userspace tools do the checks and warn the user if it's
         * not RO.
index 1a846bf6e197b3f6bde759ead2735841b1a78700..914eea5ba6a7adec8d97a76b6971adfa3408b20a 100644 (file)
@@ -452,9 +452,9 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
        root->fs_info->tree_root = root;
 
        root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
-       if (!root->node) {
+       if (IS_ERR(root->node)) {
                test_std_err(TEST_ALLOC_EXTENT_BUFFER);
-               ret = -ENOMEM;
+               ret = PTR_ERR(root->node);
                goto out;
        }
        btrfs_set_header_level(root->node, 0);
index 09aaca1efd62ae9609cf72d897a907e915bcd02d..ac035a6fa003dda0d9f4820f905e0a58df26d1f1 100644 (file)
@@ -484,9 +484,9 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
         * *cough*backref walking code*cough*
         */
        root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
-       if (!root->node) {
+       if (IS_ERR(root->node)) {
                test_err("couldn't allocate dummy buffer");
-               ret = -ENOMEM;
+               ret = PTR_ERR(root->node);
                goto out;
        }
        btrfs_set_header_level(root->node, 0);
index 493d4d9e0f79c8292b4efc6c3b5632ed91b95cb5..97f3520b8d98d29b61abc27a6751979854f1c349 100644 (file)
@@ -227,7 +227,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
         */
        if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) {
                file_extent_err(leaf, slot,
-                               "invalid item size, have %u expect [%lu, %u)",
+                               "invalid item size, have %u expect [%zu, %u)",
                                item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START,
                                SZ_4K);
                return -EUCLEAN;
@@ -332,7 +332,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
 }
 
 static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
-                          int slot)
+                          int slot, struct btrfs_key *prev_key)
 {
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        u32 sectorsize = fs_info->sectorsize;
@@ -356,6 +356,20 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
                        btrfs_item_size_nr(leaf, slot), csumsize);
                return -EUCLEAN;
        }
+       if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) {
+               u64 prev_csum_end;
+               u32 prev_item_size;
+
+               prev_item_size = btrfs_item_size_nr(leaf, slot - 1);
+               prev_csum_end = (prev_item_size / csumsize) * sectorsize;
+               prev_csum_end += prev_key->offset;
+               if (prev_csum_end > key->offset) {
+                       generic_err(leaf, slot - 1,
+"csum end range (%llu) goes beyond the start range (%llu) of the next csum item",
+                                   prev_csum_end, key->offset);
+                       return -EUCLEAN;
+               }
+       }
        return 0;
 }
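To make the arithmetic of the new overlap check concrete, a worked instance
with assumed values (4 KiB sectors, 4-byte crc32c checksums):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long sectorsize = 4096, csumsize = 4;
            unsigned long long prev_offset = 1 << 20;   /* prev item's key offset */
            unsigned long long prev_item_size = 32;     /* 32 bytes of checksums */
            unsigned long long next_offset = prev_offset + 16384;

            /* 32 / 4 = 8 checksums -> 8 sectors -> 32 KiB covered */
            unsigned long long prev_csum_end =
                    prev_offset + (prev_item_size / csumsize) * sectorsize;

            /* 1 MiB + 32 KiB > 1 MiB + 16 KiB: overlap, so -EUCLEAN */
            printf("overlap: %d\n", prev_csum_end > next_offset);
            return 0;
    }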
 
@@ -1355,7 +1369,7 @@ static int check_leaf_item(struct extent_buffer *leaf,
                ret = check_extent_data_item(leaf, key, slot, prev_key);
                break;
        case BTRFS_EXTENT_CSUM_KEY:
-               ret = check_csum_item(leaf, key, slot);
+               ret = check_csum_item(leaf, key, slot, prev_key);
                break;
        case BTRFS_DIR_ITEM_KEY:
        case BTRFS_DIR_INDEX_KEY:
index 6f757361db533ec6cbd306cf5b92cb0e1c96c771..d3f115909ff05a7deb495f69f3261624e92b2f91 100644 (file)
@@ -808,7 +808,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                                struct btrfs_ordered_sum,
                                                list);
                                if (!ret)
-                                       ret = btrfs_del_csums(trans, fs_info,
+                                       ret = btrfs_del_csums(trans,
+                                                             fs_info->csum_root,
                                                              sums->bytenr,
                                                              sums->len);
                                if (!ret)
@@ -3909,6 +3910,28 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+static int log_csums(struct btrfs_trans_handle *trans,
+                    struct btrfs_root *log_root,
+                    struct btrfs_ordered_sum *sums)
+{
+       int ret;
+
+       /*
+        * Due to extent cloning, we might have logged a csum item that covers a
+        * subrange of a cloned extent, and later we can end up logging a csum
+        * item for a larger subrange of the same extent or the entire range.
+        * This would leave csum items in the log tree that cover the same range
+        * and break the searches for checksums in the log tree, resulting in
+        * some checksums missing in the fs/subvolume tree. So just delete (or
+        * trim and adjust) any existing csum items in the log for this range.
+        */
+       ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
+       if (ret)
+               return ret;
+
+       return btrfs_csum_file_blocks(trans, log_root, sums);
+}
+
 static noinline int copy_items(struct btrfs_trans_handle *trans,
                               struct btrfs_inode *inode,
                               struct btrfs_path *dst_path,
@@ -4054,7 +4077,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                                                   struct btrfs_ordered_sum,
                                                   list);
                if (!ret)
-                       ret = btrfs_csum_file_blocks(trans, log, sums);
+                       ret = log_csums(trans, log, sums);
                list_del(&sums->list);
                kfree(sums);
        }
@@ -4274,7 +4297,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
                                                   struct btrfs_ordered_sum,
                                                   list);
                if (!ret)
-                       ret = btrfs_csum_file_blocks(trans, log_root, sums);
+                       ret = log_csums(trans, log_root, sums);
                list_del(&sums->list);
                kfree(sums);
        }
@@ -6294,9 +6317,28 @@ again:
                wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
                if (IS_ERR(wc.replay_dest)) {
                        ret = PTR_ERR(wc.replay_dest);
+
+                       /*
+                        * We didn't find the subvol, likely because it was
+                        * deleted.  This is ok, simply skip this log and go to
+                        * the next one.
+                        *
+                        * We need to exclude the root because we can't have
+                        * other log replays overwriting this log as we'll read
+                        * it back in a few more times.  This will keep our
+                        * block from being modified, and we'll just bail for
+                        * each subsequent pass.
+                        */
+                       if (ret == -ENOENT)
+                               ret = btrfs_pin_extent_for_log_replay(fs_info,
+                                                       log->node->start,
+                                                       log->node->len);
                        free_extent_buffer(log->node);
                        free_extent_buffer(log->commit_root);
                        kfree(log);
+
+                       if (!ret)
+                               goto next;
                        btrfs_handle_fs_error(fs_info, ret,
                                "Couldn't read target root for tree log recovery.");
                        goto error;
@@ -6328,7 +6370,6 @@ again:
                                                  &root->highest_objectid);
                }
 
-               key.offset = found_key.offset - 1;
                wc.replay_dest->log_root = NULL;
                free_extent_buffer(log->node);
                free_extent_buffer(log->commit_root);
@@ -6336,9 +6377,10 @@ again:
 
                if (ret)
                        goto error;
-
+next:
                if (found_key.offset == 0)
                        break;
+               key.offset = found_key.offset - 1;
        }
        btrfs_release_path(path);
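A toy model of the overlap that log_csums() above guards against, with
invented offsets: a csum item for a small cloned subrange is logged first,
then a larger subrange of the same extent, and the old item must be deleted
or trimmed before the new one is inserted:

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long long start, len; };

    static bool overlaps(struct range a, struct range b)
    {
            return a.start < b.start + b.len && b.start < a.start + a.len;
    }

    int main(void)
    {
            struct range logged = { 1 << 20,  8 << 10 };    /* earlier clone */
            struct range newer  = { 1 << 20, 64 << 10 };    /* later, larger */

            /* An overlap means the stale log item must go away first. */
            printf("delete old csum item first: %d\n", overlaps(logged, newer));
            return 0;
    }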
 
index 91caab63bdf521e5e16f30cd0e8428fb7b242a3b..76b84f2397b1bc75167e6db1d433674c0620760f 100644 (file)
@@ -324,6 +324,8 @@ again_search_slot:
                                }
                                if (ret < 0 && ret != -ENOENT)
                                        goto out;
+                               key.offset++;
+                               goto again_search_slot;
                        }
                        item_size -= sizeof(subid_le);
                        offset += sizeof(subid_le);
index d8e5560db285a0bbee1e7ce5e52d224ce35b08ab..9b78e720c6973c6e141fff069e212cd388fd58f8 100644 (file)
@@ -61,7 +61,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID1C3] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
-               .devs_max       = 0,
+               .devs_max       = 3,
                .devs_min       = 3,
                .tolerated_failures = 2,
                .devs_increment = 3,
@@ -73,7 +73,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID1C4] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
-               .devs_max       = 0,
+               .devs_max       = 4,
                .devs_min       = 4,
                .tolerated_failures = 3,
                .devs_increment = 4,
@@ -3881,7 +3881,11 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
                }
        }
 
-       num_devices = btrfs_num_devices(fs_info);
+       /*
+        * rw_devices will not change at the moment, as device add/delete/replace
+        * are excluded by EXCL_OP.
+        */
+       num_devices = fs_info->fs_devices->rw_devices;
 
        /*
         * SINGLE profile on-disk has no profile bit, but in-memory we have a
index fc1b564b9cfe57c2d2e5fea01ffab722169854e4..0ee5386926faf178c6fab3fa143be2bd5c1484e4 100644 (file)
@@ -168,7 +168,7 @@ btrfs_device_set_##name(struct btrfs_device *dev, u64 size)         \
        write_seqcount_end(&dev->data_seqcount);                        \
        preempt_enable();                                               \
 }
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 #define BTRFS_DEVICE_GETSET_FUNCS(name)                                        \
 static inline u64                                                      \
 btrfs_device_get_##name(const struct btrfs_device *dev)                        \
index d8c7242426bb349782c9c7b7fd106ea4b8f12eb5..b8d28370cfd7f25cb5ed6e44fa7a073547ba5319 100644 (file)
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 
 void invalidate_bh_lrus(void)
 {
-       on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+       on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
@@ -3031,11 +3031,9 @@ static void end_bio_bh_io_sync(struct bio *bio)
  * errors, this only handles the "we need to be able to
  * do IO at the final sector" case.
  */
-void guard_bio_eod(int op, struct bio *bio)
+void guard_bio_eod(struct bio *bio)
 {
        sector_t maxsector;
-       struct bio_vec *bvec = bio_last_bvec_all(bio);
-       unsigned truncated_bytes;
        struct hd_struct *part;
 
        rcu_read_lock();
@@ -3061,28 +3059,7 @@ void guard_bio_eod(int op, struct bio *bio)
        if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
                return;
 
-       /* Uhhuh. We've got a bio that straddles the device size! */
-       truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
-
-       /*
-        * The bio contains more than one segment which spans EOD, just return
-        * and let IO layer turn it into an EIO
-        */
-       if (truncated_bytes > bvec->bv_len)
-               return;
-
-       /* Truncate the bio.. */
-       bio->bi_iter.bi_size -= truncated_bytes;
-       bvec->bv_len -= truncated_bytes;
-
-       /* ..and clear the end of the buffer for reads */
-       if (op == REQ_OP_READ) {
-               struct bio_vec bv;
-
-               mp_bvec_last_segment(bvec, &bv);
-               zero_user(bv.bv_page, bv.bv_offset + bv.bv_len,
-                               truncated_bytes);
-       }
+       bio_truncate(bio, maxsector << 9);
 }
 
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
@@ -3118,15 +3095,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
 
-       /* Take care of bh's that straddle the end of the device */
-       guard_bio_eod(op, bio);
-
        if (buffer_meta(bh))
                op_flags |= REQ_META;
        if (buffer_prio(bh))
                op_flags |= REQ_PRIO;
        bio_set_op_attrs(bio, op, op_flags);
 
+       /* Take care of bh's that straddle the end of the device */
+       guard_bio_eod(bio);
+
        if (wbc) {
                wbc_init_bio(wbc, bio);
                wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
index f5a38910a82bfb94d71ceab28af196b3d0165c6f..9d09bb53c1ab4a36268eb583690ef37b2f08387c 100644 (file)
@@ -1011,18 +1011,13 @@ static int __ceph_is_single_caps(struct ceph_inode_info *ci)
        return rb_first(&ci->i_caps) == rb_last(&ci->i_caps);
 }
 
-static int __ceph_is_any_caps(struct ceph_inode_info *ci)
-{
-       return !RB_EMPTY_ROOT(&ci->i_caps);
-}
-
 int ceph_is_any_caps(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;
 
        spin_lock(&ci->i_ceph_lock);
-       ret = __ceph_is_any_caps(ci);
+       ret = __ceph_is_any_real_caps(ci);
        spin_unlock(&ci->i_ceph_lock);
 
        return ret;
@@ -1099,15 +1094,16 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
        if (removed)
                ceph_put_cap(mdsc, cap);
 
-       /* when reconnect denied, we remove session caps forcibly,
-        * i_wr_ref can be non-zero. If there are ongoing write,
-        * keep i_snap_realm.
-        */
-       if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
-               drop_inode_snap_realm(ci);
+       if (!__ceph_is_any_real_caps(ci)) {
+               /* when reconnect is denied, we remove session caps forcibly,
+                * and i_wr_ref can be non-zero. If there are ongoing writes,
+                * keep i_snap_realm.
+                */
+               if (ci->i_wr_ref == 0 && ci->i_snap_realm)
+                       drop_inode_snap_realm(ci);
 
-       if (!__ceph_is_any_real_caps(ci))
                __cap_delay_cancel(mdsc, ci);
+       }
 }
 
 struct cap_msg_args {
@@ -2764,7 +2760,19 @@ int ceph_get_caps(struct file *filp, int need, int want,
                if (ret == -EAGAIN)
                        continue;
                if (!ret) {
+                       struct ceph_mds_client *mdsc = fsc->mdsc;
+                       struct cap_wait cw;
                        DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+                       cw.ino = inode->i_ino;
+                       cw.tgid = current->tgid;
+                       cw.need = need;
+                       cw.want = want;
+
+                       spin_lock(&mdsc->caps_list_lock);
+                       list_add(&cw.list, &mdsc->cap_wait_list);
+                       spin_unlock(&mdsc->caps_list_lock);
+
                        add_wait_queue(&ci->i_cap_wq, &wait);
 
                        flags |= NON_BLOCKING;
@@ -2778,6 +2786,11 @@ int ceph_get_caps(struct file *filp, int need, int want,
                        }
 
                        remove_wait_queue(&ci->i_cap_wq, &wait);
+
+                       spin_lock(&mdsc->caps_list_lock);
+                       list_del(&cw.list);
+                       spin_unlock(&mdsc->caps_list_lock);
+
                        if (ret == -EAGAIN)
                                continue;
                }
@@ -2928,7 +2941,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                                ci->i_head_snapc = NULL;
                        }
                        /* see comment in __ceph_remove_cap() */
-                       if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
+                       if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm)
                                drop_inode_snap_realm(ci);
                }
        spin_unlock(&ci->i_ceph_lock);
index facb387c27356f99864f25e0c0016752c70261fb..c281f32b54f7b6121de9dda389bca07fb9395042 100644 (file)
@@ -139,6 +139,7 @@ static int caps_show(struct seq_file *s, void *p)
        struct ceph_fs_client *fsc = s->private;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        int total, avail, used, reserved, min, i;
+       struct cap_wait *cw;
 
        ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
        seq_printf(s, "total\t\t%d\n"
@@ -166,6 +167,18 @@ static int caps_show(struct seq_file *s, void *p)
        }
        mutex_unlock(&mdsc->mutex);
 
+       seq_printf(s, "\n\nWaiters:\n--------\n");
+       seq_printf(s, "tgid         ino                need             want\n");
+       seq_printf(s, "-----------------------------------------------------\n");
+
+       spin_lock(&mdsc->caps_list_lock);
+       list_for_each_entry(cw, &mdsc->cap_wait_list, list) {
+               seq_printf(s, "%-13d0x%-17lx%-17s%-17s\n", cw->tgid, cw->ino,
+                               ceph_cap_string(cw->need),
+                               ceph_cap_string(cw->want));
+       }
+       spin_unlock(&mdsc->caps_list_lock);
+
        return 0;
 }
 
index 068b029cf07390d1495ae3feffa1fb16d3c5872d..145d46ba25ae29621046e48960fff6539e5b39f3 100644 (file)
@@ -708,8 +708,10 @@ void ceph_mdsc_release_request(struct kref *kref)
                /* avoid calling iput_final() in mds dispatch threads */
                ceph_async_iput(req->r_inode);
        }
-       if (req->r_parent)
+       if (req->r_parent) {
                ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+               ceph_async_iput(req->r_parent);
+       }
        ceph_async_iput(req->r_target_inode);
        if (req->r_dentry)
                dput(req->r_dentry);
@@ -2015,7 +2017,7 @@ void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
        if (!nr)
                return;
        val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
-       if (!(val % CEPH_CAPS_PER_RELEASE)) {
+       if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
                atomic_set(&mdsc->cap_reclaim_pending, 0);
                ceph_queue_cap_reclaim_work(mdsc);
        }
@@ -2032,12 +2034,13 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
        size_t size = sizeof(struct ceph_mds_reply_dir_entry);
-       int order, num_entries;
+       unsigned int num_entries;
+       int order;
 
        spin_lock(&ci->i_ceph_lock);
        num_entries = ci->i_files + ci->i_subdirs;
        spin_unlock(&ci->i_ceph_lock);
-       num_entries = max(num_entries, 1);
+       num_entries = max(num_entries, 1U);
        num_entries = min(num_entries, opt->max_readdir);
 
        order = get_order(size * num_entries);
@@ -2675,8 +2678,10 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
        /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
        if (req->r_inode)
                ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
-       if (req->r_parent)
+       if (req->r_parent) {
                ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+               ihold(req->r_parent);
+       }
        if (req->r_old_dentry_dir)
                ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);
@@ -4168,6 +4173,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
        INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
        mdsc->last_renew_caps = jiffies;
        INIT_LIST_HEAD(&mdsc->cap_delay_list);
+       INIT_LIST_HEAD(&mdsc->cap_wait_list);
        spin_lock_init(&mdsc->cap_delay_lock);
        INIT_LIST_HEAD(&mdsc->snap_flush_list);
        spin_lock_init(&mdsc->snap_flush_lock);
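The threshold test changed in ceph_reclaim_caps_nr() above only differs from
the old one when nr > 1, so a worked example helps. Assuming for illustration
that CEPH_CAPS_PER_RELEASE were 16:

    #include <stdbool.h>
    #include <stdio.h>

    #define RELEASE 16      /* stand-in for CEPH_CAPS_PER_RELEASE */

    /* New test: did the counter cross a multiple of RELEASE in this step? */
    static bool crossed(int old_val, int nr)
    {
            int val = old_val + nr;
            return (val % RELEASE) < nr;
    }

    int main(void)
    {
            /*
             * 14 -> 18 crosses 16. The old test !(18 % 16) is false and the
             * work item is never queued; the new test 18 % 16 == 2 < 4 fires.
             */
            printf("crossed: %d\n", crossed(14, 4));
            return 0;
    }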
index 5cd131b41d84f4842bf11ed3084d6700dae5112b..14c7e8c49970adb914ff311c14906e4c84cda1d4 100644 (file)
@@ -340,6 +340,14 @@ struct ceph_quotarealm_inode {
        struct inode *inode;
 };
 
+struct cap_wait {
+       struct list_head        list;
+       unsigned long           ino;
+       pid_t                   tgid;
+       int                     need;
+       int                     want;
+};
+
 /*
  * mds client state
  */
@@ -416,6 +424,7 @@ struct ceph_mds_client {
        spinlock_t      caps_list_lock;
        struct          list_head caps_list; /* unused (reserved or
                                                unreserved) */
+       struct          list_head cap_wait_list;
        int             caps_total_count;    /* total caps allocated */
        int             caps_use_count;      /* in use */
        int             caps_use_max;        /* max used caps */
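The cap_wait record added above lives on the waiting task's stack and is
linked into cap_wait_list only for the duration of the sleep, which is what
lets the debugfs dumper enumerate live waiters. A userspace analogue of that
pattern, with all names invented and a pthread mutex standing in for
caps_list_lock:

    #include <pthread.h>
    #include <stdio.h>

    struct wait_rec { struct wait_rec *next; int tgid, need, want; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct wait_rec *waiters;

    static void wait_for_caps(int tgid, int need, int want)
    {
            struct wait_rec w = { .tgid = tgid, .need = need, .want = want };

            pthread_mutex_lock(&lock);
            w.next = waiters;
            waiters = &w;                   /* publish while we sleep */
            pthread_mutex_unlock(&lock);

            /* ... block here until the caps are granted ... */

            pthread_mutex_lock(&lock);      /* unlink before the frame dies */
            for (struct wait_rec **p = &waiters; *p; p = &(*p)->next) {
                    if (*p == &w) {
                            *p = w.next;
                            break;
                    }
            }
            pthread_mutex_unlock(&lock);
    }

    /* Like the caps_show() hunk: walk the list under the lock. */
    static void dump_waiters(void)
    {
            pthread_mutex_lock(&lock);
            for (struct wait_rec *p = waiters; p; p = p->next)
                    printf("tgid %d need %d want %d\n", p->tgid, p->need, p->want);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            wait_for_caps(1234, 1, 3);
            dump_waiters();         /* empty again: the record was unlinked */
            return 0;
    }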
index aeec1d6e3769e41819cb6a46cb593a4bee68d2d0..471bac335fae6ed51b8db1f289e7ec3b94244450 100644 (file)
@@ -158,6 +158,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                void *pexport_targets = NULL;
                struct ceph_timespec laggy_since;
                struct ceph_mds_info *info;
+               bool laggy;
 
                ceph_decode_need(p, end, sizeof(u64) + 1, bad);
                global_id = ceph_decode_64(p);
@@ -190,6 +191,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                if (err)
                        goto corrupt;
                ceph_decode_copy(p, &laggy_since, sizeof(laggy_since));
+               laggy = laggy_since.tv_sec != 0 || laggy_since.tv_nsec != 0;
                *p += sizeof(u32);
                ceph_decode_32_safe(p, end, namelen, bad);
                *p += namelen;
@@ -207,10 +209,11 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                        *p = info_end;
                }
 
-               dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
+               dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s%s\n",
                     i+1, n, global_id, mds, inc,
                     ceph_pr_addr(&addr),
-                    ceph_mds_state_name(state));
+                    ceph_mds_state_name(state),
+                    laggy ? "(laggy)" : "");
 
                if (mds < 0 || state <= 0)
                        continue;
@@ -230,8 +233,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                info->global_id = global_id;
                info->state = state;
                info->addr = addr;
-               info->laggy = (laggy_since.tv_sec != 0 ||
-                              laggy_since.tv_nsec != 0);
+               info->laggy = laggy;
                info->num_export_targets = num_export_targets;
                if (num_export_targets) {
                        info->export_targets = kcalloc(num_export_targets,
@@ -355,6 +357,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                m->m_damaged = false;
        }
 bad_ext:
+       dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
+            !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
        *p = end;
        dout("mdsmap_decode success epoch %u\n", m->m_epoch);
        return m;
index 9c9a7c68eea3be1191aaa6e0616138c0e6b10d3a..29a795f975dfa346e62aa29b004c792a9d830ace 100644 (file)
@@ -172,10 +172,10 @@ static const struct fs_parameter_enum ceph_mount_param_enums[] = {
 static const struct fs_parameter_spec ceph_mount_param_specs[] = {
        fsparam_flag_no ("acl",                         Opt_acl),
        fsparam_flag_no ("asyncreaddir",                Opt_asyncreaddir),
-       fsparam_u32     ("caps_max",                    Opt_caps_max),
+       fsparam_s32     ("caps_max",                    Opt_caps_max),
        fsparam_u32     ("caps_wanted_delay_max",       Opt_caps_wanted_delay_max),
        fsparam_u32     ("caps_wanted_delay_min",       Opt_caps_wanted_delay_min),
-       fsparam_s32     ("write_congestion_kb",         Opt_congestion_kb),
+       fsparam_u32     ("write_congestion_kb",         Opt_congestion_kb),
        fsparam_flag_no ("copyfrom",                    Opt_copyfrom),
        fsparam_flag_no ("dcache",                      Opt_dcache),
        fsparam_flag_no ("dirstat",                     Opt_dirstat),
@@ -187,8 +187,8 @@ static const struct fs_parameter_spec ceph_mount_param_specs[] = {
        fsparam_flag_no ("quotadf",                     Opt_quotadf),
        fsparam_u32     ("rasize",                      Opt_rasize),
        fsparam_flag_no ("rbytes",                      Opt_rbytes),
-       fsparam_s32     ("readdir_max_bytes",           Opt_readdir_max_bytes),
-       fsparam_s32     ("readdir_max_entries",         Opt_readdir_max_entries),
+       fsparam_u32     ("readdir_max_bytes",           Opt_readdir_max_bytes),
+       fsparam_u32     ("readdir_max_entries",         Opt_readdir_max_entries),
        fsparam_enum    ("recover_session",             Opt_recover_session),
        fsparam_flag_no ("require_active_mds",          Opt_require_active_mds),
        fsparam_u32     ("rsize",                       Opt_rsize),
@@ -328,7 +328,9 @@ static int ceph_parse_mount_param(struct fs_context *fc,
                fsopt->caps_wanted_delay_max = result.uint_32;
                break;
        case Opt_caps_max:
-               fsopt->caps_max = result.uint_32;
+               if (result.int_32 < 0)
+                       goto out_of_range;
+               fsopt->caps_max = result.int_32;
                break;
        case Opt_readdir_max_entries:
                if (result.uint_32 < 1)
@@ -547,25 +549,25 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_show_option(m, "recover_session", "clean");
 
        if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
-               seq_printf(m, ",wsize=%d", fsopt->wsize);
+               seq_printf(m, ",wsize=%u", fsopt->wsize);
        if (fsopt->rsize != CEPH_MAX_READ_SIZE)
-               seq_printf(m, ",rsize=%d", fsopt->rsize);
+               seq_printf(m, ",rsize=%u", fsopt->rsize);
        if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
-               seq_printf(m, ",rasize=%d", fsopt->rasize);
+               seq_printf(m, ",rasize=%u", fsopt->rasize);
        if (fsopt->congestion_kb != default_congestion_kb())
-               seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
+               seq_printf(m, ",write_congestion_kb=%u", fsopt->congestion_kb);
        if (fsopt->caps_max)
                seq_printf(m, ",caps_max=%d", fsopt->caps_max);
        if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
-               seq_printf(m, ",caps_wanted_delay_min=%d",
+               seq_printf(m, ",caps_wanted_delay_min=%u",
                         fsopt->caps_wanted_delay_min);
        if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
-               seq_printf(m, ",caps_wanted_delay_max=%d",
+               seq_printf(m, ",caps_wanted_delay_max=%u",
                           fsopt->caps_wanted_delay_max);
        if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
-               seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
+               seq_printf(m, ",readdir_max_entries=%u", fsopt->max_readdir);
        if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
-               seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
+               seq_printf(m, ",readdir_max_bytes=%u", fsopt->max_readdir_bytes);
        if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
                seq_show_option(m, "snapdirname", fsopt->snapdir_name);
 
index f0f9cb7447ac9bfd6c7cb6bd312c2676b6aaa543..3bf1a01cd536dcf1330fec6d7d8a89102c5f6c77 100644 (file)
 #define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT     60  /* cap release delay */
 
 struct ceph_mount_options {
-       int flags;
+       unsigned int flags;
 
-       int wsize;            /* max write size */
-       int rsize;            /* max read size */
-       int rasize;           /* max readahead */
-       int congestion_kb;    /* max writeback in flight */
-       int caps_wanted_delay_min, caps_wanted_delay_max;
+       unsigned int wsize;            /* max write size */
+       unsigned int rsize;            /* max read size */
+       unsigned int rasize;           /* max readahead */
+       unsigned int congestion_kb;    /* max writeback in flight */
+       unsigned int caps_wanted_delay_min, caps_wanted_delay_max;
        int caps_max;
-       int max_readdir;       /* max readdir result (entires) */
-       int max_readdir_bytes; /* max readdir result (bytes) */
+       unsigned int max_readdir;       /* max readdir result (entries) */
+       unsigned int max_readdir_bytes; /* max readdir result (bytes) */
 
        /*
         * everything above this point can be memcmp'd; everything below
index 00dfe17871ac2fa4c98f23db35010d89967de96d..c5e6eff5a38164e2dda4b20e1918c287233be1b3 100644 (file)
@@ -352,7 +352,7 @@ static struct kobject *cdev_get(struct cdev *p)
 
        if (owner && !try_module_get(owner))
                return NULL;
-       kobj = kobject_get(&p->kobj);
+       kobj = kobject_get_unless_zero(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
index fd0262ce5ad5b8ec55960c271ad72c960a2763c1..40705e86245190474322c86241311e487560b15c 100644 (file)
@@ -1061,7 +1061,7 @@ cap_unix(struct cifs_ses *ses)
 struct cached_fid {
        bool is_valid:1;        /* Do we have a useable root fid */
        bool file_all_info_is_valid:1;
-
+       bool has_lease:1;
        struct kref refcount;
        struct cifs_fid *fid;
        struct mutex fid_mutex;
@@ -1693,6 +1693,7 @@ struct cifs_fattr {
        struct timespec64 cf_atime;
        struct timespec64 cf_mtime;
        struct timespec64 cf_ctime;
+       u32             cf_cifstag;
 };
 
 static inline void free_dfs_info_param(struct dfs_info3_param *param)
index 4f554f019a98984fe3a2914d2f1365da6e82a6d6..cc86a67225d1b87fc56f57dbd6e0ad700edb18ce 100644 (file)
@@ -42,6 +42,7 @@
 #include "cifsproto.h"
 #include "cifs_unicode.h"
 #include "cifs_debug.h"
+#include "smb2proto.h"
 #include "fscache.h"
 #include "smbdirect.h"
 #ifdef CONFIG_CIFS_DFS_UPCALL
@@ -112,6 +113,8 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
 
        mutex_lock(&tcon->crfid.fid_mutex);
        tcon->crfid.is_valid = false;
+       /* cached handle is not valid, so SMB2_CLOSE won't be sent below */
+       close_shroot_lease_locked(&tcon->crfid);
        memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
        mutex_unlock(&tcon->crfid.fid_mutex);
 
index 3925a7bfc74d61c6c85f8ab6537498f064512870..d17587c2c4abb67ed9586715ab9313041bb58932 100644 (file)
@@ -139,6 +139,28 @@ retry:
        dput(dentry);
 }
 
+static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
+{
+       if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
+               return false;
+       /*
+        * The DFS tags should only be interpreted by the server side as per
+        * MS-FSCC 2.1.2.1, but let's include them anyway.
+        *
+        * Besides, if cf_cifstag is unset (0), then we still need it to be
+        * revalidated to know exactly what reparse point it is.
+        */
+       switch (fattr->cf_cifstag) {
+       case IO_REPARSE_TAG_DFS:
+       case IO_REPARSE_TAG_DFSR:
+       case IO_REPARSE_TAG_SYMLINK:
+       case IO_REPARSE_TAG_NFS:
+       case 0:
+               return true;
+       }
+       return false;
+}
+
 static void
 cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
 {
@@ -158,7 +180,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
         * is a symbolic link, DFS referral or a reparse point with a direct
         * access like junctions, deduplicated files, NFS symlinks.
         */
-       if (fattr->cf_cifsattrs & ATTR_REPARSE)
+       if (reparse_file_needs_reval(fattr))
                fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
 
        /* non-unix readdir doesn't provide nlink */
@@ -194,19 +216,37 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
        }
 }
 
+static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
+{
+       const FILE_DIRECTORY_INFO *fi = info;
+
+       memset(fattr, 0, sizeof(*fattr));
+       fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes);
+       fattr->cf_eof = le64_to_cpu(fi->EndOfFile);
+       fattr->cf_bytes = le64_to_cpu(fi->AllocationSize);
+       fattr->cf_createtime = le64_to_cpu(fi->CreationTime);
+       fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime);
+       fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime);
+       fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime);
+}
+
 void
 cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
                       struct cifs_sb_info *cifs_sb)
 {
-       memset(fattr, 0, sizeof(*fattr));
-       fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes);
-       fattr->cf_eof = le64_to_cpu(info->EndOfFile);
-       fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
-       fattr->cf_createtime = le64_to_cpu(info->CreationTime);
-       fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
-       fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
-       fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+       __dir_info_to_fattr(fattr, info);
+       cifs_fill_common_info(fattr, cifs_sb);
+}
 
+static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+                                      SEARCH_ID_FULL_DIR_INFO *info,
+                                      struct cifs_sb_info *cifs_sb)
+{
+       __dir_info_to_fattr(fattr, info);
+
+       /* See MS-FSCC 2.4.18 FileIdFullDirectoryInformation */
+       if (fattr->cf_cifsattrs & ATTR_REPARSE)
+               fattr->cf_cifstag = le32_to_cpu(info->EaSize);
        cifs_fill_common_info(fattr, cifs_sb);
 }
 
@@ -755,6 +795,11 @@ static int cifs_filldir(char *find_entry, struct file *file,
                                       (FIND_FILE_STANDARD_INFO *)find_entry,
                                       cifs_sb);
                break;
+       case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+               cifs_fulldir_info_to_fattr(&fattr,
+                                          (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+                                          cifs_sb);
+               break;
        default:
                cifs_dir_info_to_fattr(&fattr,
                                       (FILE_DIRECTORY_INFO *)find_entry,
index 8b0b512c57920cebd5a022c2357a31e376cf87ed..afe1f03aabe386cab41a9a4f502115b1d5ded406 100644 (file)
@@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                goto out;
 
 
-        if (oparms->tcon->use_resilient) {
+       if (oparms->tcon->use_resilient) {
                /* default timeout is 0, servers pick default (120 seconds) */
                nr_ioctl_req.Timeout =
                        cpu_to_le32(oparms->tcon->handle_timeout);
index 18c7a33adcebf36b7908566897de44a1de683c46..5ef5e97a6d13eb8171c5b49ce2f02096dc18a391 100644 (file)
@@ -95,6 +95,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                goto finished;
        }
 
+       memset(&oparms, 0, sizeof(struct cifs_open_parms));
        oparms.tcon = tcon;
        oparms.desired_access = desired_access;
        oparms.disposition = create_disposition;
index a5c96bc522cb3e3afaf2f8e582aa586f5325cfc0..6250370c11702b1346abeec4a971b8ff6d07fa16 100644 (file)
@@ -616,6 +616,7 @@ smb2_close_cached_fid(struct kref *ref)
                           cfid->fid->volatile_fid);
                cfid->is_valid = false;
                cfid->file_all_info_is_valid = false;
+               cfid->has_lease = false;
        }
 }
 
@@ -626,13 +627,28 @@ void close_shroot(struct cached_fid *cfid)
        mutex_unlock(&cfid->fid_mutex);
 }
 
+void close_shroot_lease_locked(struct cached_fid *cfid)
+{
+       if (cfid->has_lease) {
+               cfid->has_lease = false;
+               kref_put(&cfid->refcount, smb2_close_cached_fid);
+       }
+}
+
+void close_shroot_lease(struct cached_fid *cfid)
+{
+       mutex_lock(&cfid->fid_mutex);
+       close_shroot_lease_locked(cfid);
+       mutex_unlock(&cfid->fid_mutex);
+}
+
 void
 smb2_cached_lease_break(struct work_struct *work)
 {
        struct cached_fid *cfid = container_of(work,
                                struct cached_fid, lease_break);
 
-       close_shroot(cfid);
+       close_shroot_lease(cfid);
 }
 
 /*
@@ -773,6 +789,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        /* BB TBD check to see if oplock level check can be removed below */
        if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
                kref_get(&tcon->crfid.refcount);
+               tcon->crfid.has_lease = true;
                smb2_parse_contexts(server, o_rsp,
                                &oparms.fid->epoch,
                                oparms.fid->lease_key, &oplock, NULL);
index 0ab6b1200288e304b0e9b724985d8ac3015292b4..9434f6dd8df327a104a2c427dda36bfc1f553718 100644 (file)
@@ -1847,7 +1847,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
        if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
                return 0;
 
-       close_shroot(&tcon->crfid);
+       close_shroot_lease(&tcon->crfid);
 
        rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
                             &total_len);
index a18272c987fed92ebbc0347c3e68a5565499ff23..27d29f2eb6c82f619331ab44b169460b777a86d7 100644 (file)
@@ -70,6 +70,8 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
 extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
                        struct cifs_fid *pfid);
 extern void close_shroot(struct cached_fid *cfid);
+extern void close_shroot_lease(struct cached_fid *cfid);
+extern void close_shroot_lease_locked(struct cached_fid *cfid);
 extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
                                   struct smb2_file_all_info *src);
 extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
index 040df1f5e1c8b1d1f6f0bcc766b1e4918ab6e839..40cca351273f4c79a093a1c8aa5117cb6ca2f8eb 100644 (file)
@@ -151,7 +151,7 @@ static struct key *search_fscrypt_keyring(struct key *keyring,
 }
 
 #define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE    \
-       (CONST_STRLEN("fscrypt-") + FIELD_SIZEOF(struct super_block, s_id))
+       (CONST_STRLEN("fscrypt-") + sizeof_field(struct super_block, s_id))
 
 #define FSCRYPT_MK_DESCRIPTION_SIZE    (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1)
 
index 0ec4f270139f6774d97700a79bd6d0be52752ff1..00b4d15bb811af2506a493d3f889bbee7dd179b5 100644 (file)
@@ -39,6 +39,8 @@
 #include <linux/atomic.h>
 #include <linux/prefetch.h>
 
+#include "internal.h"
+
 /*
  * How many user pages to map in one call to get_user_pages().  This determines
  * the size of a structure in the slab cache
index d31b6c72b47646fd8ae2ebd3746de307f8017fa0..dc1a1d5d825b48de17f3192f8d0c50829dd07885 100644 (file)
@@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
                spin_unlock(&inode->i_lock);
                spin_unlock(&sb->s_inode_list_lock);
 
-               cond_resched();
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
                iput(toput_inode);
                toput_inode = inode;
 
+               cond_resched();
                spin_lock(&sb->s_inode_list_lock);
        }
        spin_unlock(&sb->s_inode_list_lock);
index a13a78725c571372e2af4ebd9dc204eed8a1cadb..b766c3ee5fa8cbf955d0364687cb9cab97a92eac 100644 (file)
@@ -649,6 +649,8 @@ ssize_t erofs_listxattr(struct dentry *dentry,
        struct listxattr_iter it;
 
        ret = init_inode_xattrs(d_inode(dentry));
+       if (ret == -ENOATTR)
+               return 0;
        if (ret)
                return ret;
 
index d4d4fdfac1a654ad627d8280c97e7718a73340ae..1ee04e76bbe0404b95d3a0be205266ff5fdcf4f6 100644 (file)
@@ -133,10 +133,13 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
 {
        struct rb_node *node;
        struct ext4_system_zone *entry;
+       struct ext4_system_blocks *system_blks;
        int first = 1;
 
        printk(KERN_INFO "System zones: ");
-       node = rb_first(&sbi->system_blks->root);
+       rcu_read_lock();
+       system_blks = rcu_dereference(sbi->system_blks);
+       node = rb_first(&system_blks->root);
        while (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
                printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
@@ -144,6 +147,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
                first = 0;
                node = rb_next(node);
        }
+       rcu_read_unlock();
        printk(KERN_CONT "\n");
 }
 
index 9fdd2b269d6172a6856ef0fb98419bf7ed599671..9f00fc0bf21d2ac59d8f589bd58061d091dcb478 100644 (file)
@@ -72,6 +72,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
        const char *error_msg = NULL;
        const int rlen = ext4_rec_len_from_disk(de->rec_len,
                                                dir->i_sb->s_blocksize);
+       const int next_offset = ((char *) de - buf) + rlen;
 
        if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
                error_msg = "rec_len is smaller than minimal";
@@ -79,8 +80,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
                error_msg = "rec_len % 4 != 0";
        else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
                error_msg = "rec_len is too small for name_len";
-       else if (unlikely(((char *) de - buf) + rlen > size))
+       else if (unlikely(next_offset > size))
                error_msg = "directory entry overrun";
+       else if (unlikely(next_offset > size - EXT4_DIR_REC_LEN(1) &&
+                         next_offset != size))
+               error_msg = "directory entry too close to block end";
        else if (unlikely(le32_to_cpu(de->inode) >
                        le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
                error_msg = "inode out of bounds";
index dc333e8e51e89a4951f02b0fbc2a91cf32434a59..8ca4a23129aafd389f0a6a126e25d3640b38dd37 100644 (file)
@@ -921,8 +921,8 @@ repeat_in_this_group:
                if (!handle) {
                        BUG_ON(nblocks <= 0);
                        handle = __ext4_journal_start_sb(dir->i_sb, line_no,
-                                                        handle_type, nblocks,
-                                                        0, 0);
+                                handle_type, nblocks, 0,
+                                ext4_trans_default_revoke_credits(sb));
                        if (IS_ERR(handle)) {
                                err = PTR_ERR(handle);
                                ext4_std_error(sb, err);
index 92a9da1774aaad37e9d51dcd61cf821d67423a1d..bbce1c328d8557e177f33365600085e5610afdd9 100644 (file)
@@ -25,7 +25,7 @@
  * For constructing the negative timestamp lower bound value.
  * binary: 10000000 00000000 00000000 00000000
  */
-#define LOWER_MSB_1 (-0x80000000L)
+#define LOWER_MSB_1 (-(UPPER_MSB_0) - 1L)  /* avoid overflow */
 /*
  * For constructing the negative timestamp upper bound value.
  * binary: 11111111 11111111 11111111 11111111
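The "avoid overflow" note is terse, so a sketch of what the old literal got
wrong may help. Assuming UPPER_MSB_0 is 0x7fffffffL (its definition is not in
this hunk): on an ILP32 build, 0x80000000 does not fit in long, so the
constant -0x80000000L is actually an unsigned long with value 0x80000000
rather than -2147483648, while -(UPPER_MSB_0) - 1L stays signed throughout:

    #include <stdio.h>

    #define UPPER_MSB_0 (0x7fffffffL)               /* assumed definition */
    #define LOWER_MSB_1 (-(UPPER_MSB_0) - 1L)       /* == -2147483648 */

    int main(void)
    {
            /* Prints -2147483648 with both 32-bit and 64-bit longs. */
            printf("%ld\n", LOWER_MSB_1);
            return 0;
    }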
index 28f28de0c1b67e116228f3413bd322bb681f5074..629a25d999f07d17977783439162163dd4f89d7a 100644 (file)
@@ -5692,7 +5692,7 @@ int ext4_expand_extra_isize(struct inode *inode,
        error = ext4_journal_get_write_access(handle, iloc->bh);
        if (error) {
                brelse(iloc->bh);
-               goto out_stop;
+               goto out_unlock;
        }
 
        error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
@@ -5702,8 +5702,8 @@ int ext4_expand_extra_isize(struct inode *inode,
        if (!error)
                error = rc;
 
+out_unlock:
        ext4_write_unlock_xattr(inode, &no_expand);
-out_stop:
        ext4_journal_stop(handle);
        return error;
 }
index a856997d87b54c83ae59ea78ed9475a8e4a3c855..1cb42d94078479dc9a9e107c79e361e0c385a02b 100644 (file)
@@ -2164,7 +2164,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
        struct buffer_head *bh = NULL;
        struct ext4_dir_entry_2 *de;
        struct super_block *sb;
+#ifdef CONFIG_UNICODE
        struct ext4_sb_info *sbi;
+#endif
        struct ext4_filename fname;
        int     retval;
        int     dx_fallback=0;
@@ -2176,12 +2178,12 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
                csum_size = sizeof(struct ext4_dir_entry_tail);
 
        sb = dir->i_sb;
-       sbi = EXT4_SB(sb);
        blocksize = sb->s_blocksize;
        if (!dentry->d_name.len)
                return -EINVAL;
 
 #ifdef CONFIG_UNICODE
+       sbi = EXT4_SB(sb);
        if (ext4_has_strict_mode(sbi) && IS_CASEFOLDED(dir) &&
            sbi->s_encoding && utf8_validate(sbi->s_encoding, &dentry->d_name))
                return -EINVAL;
@@ -2822,7 +2824,7 @@ bool ext4_empty_dir(struct inode *inode)
 {
        unsigned int offset;
        struct buffer_head *bh;
-       struct ext4_dir_entry_2 *de, *de1;
+       struct ext4_dir_entry_2 *de;
        struct super_block *sb;
 
        if (ext4_has_inline_data(inode)) {
@@ -2847,19 +2849,25 @@ bool ext4_empty_dir(struct inode *inode)
                return true;
 
        de = (struct ext4_dir_entry_2 *) bh->b_data;
-       de1 = ext4_next_entry(de, sb->s_blocksize);
-       if (le32_to_cpu(de->inode) != inode->i_ino ||
-                       le32_to_cpu(de1->inode) == 0 ||
-                       strcmp(".", de->name) || strcmp("..", de1->name)) {
-               ext4_warning_inode(inode, "directory missing '.' and/or '..'");
+       if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
+                                0) ||
+           le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
+               ext4_warning_inode(inode, "directory missing '.'");
+               brelse(bh);
+               return true;
+       }
+       offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+       de = ext4_next_entry(de, sb->s_blocksize);
+       if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
+                                offset) ||
+           le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
+               ext4_warning_inode(inode, "directory missing '..'");
                brelse(bh);
                return true;
        }
-       offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
-                ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
-       de = ext4_next_entry(de1, sb->s_blocksize);
+       offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
        while (offset < inode->i_size) {
-               if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
+               if (!(offset & (sb->s_blocksize - 1))) {
                        unsigned int lblock;
                        brelse(bh);
                        lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
@@ -2870,12 +2878,11 @@ bool ext4_empty_dir(struct inode *inode)
                        }
                        if (IS_ERR(bh))
                                return true;
-                       de = (struct ext4_dir_entry_2 *) bh->b_data;
                }
+               de = (struct ext4_dir_entry_2 *) (bh->b_data +
+                                       (offset & (sb->s_blocksize - 1)));
                if (ext4_check_dir_entry(inode, NULL, de, bh,
                                         bh->b_data, bh->b_size, offset)) {
-                       de = (struct ext4_dir_entry_2 *)(bh->b_data +
-                                                        sb->s_blocksize);
                        offset = (offset | (sb->s_blocksize - 1)) + 1;
                        continue;
                }
@@ -2884,7 +2891,6 @@ bool ext4_empty_dir(struct inode *inode)
                        return false;
                }
                offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
-               de = ext4_next_entry(de, sb->s_blocksize);
        }
        brelse(bh);
        return true;
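
The rewritten ext4_empty_dir() loop above derives everything from a single byte offset: because the block size is a power of two, offset & (blocksize - 1) is the position inside the current block, and a result of zero marks exactly a block boundary where the next buffer must be read. A minimal user-space demonstration of that arithmetic, with an assumed 4096-byte block size:

	#include <assert.h>

	int main(void)
	{
		const unsigned int blocksize = 4096;	/* assumed power of two */
		unsigned int offset = 2 * blocksize;	/* start of the third block */

		assert((offset & (blocksize - 1)) == 0);	/* block boundary */
		offset += 40;
		assert((offset & (blocksize - 1)) == 40);	/* 40 bytes into it */
		/* the corrupt-entry path above rounds up to the next block this way */
		offset = (offset | (blocksize - 1)) + 1;
		assert(offset == 3 * blocksize);
		return 0;
	}
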
index 1d82b56d9b11f362b35582ab0f804a5ca44f0189..2937a8873fe135efc165e4ba954ef2855e362696 100644 (file)
@@ -1900,6 +1900,13 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                }
                sbi->s_commit_interval = HZ * arg;
        } else if (token == Opt_debug_want_extra_isize) {
+               if ((arg & 1) ||
+                   (arg < 4) ||
+                   (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
+                       ext4_msg(sb, KERN_ERR,
+                                "Invalid want_extra_isize %d", arg);
+                       return -1;
+               }
                sbi->s_want_extra_isize = arg;
        } else if (token == Opt_max_batch_time) {
                sbi->s_max_batch_time = arg;
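
The new check above rejects a debug_want_extra_isize that is odd, smaller than 4, or larger than the space past the 128-byte base inode, instead of accepting it and silently clamping later. The accepted range restated as a tiny user-space predicate; EXT4_GOOD_OLD_INODE_SIZE is the fixed 128, while the 256-byte inode in the example is only an assumption:

	#include <stdbool.h>
	#include <stdio.h>

	#define EXT4_GOOD_OLD_INODE_SIZE 128	/* fixed on-disk base inode size */

	static bool want_extra_isize_ok(int arg, int inode_size)
	{
		/* mirrors the three rejects above: odd, < 4, past the inode end */
		return !(arg & 1) && arg >= 4 &&
		       arg <= inode_size - EXT4_GOOD_OLD_INODE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", want_extra_isize_ok(32, 256));	/* 1: accepted */
		printf("%d\n", want_extra_isize_ok(33, 256));	/* 0: odd */
		printf("%d\n", want_extra_isize_ok(2, 256));	/* 0: below 4 */
		printf("%d\n", want_extra_isize_ok(200, 256));	/* 0: too large */
		return 0;
	}
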
@@ -3554,40 +3561,6 @@ int ext4_calculate_overhead(struct super_block *sb)
        return 0;
 }
 
-static void ext4_clamp_want_extra_isize(struct super_block *sb)
-{
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_super_block *es = sbi->s_es;
-       unsigned def_extra_isize = sizeof(struct ext4_inode) -
-                                               EXT4_GOOD_OLD_INODE_SIZE;
-
-       if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
-               sbi->s_want_extra_isize = 0;
-               return;
-       }
-       if (sbi->s_want_extra_isize < 4) {
-               sbi->s_want_extra_isize = def_extra_isize;
-               if (ext4_has_feature_extra_isize(sb)) {
-                       if (sbi->s_want_extra_isize <
-                           le16_to_cpu(es->s_want_extra_isize))
-                               sbi->s_want_extra_isize =
-                                       le16_to_cpu(es->s_want_extra_isize);
-                       if (sbi->s_want_extra_isize <
-                           le16_to_cpu(es->s_min_extra_isize))
-                               sbi->s_want_extra_isize =
-                                       le16_to_cpu(es->s_min_extra_isize);
-               }
-       }
-       /* Check if enough inode space is available */
-       if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
-           (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
-                                                       sbi->s_inode_size)) {
-               sbi->s_want_extra_isize = def_extra_isize;
-               ext4_msg(sb, KERN_INFO,
-                        "required extra inode space not available");
-       }
-}
-
 static void ext4_set_resv_clusters(struct super_block *sb)
 {
        ext4_fsblk_t resv_clusters;
@@ -3795,6 +3768,68 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         */
        sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
 
+       if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
+               sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
+               sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
+       } else {
+               sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
+               sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+               if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+                       ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+                                sbi->s_first_ino);
+                       goto failed_mount;
+               }
+               if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
+                   (!is_power_of_2(sbi->s_inode_size)) ||
+                   (sbi->s_inode_size > blocksize)) {
+                       ext4_msg(sb, KERN_ERR,
+                              "unsupported inode size: %d",
+                              sbi->s_inode_size);
+                       goto failed_mount;
+               }
+               /*
+                * i_atime_extra is the last extra field available for
+                * [acm]times in struct ext4_inode. Checking for that
+                * field should suffice to ensure we have extra space
+                * for all three.
+                */
+               if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
+                       sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
+                       sb->s_time_gran = 1;
+                       sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
+               } else {
+                       sb->s_time_gran = NSEC_PER_SEC;
+                       sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
+               }
+               sb->s_time_min = EXT4_TIMESTAMP_MIN;
+       }
+       if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+               sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+                       EXT4_GOOD_OLD_INODE_SIZE;
+               if (ext4_has_feature_extra_isize(sb)) {
+                       unsigned v, max = (sbi->s_inode_size -
+                                          EXT4_GOOD_OLD_INODE_SIZE);
+
+                       v = le16_to_cpu(es->s_want_extra_isize);
+                       if (v > max) {
+                               ext4_msg(sb, KERN_ERR,
+                                        "bad s_want_extra_isize: %d", v);
+                               goto failed_mount;
+                       }
+                       if (sbi->s_want_extra_isize < v)
+                               sbi->s_want_extra_isize = v;
+
+                       v = le16_to_cpu(es->s_min_extra_isize);
+                       if (v > max) {
+                               ext4_msg(sb, KERN_ERR,
+                                        "bad s_min_extra_isize: %d", v);
+                               goto failed_mount;
+                       }
+                       if (sbi->s_want_extra_isize < v)
+                               sbi->s_want_extra_isize = v;
+               }
+       }
+
        if (sbi->s_es->s_mount_opts[0]) {
                char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
                                              sizeof(sbi->s_es->s_mount_opts),
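
The extended-timestamp probe moved above decides granularity from layout alone: if the inode is large enough to reach past i_atime_extra, all three [acm]time fields have their extra 32 bits. The same offsetof-plus-sizeof test against a stand-in struct; the layout below is illustrative, not the real struct ext4_inode:

	#include <stddef.h>
	#include <stdio.h>

	struct demo_inode {			/* hypothetical stand-in layout */
		char base[128];			/* EXT4_GOOD_OLD_INODE_SIZE bytes */
		unsigned short extra_isize;
		unsigned short pad;
		unsigned int ctime_extra;
		unsigned int mtime_extra;
		unsigned int atime_extra;	/* last of the three extras */
	};

	int main(void)
	{
		size_t need = offsetof(struct demo_inode, atime_extra) +
			      sizeof(((struct demo_inode *)0)->atime_extra);

		/* any inode size >= need can store nanosecond [acm]times */
		printf("extra timestamps need %zu bytes\n", need);
		return 0;
	}
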
@@ -4033,42 +4068,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                                      has_huge_files);
        sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
 
-       if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
-               sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
-               sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
-       } else {
-               sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
-               sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
-               if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
-                       ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
-                                sbi->s_first_ino);
-                       goto failed_mount;
-               }
-               if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
-                   (!is_power_of_2(sbi->s_inode_size)) ||
-                   (sbi->s_inode_size > blocksize)) {
-                       ext4_msg(sb, KERN_ERR,
-                              "unsupported inode size: %d",
-                              sbi->s_inode_size);
-                       goto failed_mount;
-               }
-               /*
-                * i_atime_extra is the last extra field available for [acm]times in
-                * struct ext4_inode. Checking for that field should suffice to ensure
-                * we have extra space for all three.
-                */
-               if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
-                       sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
-                       sb->s_time_gran = 1;
-                       sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
-               } else {
-                       sb->s_time_gran = NSEC_PER_SEC;
-                       sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
-               }
-
-               sb->s_time_min = EXT4_TIMESTAMP_MIN;
-       }
-
        sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
        if (ext4_has_feature_64bit(sb)) {
                if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
@@ -4517,8 +4516,6 @@ no_journal:
        } else if (ret)
                goto failed_mount4a;
 
-       ext4_clamp_want_extra_isize(sb);
-
        ext4_set_resv_clusters(sb);
 
        err = ext4_setup_system_zone(sb);
@@ -5306,8 +5303,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
-       ext4_clamp_want_extra_isize(sb);
-
        if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
            test_opt(sb, JOURNAL_CHECKSUM)) {
                ext4_msg(sb, KERN_ERR, "changing journal_checksum "
index a63d779eac10409b6e44a00c70ed7bf8d6acb832..ce715380143cd890057605575323ecddc01706d1 100644 (file)
@@ -882,6 +882,7 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
        struct fuse_args_pages *ap = &ia->ap;
        loff_t pos = page_offset(ap->pages[0]);
        size_t count = ap->num_pages << PAGE_SHIFT;
+       ssize_t res;
        int err;
 
        ap->args.out_pages = true;
@@ -896,7 +897,8 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
                if (!err)
                        return;
        } else {
-               err = fuse_simple_request(fc, &ap->args);
+               res = fuse_simple_request(fc, &ap->args);
+               err = res < 0 ? res : 0;
        }
        fuse_readpages_end(fc, &ap->args, err);
 }
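
fuse_simple_request() returns a ssize_t; funnelling that straight into the int err risked mangling a large positive transfer count, which is why the hunk above widens the local and folds only negative values into the error. The effect in isolation; fake_request() is made up for the demo and a 64-bit ssize_t is assumed:

	#include <stdio.h>
	#include <sys/types.h>

	static ssize_t fake_request(void)
	{
		return 0x100000000L;		/* big positive byte count */
	}

	int main(void)
	{
		int err = (int)fake_request();	/* truncates: may look like 0 */
		printf("direct assign: %d\n", err);

		ssize_t res = fake_request();
		err = res < 0 ? (int)res : 0;	/* only negative errnos survive */
		printf("folded: %d\n", err);
		return 0;
	}
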
index d5c2a315861064b459a0adef1d6d3e490d05dbff..a66e425884d14e000aa2f85b85c776cae0d347c0 100644 (file)
@@ -1498,8 +1498,10 @@ static int __init init_hugetlbfs_fs(void)
        /* other hstates are optional */
        i = 0;
        for_each_hstate(h) {
-               if (i == default_hstate_idx)
+               if (i == default_hstate_idx) {
+                       i++;
                        continue;
+               }
 
                mnt = mount_one_hugetlbfs(h);
                if (IS_ERR(mnt))
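
The hugetlbfs fix above is the classic continue-skips-the-increment pattern: the index i is advanced by hand later in the loop body (outside this hunk), so bailing out early with a bare continue left i stuck at default_hstate_idx and misnumbered every following hstate. Reduced to its essence:

	#include <stdio.h>

	int main(void)
	{
		int skip = 1, i = 0;

		for (int n = 0; n < 4; n++) {	/* stands in for for_each_hstate() */
			if (i == skip) {
				i++;		/* the fix: advance before continue */
				continue;
			}
			printf("mounting hstate index %d\n", i);
			i++;
		}
		return 0;
	}
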
index fef457a42882ba64092ae01577d8a3746f03f6e8..96d62d97694ef333e14ab35060084c6f486339ff 100644 (file)
@@ -676,6 +676,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
        struct inode *inode, *next;
        LIST_HEAD(dispose);
 
+again:
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
@@ -698,6 +699,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
+               if (need_resched()) {
+                       spin_unlock(&sb->s_inode_list_lock);
+                       cond_resched();
+                       dispose_list(&dispose);
+                       goto again;
+               }
        }
        spin_unlock(&sb->s_inode_list_lock);
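
The pattern added to invalidate_inodes() above bounds how long s_inode_list_lock is held: whenever a reschedule is due, drop the lock, dispose of the batch gathered so far, and restart the scan from the top. The same shape in portable form, an illustrative sketch only, with a pthread mutex standing in for the spinlock and sched_yield() for cond_resched():

	#include <pthread.h>
	#include <sched.h>
	#include <stdbool.h>

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	static void drain(bool (*grab_one)(void), bool (*should_yield)(void),
			  void (*dispose_batch)(void))
	{
	again:
		pthread_mutex_lock(&list_lock);
		while (grab_one()) {		/* moved one item to a private list */
			if (should_yield()) {
				pthread_mutex_unlock(&list_lock);
				sched_yield();
				dispose_batch();	/* heavy work done unlocked */
				goto again;		/* rescan from the start */
			}
		}
		pthread_mutex_unlock(&list_lock);
		dispose_batch();
	}
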
 
index 4a7da1df573da9070fa352c1ba5d538b61aaecfe..e3fa69544b66c0bd18e99a023ccf0f32f7b8e009 100644 (file)
@@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
 /*
  * buffer.c
  */
-extern void guard_bio_eod(int rw, struct bio *bio);
+extern void guard_bio_eod(struct bio *bio);
 extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block, struct iomap *iomap);
 
index 74b40506c5d916324404af2087395a72c2e73fee..5147d2213b019f9b80841a0e14a1709ba719bf69 100644 (file)
@@ -49,7 +49,6 @@ struct io_worker {
        struct hlist_nulls_node nulls_node;
        struct list_head all_list;
        struct task_struct *task;
-       wait_queue_head_t wait;
        struct io_wqe *wqe;
 
        struct io_wq_work *cur_work;
@@ -93,7 +92,6 @@ struct io_wqe {
        struct io_wqe_acct acct[2];
 
        struct hlist_nulls_head free_list;
-       struct hlist_nulls_head busy_list;
        struct list_head all_list;
 
        struct io_wq *wq;
@@ -258,7 +256,7 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
 
        worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
        if (io_worker_get(worker)) {
-               wake_up(&worker->wait);
+               wake_up_process(worker->task);
                io_worker_release(worker);
                return true;
        }
@@ -328,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
        if (worker->flags & IO_WORKER_F_FREE) {
                worker->flags &= ~IO_WORKER_F_FREE;
                hlist_nulls_del_init_rcu(&worker->nulls_node);
-               hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
        }
 
        /*
@@ -366,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 {
        if (!(worker->flags & IO_WORKER_F_FREE)) {
                worker->flags |= IO_WORKER_F_FREE;
-               hlist_nulls_del_init_rcu(&worker->nulls_node);
                hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        }
 
@@ -433,6 +429,8 @@ next:
                if (signal_pending(current))
                        flush_signals(current);
 
+               cond_resched();
+
                spin_lock_irq(&worker->lock);
                worker->cur_work = work;
                spin_unlock_irq(&worker->lock);
@@ -447,10 +445,14 @@ next:
                        task_unlock(current);
                }
                if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
-                   wq->mm && mmget_not_zero(wq->mm)) {
-                       use_mm(wq->mm);
-                       set_fs(USER_DS);
-                       worker->mm = wq->mm;
+                   wq->mm) {
+                       if (mmget_not_zero(wq->mm)) {
+                               use_mm(wq->mm);
+                               set_fs(USER_DS);
+                               worker->mm = wq->mm;
+                       } else {
+                               work->flags |= IO_WQ_WORK_CANCEL;
+                       }
                }
                if (!worker->creds)
                        worker->creds = override_creds(wq->creds);
@@ -492,28 +494,46 @@ next:
        } while (1);
 }
 
+static inline void io_worker_spin_for_work(struct io_wqe *wqe)
+{
+       int i = 0;
+
+       while (++i < 1000) {
+               if (io_wqe_run_queue(wqe))
+                       break;
+               if (need_resched())
+                       break;
+               cpu_relax();
+       }
+}
+
 static int io_wqe_worker(void *data)
 {
        struct io_worker *worker = data;
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;
-       DEFINE_WAIT(wait);
+       bool did_work;
 
        io_worker_start(wqe, worker);
 
+       did_work = false;
        while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
-               prepare_to_wait(&worker->wait, &wait, TASK_INTERRUPTIBLE);
-
+               set_current_state(TASK_INTERRUPTIBLE);
+loop:
+               if (did_work)
+                       io_worker_spin_for_work(wqe);
                spin_lock_irq(&wqe->lock);
                if (io_wqe_run_queue(wqe)) {
                        __set_current_state(TASK_RUNNING);
                        io_worker_handle_work(worker);
-                       continue;
+                       did_work = true;
+                       goto loop;
                }
+               did_work = false;
                /* drops the lock on success, retry */
                if (__io_worker_idle(wqe, worker)) {
                        __release(&wqe->lock);
-                       continue;
+                       goto loop;
                }
                spin_unlock_irq(&wqe->lock);
                if (signal_pending(current))
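
io_worker_spin_for_work() above inserts a bounded spin between finishing one batch of work and going to sleep: up to roughly 1000 polls of the queue, abandoned early if the scheduler wants the CPU back, so a worker that just ran work can pick up the next item without a wakeup. Sketched outside the kernel, with cpu_relax() approximated by a GCC-style compiler barrier:

	#include <stdbool.h>

	static inline void cpu_relax_stub(void)
	{
		__asm__ __volatile__("" ::: "memory");	/* barrier only; the real
							 * cpu_relax() also hints
							 * the CPU pipeline */
	}

	static bool spin_for_work(bool (*queue_nonempty)(void),
				  bool (*need_resched_stub)(void))
	{
		for (int i = 1; i < 1000; i++) {
			if (queue_nonempty())
				return true;	/* found work without sleeping */
			if (need_resched_stub())
				break;		/* yield instead of burning CPU */
			cpu_relax_stub();
		}
		return false;
	}
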
@@ -526,8 +546,6 @@ static int io_wqe_worker(void *data)
                        break;
        }
 
-       finish_wait(&worker->wait, &wait);
-
        if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
                spin_lock_irq(&wqe->lock);
                if (!wq_list_empty(&wqe->work_list))
@@ -589,7 +607,6 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 
        refcount_set(&worker->ref, 1);
        worker->nulls_node.pprev = NULL;
-       init_waitqueue_head(&worker->wait);
        worker->wqe = wqe;
        spin_lock_init(&worker->lock);
 
@@ -784,10 +801,6 @@ void io_wq_cancel_all(struct io_wq *wq)
 
        set_bit(IO_WQ_BIT_CANCEL, &wq->state);
 
-       /*
-        * Browse both lists, as there's a gap between handing work off
-        * to a worker and the worker putting itself on the busy_list
-        */
        rcu_read_lock();
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
@@ -934,7 +947,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
        /*
         * Now check if a free (going busy) or busy worker has the work
         * currently running. If we find it there, we'll return CANCEL_RUNNING
-        * as an indication that we attempte to signal cancellation. The
+        * as an indication that we attempt to signal cancellation. The
         * completion will run normally in this case.
         */
        rcu_read_lock();
@@ -1035,7 +1048,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
                spin_lock_init(&wqe->lock);
                INIT_WQ_LIST(&wqe->work_list);
                INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-               INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
                INIT_LIST_HEAD(&wqe->all_list);
        }
 
index 7c333a28e2a7e9874f27c85492b943b5d33fc935..3f5e356de98050f67503336cfac1db64f455230d 100644 (file)
@@ -35,7 +35,8 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node,
                                    struct io_wq_work_list *list)
 {
        if (!list->first) {
-               list->first = list->last = node;
+               list->last = node;
+               WRITE_ONCE(list->first, node);
        } else {
                list->last->next = node;
                list->last = node;
@@ -47,7 +48,7 @@ static inline void wq_node_del(struct io_wq_work_list *list,
                               struct io_wq_work_node *prev)
 {
        if (node == list->first)
-               list->first = node->next;
+               WRITE_ONCE(list->first, node->next);
        if (node == list->last)
                list->last = prev;
        if (prev)
@@ -58,7 +59,7 @@ static inline void wq_node_del(struct io_wq_work_list *list,
 #define wq_list_for_each(pos, prv, head)                       \
        for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)
 
-#define wq_list_empty(list)    ((list)->first == NULL)
+#define wq_list_empty(list)    (READ_ONCE((list)->first) == NULL)
 #define INIT_WQ_LIST(list)     do {                            \
        (list)->first = NULL;                                   \
        (list)->last = NULL;                                    \
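
The WRITE_ONCE()/READ_ONCE() pairs above are what make wq_list_empty() safe as a lockless peek: the head pointer is always stored and loaded in one piece, and the compiler may not tear, refetch, or hoist the access. In portable C11 the same contract is expressed with relaxed atomics; this is a sketch of the idea, not the kernel API:

	#include <stdatomic.h>
	#include <stddef.h>

	struct node {
		struct node *next;
	};

	struct wq_list {
		_Atomic(struct node *) first;	/* peeked without the lock */
		struct node *last;		/* only touched under the lock */
	};

	static void list_add_tail_locked(struct wq_list *l, struct node *n)
	{
		n->next = NULL;
		if (!atomic_load_explicit(&l->first, memory_order_relaxed)) {
			l->last = n;	/* set ->last before publishing ->first */
			atomic_store_explicit(&l->first, n, memory_order_relaxed);
		} else {
			l->last->next = n;
			l->last = n;
		}
	}

	static int list_empty_lockless(const struct wq_list *l)
	{
		return atomic_load_explicit(&l->first, memory_order_relaxed) == NULL;
	}
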
@@ -119,6 +120,10 @@ static inline void io_wq_worker_sleeping(struct task_struct *tsk)
 static inline void io_wq_worker_running(struct task_struct *tsk)
 {
 }
-#endif /* CONFIG_IO_WQ */
+#endif
 
-#endif /* INTERNAL_IO_WQ_H */
+static inline bool io_wq_current_is_worker(void)
+{
+       return in_task() && (current->flags & PF_IO_WORKER);
+}
+#endif
index 405be10da73d4bb37b2fea9a3ca9f9247e01a5b4..e54556b0fcc69755cb39d59c361ff0a855d33c3a 100644 (file)
@@ -289,11 +289,14 @@ struct io_ring_ctx {
  */
 struct io_poll_iocb {
        struct file                     *file;
-       struct wait_queue_head          *head;
+       union {
+               struct wait_queue_head  *head;
+               u64                     addr;
+       };
        __poll_t                        events;
        bool                            done;
        bool                            canceled;
-       struct wait_queue_entry         *wait;
+       struct wait_queue_entry         wait;
 };
 
 struct io_timeout_data {
@@ -304,6 +307,51 @@ struct io_timeout_data {
        u32                             seq_offset;
 };
 
+struct io_accept {
+       struct file                     *file;
+       struct sockaddr __user          *addr;
+       int __user                      *addr_len;
+       int                             flags;
+};
+
+struct io_sync {
+       struct file                     *file;
+       loff_t                          len;
+       loff_t                          off;
+       int                             flags;
+};
+
+struct io_cancel {
+       struct file                     *file;
+       u64                             addr;
+};
+
+struct io_timeout {
+       struct file                     *file;
+       u64                             addr;
+       int                             flags;
+       unsigned                        count;
+};
+
+struct io_rw {
+       /* NOTE: kiocb has the file as the first member, so don't do it here */
+       struct kiocb                    kiocb;
+       u64                             addr;
+       u64                             len;
+};
+
+struct io_connect {
+       struct file                     *file;
+       struct sockaddr __user          *addr;
+       int                             addr_len;
+};
+
+struct io_sr_msg {
+       struct file                     *file;
+       struct user_msghdr __user       *msg;
+       int                             msg_flags;
+};
+
 struct io_async_connect {
        struct sockaddr_storage         address;
 };
@@ -323,7 +371,6 @@ struct io_async_rw {
 };
 
 struct io_async_ctx {
-       struct io_uring_sqe             sqe;
        union {
                struct io_async_rw      rw;
                struct io_async_msghdr  msg;
@@ -341,17 +388,23 @@ struct io_async_ctx {
 struct io_kiocb {
        union {
                struct file             *file;
-               struct kiocb            rw;
+               struct io_rw            rw;
                struct io_poll_iocb     poll;
+               struct io_accept        accept;
+               struct io_sync          sync;
+               struct io_cancel        cancel;
+               struct io_timeout       timeout;
+               struct io_connect       connect;
+               struct io_sr_msg        sr_msg;
        };
 
-       const struct io_uring_sqe       *sqe;
        struct io_async_ctx             *io;
        struct file                     *ring_file;
        int                             ring_fd;
        bool                            has_user;
        bool                            in_async;
        bool                            needs_fixed_file;
+       u8                              opcode;
 
        struct io_ring_ctx      *ctx;
        union {
@@ -377,6 +430,7 @@ struct io_kiocb {
 #define REQ_F_TIMEOUT_NOSEQ    8192    /* no timeout sequence */
 #define REQ_F_INFLIGHT         16384   /* on inflight list */
 #define REQ_F_COMP_LOCKED      32768   /* completion under lock */
+#define REQ_F_HARDLINK         65536   /* doesn't sever on completion < 0 */
        u64                     user_data;
        u32                     result;
        u32                     sequence;
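
Taken together, the hunks above replace the retained sqe pointer with state decoded at prep time: the opcode is cached in req->opcode and each opcode's arguments live in one overlapping union member, so a request never has to touch the shared submission ring again after prep. The storage idea in miniature; the field names below are invented for the demo:

	#include <stdio.h>

	struct demo_req {
		unsigned char opcode;		/* selects the live union member */
		union {				/* overlapping per-opcode views */
			struct { long off, len; int flags; } sync;
			struct { unsigned long target; } cancel;
		};
	};

	int main(void)
	{
		struct demo_req req = { .opcode = 3 /* say, fsync */ };

		req.sync.off = 0;
		req.sync.len = 4096;		/* only .sync is meaningful now */
		printf("opcode %u: sync %ld bytes at %ld\n",
		       req.opcode, req.sync.len, req.sync.off);
		return 0;
	}
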
@@ -563,12 +617,10 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
        }
 }
 
-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+static inline bool io_req_needs_user(struct io_kiocb *req)
 {
-       u8 opcode = READ_ONCE(sqe->opcode);
-
-       return !(opcode == IORING_OP_READ_FIXED ||
-                opcode == IORING_OP_WRITE_FIXED);
+       return !(req->opcode == IORING_OP_READ_FIXED ||
+                req->opcode == IORING_OP_WRITE_FIXED);
 }
 
 static inline bool io_prep_async_work(struct io_kiocb *req,
@@ -576,31 +628,31 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
 {
        bool do_hashed = false;
 
-       if (req->sqe) {
-               switch (req->sqe->opcode) {
-               case IORING_OP_WRITEV:
-               case IORING_OP_WRITE_FIXED:
+       switch (req->opcode) {
+       case IORING_OP_WRITEV:
+       case IORING_OP_WRITE_FIXED:
+               /* only regular files should be hashed for writes */
+               if (req->flags & REQ_F_ISREG)
                        do_hashed = true;
-                       /* fall-through */
-               case IORING_OP_READV:
-               case IORING_OP_READ_FIXED:
-               case IORING_OP_SENDMSG:
-               case IORING_OP_RECVMSG:
-               case IORING_OP_ACCEPT:
-               case IORING_OP_POLL_ADD:
-               case IORING_OP_CONNECT:
-                       /*
-                        * We know REQ_F_ISREG is not set on some of these
-                        * opcodes, but this enables us to keep the check in
-                        * just one place.
-                        */
-                       if (!(req->flags & REQ_F_ISREG))
-                               req->work.flags |= IO_WQ_WORK_UNBOUND;
-                       break;
-               }
-               if (io_sqe_needs_user(req->sqe))
-                       req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+               /* fall-through */
+       case IORING_OP_READV:
+       case IORING_OP_READ_FIXED:
+       case IORING_OP_SENDMSG:
+       case IORING_OP_RECVMSG:
+       case IORING_OP_ACCEPT:
+       case IORING_OP_POLL_ADD:
+       case IORING_OP_CONNECT:
+               /*
+                * We know REQ_F_ISREG is not set on some of these
+                * opcodes, but this enables us to keep the check in
+                * just one place.
+                */
+               if (!(req->flags & REQ_F_ISREG))
+                       req->work.flags |= IO_WQ_WORK_UNBOUND;
+               break;
        }
+       if (io_req_needs_user(req))
+               req->work.flags |= IO_WQ_WORK_NEEDS_USER;
 
        *link = io_prep_linked_timeout(req);
        return do_hashed;
@@ -969,7 +1021,7 @@ static void io_fail_links(struct io_kiocb *req)
                trace_io_uring_fail_link(req, link);
 
                if ((req->flags & REQ_F_LINK_TIMEOUT) &&
-                   link->sqe->opcode == IORING_OP_LINK_TIMEOUT) {
+                   link->opcode == IORING_OP_LINK_TIMEOUT) {
                        io_link_cancel_timeout(link);
                } else {
                        io_cqring_fill_event(link, -ECANCELED);
@@ -1145,7 +1197,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 
        ret = 0;
        list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
-               struct kiocb *kiocb = &req->rw;
+               struct kiocb *kiocb = &req->rw.kiocb;
 
                /*
                 * Move completed entries to our local list. If we find a
@@ -1175,7 +1227,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 }
 
 /*
- * Poll for a mininum of 'min' events. Note that if min == 0 we consider that a
+ * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
  * non-spinning poll check - we'll still enter the driver poll loop, but only
  * as a non-spinning completion check.
  */
@@ -1292,21 +1344,27 @@ static void kiocb_end_write(struct io_kiocb *req)
        file_end_write(req->file);
 }
 
+static inline void req_set_fail_links(struct io_kiocb *req)
+{
+       if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
+               req->flags |= REQ_F_FAIL_LINK;
+}
+
 static void io_complete_rw_common(struct kiocb *kiocb, long res)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
 
-       if ((req->flags & REQ_F_LINK) && res != req->result)
-               req->flags |= REQ_F_FAIL_LINK;
+       if (res != req->result)
+               req_set_fail_links(req);
        io_cqring_add_event(req, res);
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        io_complete_rw_common(kiocb, res);
        io_put_req(req);
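
req_set_fail_links() above relies on a two-bit mask compare: (flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK holds only when LINK is set and HARDLINK is clear, which is how hard links survive a failed completion. The truth table checked directly; the bit values are invented for the demo:

	#include <assert.h>

	#define F_LINK		(1u << 0)	/* demo stand-in for REQ_F_LINK */
	#define F_HARDLINK	(1u << 1)	/* ... and for REQ_F_HARDLINK */

	static int severs(unsigned int flags)
	{
		return (flags & (F_LINK | F_HARDLINK)) == F_LINK;
	}

	int main(void)
	{
		assert(severs(F_LINK));			/* plain link: fails over */
		assert(!severs(F_LINK | F_HARDLINK));	/* hard link: kept alive */
		assert(!severs(0));			/* not linked at all */
		return 0;
	}
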
@@ -1314,7 +1372,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 
 static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_kiocb *nxt = NULL;
 
        io_complete_rw_common(kiocb, res);
@@ -1325,13 +1383,13 @@ static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
-       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+       struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
 
-       if ((req->flags & REQ_F_LINK) && res != req->result)
-               req->flags |= REQ_F_FAIL_LINK;
+       if (res != req->result)
+               req_set_fail_links(req);
        req->result = res;
        if (res != -EAGAIN)
                req->flags |= REQ_F_IOPOLL_COMPLETED;
@@ -1359,7 +1417,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 
                list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
                                                list);
-               if (list_req->rw.ki_filp != req->rw.ki_filp)
+               if (list_req->file != req->file)
                        ctx->poll_multi_file = true;
        }
 
@@ -1422,7 +1480,7 @@ static bool io_file_supports_async(struct file *file)
 {
        umode_t mode = file_inode(file)->i_mode;
 
-       if (S_ISBLK(mode) || S_ISCHR(mode))
+       if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
                return true;
        if (S_ISREG(mode) && file->f_op != &io_uring_fops)
                return true;
@@ -1430,11 +1488,11 @@ static bool io_file_supports_async(struct file *file)
        return false;
 }
 
-static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     bool force_nonblock)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        unsigned ioprio;
        int ret;
 
@@ -1483,6 +1541,12 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
                        return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }
+
+       req->rw.addr = READ_ONCE(sqe->addr);
+       req->rw.len = READ_ONCE(sqe->len);
+       /* we own ->private, reuse it for the buffer index */
+       req->rw.kiocb.private = (void *) (unsigned long)
+                                       READ_ONCE(sqe->buf_index);
        return 0;
 }
 
@@ -1516,11 +1580,11 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
                io_rw_done(kiocb, ret);
 }
 
-static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
-                              const struct io_uring_sqe *sqe,
+static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
                               struct iov_iter *iter)
 {
-       size_t len = READ_ONCE(sqe->len);
+       struct io_ring_ctx *ctx = req->ctx;
+       size_t len = req->rw.len;
        struct io_mapped_ubuf *imu;
        unsigned index, buf_index;
        size_t offset;
@@ -1530,13 +1594,13 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
        if (unlikely(!ctx->user_bufs))
                return -EFAULT;
 
-       buf_index = READ_ONCE(sqe->buf_index);
+       buf_index = (unsigned long) req->rw.kiocb.private;
        if (unlikely(buf_index >= ctx->nr_user_bufs))
                return -EFAULT;
 
        index = array_index_nospec(buf_index, ctx->nr_user_bufs);
        imu = &ctx->user_bufs[index];
-       buf_addr = READ_ONCE(sqe->addr);
+       buf_addr = req->rw.addr;
 
        /* overflow */
        if (buf_addr + len < buf_addr)
@@ -1593,25 +1657,20 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
                               struct iovec **iovec, struct iov_iter *iter)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-       void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
-       size_t sqe_len = READ_ONCE(sqe->len);
+       void __user *buf = u64_to_user_ptr(req->rw.addr);
+       size_t sqe_len = req->rw.len;
        u8 opcode;
 
-       /*
-        * We're reading ->opcode for the second time, but the first read
-        * doesn't care whether it's _FIXED or not, so it doesn't matter
-        * whether ->opcode changes concurrently. The first read does care
-        * about whether it is a READ or a WRITE, so we don't trust this read
-        * for that purpose and instead let the caller pass in the read/write
-        * flag.
-        */
-       opcode = READ_ONCE(sqe->opcode);
+       opcode = req->opcode;
        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
                *iovec = NULL;
-               return io_import_fixed(req->ctx, rw, sqe, iter);
+               return io_import_fixed(req, rw, iter);
        }
 
+       /* buffer index only valid with fixed read/write */
+       if (req->rw.kiocb.private)
+               return -EINVAL;
+
        if (req->io) {
                struct io_async_rw *iorw = &req->io->rw;
 
@@ -1692,7 +1751,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
        return ret;
 }
 
-static void io_req_map_io(struct io_kiocb *req, ssize_t io_size,
+static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
                          struct iovec *iovec, struct iovec *fast_iov,
                          struct iov_iter *iter)
 {
@@ -1706,57 +1765,85 @@ static void io_req_map_io(struct io_kiocb *req, ssize_t io_size,
        }
 }
 
-static int io_setup_async_io(struct io_kiocb *req, ssize_t io_size,
+static int io_alloc_async_ctx(struct io_kiocb *req)
+{
+       req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
+       return req->io == NULL;
+}
+
+static void io_rw_async(struct io_wq_work **workptr)
+{
+       struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+       struct iovec *iov = NULL;
+
+       if (req->io->rw.iov != req->io->rw.fast_iov)
+               iov = req->io->rw.iov;
+       io_wq_submit_work(workptr);
+       kfree(iov);
+}
+
+static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
                             struct iovec *iovec, struct iovec *fast_iov,
                             struct iov_iter *iter)
 {
-       req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
-       if (req->io) {
-               io_req_map_io(req, io_size, iovec, fast_iov, iter);
-               memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe));
-               req->sqe = &req->io->sqe;
+       if (req->opcode == IORING_OP_READ_FIXED ||
+           req->opcode == IORING_OP_WRITE_FIXED)
                return 0;
-       }
+       if (!req->io && io_alloc_async_ctx(req))
+               return -ENOMEM;
 
-       return -ENOMEM;
+       io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+       req->work.func = io_rw_async;
+       return 0;
 }
 
-static int io_read_prep(struct io_kiocb *req, struct iovec **iovec,
-                       struct iov_iter *iter, bool force_nonblock)
+static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                       bool force_nonblock)
 {
+       struct io_async_ctx *io;
+       struct iov_iter iter;
        ssize_t ret;
 
-       ret = io_prep_rw(req, force_nonblock);
+       ret = io_prep_rw(req, sqe, force_nonblock);
        if (ret)
                return ret;
 
        if (unlikely(!(req->file->f_mode & FMODE_READ)))
                return -EBADF;
 
-       return io_import_iovec(READ, req, iovec, iter);
+       if (!req->io)
+               return 0;
+
+       io = req->io;
+       io->rw.iov = io->rw.fast_iov;
+       req->io = NULL;
+       ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
+       req->io = io;
+       if (ret < 0)
+               return ret;
+
+       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       return 0;
 }
 
 static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
                   bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter iter;
-       struct file *file;
        size_t iov_count;
        ssize_t io_size, ret;
 
-       if (!req->io) {
-               ret = io_read_prep(req, &iovec, &iter, force_nonblock);
-               if (ret < 0)
-                       return ret;
-       } else {
-               ret = io_import_iovec(READ, req, &iovec, &iter);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = io_import_iovec(READ, req, &iovec, &iter);
+       if (ret < 0)
+               return ret;
 
-       file = req->file;
+       /* Ensure we clear previously set non-block flag */
+       if (!force_nonblock)
+               req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
+
+       req->result = 0;
        io_size = ret;
        if (req->flags & REQ_F_LINK)
                req->result = io_size;
@@ -1765,39 +1852,27 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
         * we know to async punt it even if it was opened O_NONBLOCK
         */
-       if (force_nonblock && !io_file_supports_async(file)) {
+       if (force_nonblock && !io_file_supports_async(req->file)) {
                req->flags |= REQ_F_MUST_PUNT;
                goto copy_iov;
        }
 
        iov_count = iov_iter_count(&iter);
-       ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
+       ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
        if (!ret) {
                ssize_t ret2;
 
-               if (file->f_op->read_iter)
-                       ret2 = call_read_iter(file, kiocb, &iter);
+               if (req->file->f_op->read_iter)
+                       ret2 = call_read_iter(req->file, kiocb, &iter);
                else
-                       ret2 = loop_rw_iter(READ, file, kiocb, &iter);
+                       ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
 
-               /*
-                * In case of a short read, punt to async. This can happen
-                * if we have data partially cached. Alternatively we can
-                * return the short read, in which case the application will
-                * need to issue another SQE and wait for it. That SQE will
-                * need async punt anyway, so it's more efficient to do it
-                * here.
-                */
-               if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
-                   (req->flags & REQ_F_ISREG) &&
-                   ret2 > 0 && ret2 < io_size)
-                       ret2 = -EAGAIN;
                /* Catch -EAGAIN return for forced non-blocking submission */
                if (!force_nonblock || ret2 != -EAGAIN) {
                        kiocb_done(kiocb, ret2, nxt, req->in_async);
                } else {
 copy_iov:
-                       ret = io_setup_async_io(req, io_size, iovec,
+                       ret = io_setup_async_rw(req, io_size, iovec,
                                                inline_vecs, &iter);
                        if (ret)
                                goto out_free;
@@ -1805,46 +1880,58 @@ copy_iov:
                }
        }
 out_free:
-       kfree(iovec);
+       if (!io_wq_current_is_worker())
+               kfree(iovec);
        return ret;
 }
 
-static int io_write_prep(struct io_kiocb *req, struct iovec **iovec,
-                        struct iov_iter *iter, bool force_nonblock)
+static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                        bool force_nonblock)
 {
+       struct io_async_ctx *io;
+       struct iov_iter iter;
        ssize_t ret;
 
-       ret = io_prep_rw(req, force_nonblock);
+       ret = io_prep_rw(req, sqe, force_nonblock);
        if (ret)
                return ret;
 
        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
                return -EBADF;
 
-       return io_import_iovec(WRITE, req, iovec, iter);
+       if (!req->io)
+               return 0;
+
+       io = req->io;
+       io->rw.iov = io->rw.fast_iov;
+       req->io = NULL;
+       ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
+       req->io = io;
+       if (ret < 0)
+               return ret;
+
+       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       return 0;
 }
 
 static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                    bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct kiocb *kiocb = &req->rw;
+       struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter iter;
-       struct file *file;
        size_t iov_count;
        ssize_t ret, io_size;
 
-       if (!req->io) {
-               ret = io_write_prep(req, &iovec, &iter, force_nonblock);
-               if (ret < 0)
-                       return ret;
-       } else {
-               ret = io_import_iovec(WRITE, req, &iovec, &iter);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = io_import_iovec(WRITE, req, &iovec, &iter);
+       if (ret < 0)
+               return ret;
 
-       file = kiocb->ki_filp;
+       /* Ensure we clear previously set non-block flag */
+       if (!force_nonblock)
+               req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
+
+       req->result = 0;
        io_size = ret;
        if (req->flags & REQ_F_LINK)
                req->result = io_size;
@@ -1858,11 +1945,13 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                goto copy_iov;
        }
 
-       if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
+       /* file path doesn't support NOWAIT for non-direct_IO */
+       if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
+           (req->flags & REQ_F_ISREG))
                goto copy_iov;
 
        iov_count = iov_iter_count(&iter);
-       ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
+       ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
        if (!ret) {
                ssize_t ret2;
 
@@ -1874,22 +1963,22 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
                 * we return to userspace.
                 */
                if (req->flags & REQ_F_ISREG) {
-                       __sb_start_write(file_inode(file)->i_sb,
+                       __sb_start_write(file_inode(req->file)->i_sb,
                                                SB_FREEZE_WRITE, true);
-                       __sb_writers_release(file_inode(file)->i_sb,
+                       __sb_writers_release(file_inode(req->file)->i_sb,
                                                SB_FREEZE_WRITE);
                }
                kiocb->ki_flags |= IOCB_WRITE;
 
-               if (file->f_op->write_iter)
-                       ret2 = call_write_iter(file, kiocb, &iter);
+               if (req->file->f_op->write_iter)
+                       ret2 = call_write_iter(req->file, kiocb, &iter);
                else
-                       ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
+                       ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
                if (!force_nonblock || ret2 != -EAGAIN) {
                        kiocb_done(kiocb, ret2, nxt, req->in_async);
                } else {
 copy_iov:
-                       ret = io_setup_async_io(req, io_size, iovec,
+                       ret = io_setup_async_rw(req, io_size, iovec,
                                                inline_vecs, &iter);
                        if (ret)
                                goto out_free;
@@ -1897,7 +1986,8 @@ copy_iov:
                }
        }
 out_free:
-       kfree(iovec);
+       if (!io_wq_current_is_worker())
+               kfree(iovec);
        return ret;
 }
 
@@ -1928,45 +2018,92 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
                return -EINVAL;
 
+       req->sync.flags = READ_ONCE(sqe->fsync_flags);
+       if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
+               return -EINVAL;
+
+       req->sync.off = READ_ONCE(sqe->off);
+       req->sync.len = READ_ONCE(sqe->len);
        return 0;
 }
 
-static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                   struct io_kiocb **nxt, bool force_nonblock)
+static bool io_req_cancelled(struct io_kiocb *req)
+{
+       if (req->work.flags & IO_WQ_WORK_CANCEL) {
+               req_set_fail_links(req);
+               io_cqring_add_event(req, -ECANCELED);
+               io_put_req(req);
+               return true;
+       }
+
+       return false;
+}
+
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+       struct io_wq_work *work = *workptr;
+       struct io_kiocb *link = work->data;
+
+       io_queue_linked_timeout(link);
+       work->func = io_wq_submit_work;
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+       struct io_kiocb *link;
+
+       io_prep_async_work(nxt, &link);
+       *workptr = &nxt->work;
+       if (link) {
+               nxt->work.flags |= IO_WQ_WORK_CB;
+               nxt->work.func = io_link_work_cb;
+               nxt->work.data = link;
+       }
+}
+
+static void io_fsync_finish(struct io_wq_work **workptr)
 {
-       loff_t sqe_off = READ_ONCE(sqe->off);
-       loff_t sqe_len = READ_ONCE(sqe->len);
-       loff_t end = sqe_off + sqe_len;
-       unsigned fsync_flags;
+       struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+       loff_t end = req->sync.off + req->sync.len;
+       struct io_kiocb *nxt = NULL;
        int ret;
 
-       fsync_flags = READ_ONCE(sqe->fsync_flags);
-       if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
-               return -EINVAL;
+       if (io_req_cancelled(req))
+               return;
 
-       ret = io_prep_fsync(req, sqe);
-       if (ret)
-               return ret;
+       ret = vfs_fsync_range(req->file, req->sync.off,
+                               end > 0 ? end : LLONG_MAX,
+                               req->sync.flags & IORING_FSYNC_DATASYNC);
+       if (ret < 0)
+               req_set_fail_links(req);
+       io_cqring_add_event(req, ret);
+       io_put_req_find_next(req, &nxt);
+       if (nxt)
+               io_wq_assign_next(workptr, nxt);
+}
+
+static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
+                   bool force_nonblock)
+{
+       struct io_wq_work *work, *old_work;
 
        /* fsync always requires a blocking context */
-       if (force_nonblock)
+       if (force_nonblock) {
+               io_put_req(req);
+               req->work.func = io_fsync_finish;
                return -EAGAIN;
+       }
 
-       ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
-                               end > 0 ? end : LLONG_MAX,
-                               fsync_flags & IORING_FSYNC_DATASYNC);
-
-       if (ret < 0 && (req->flags & REQ_F_LINK))
-               req->flags |= REQ_F_FAIL_LINK;
-       io_cqring_add_event(req, ret);
-       io_put_req_find_next(req, nxt);
+       work = old_work = &req->work;
+       io_fsync_finish(&work);
+       if (work && work != old_work)
+               *nxt = container_of(work, struct io_kiocb, work);
        return 0;
 }
 
 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       int ret = 0;
 
        if (!req->file)
                return -EBADF;
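
io_fsync() above now follows a punt shape that io_sync_file_range() repeats below: under force_nonblock the request is given a completion function and -EAGAIN is returned so a worker thread runs it later; in a blocking context the same function is simply called inline. The control flow stripped of the io_uring plumbing, with every name below hypothetical:

	#include <stdio.h>

	#define DEMO_EAGAIN 11

	struct demo_req {
		void (*work_fn)(struct demo_req *);	/* run later on a worker */
		const char *what;
	};

	static void fsync_finish(struct demo_req *req)
	{
		printf("fsync(%s) completed\n", req->what);
	}

	static int submit_fsync(struct demo_req *req, int force_nonblock)
	{
		if (force_nonblock) {
			req->work_fn = fsync_finish;	/* punt to the workqueue */
			return -DEMO_EAGAIN;
		}
		fsync_finish(req);			/* blocking context: inline */
		return 0;
	}

	int main(void)
	{
		struct demo_req req = { .what = "journal" };

		if (submit_fsync(&req, 1) == -DEMO_EAGAIN)
			req.work_fn(&req);		/* what the worker would do */
		return 0;
	}
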
@@ -1976,59 +2113,88 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
                return -EINVAL;
 
-       return ret;
+       req->sync.off = READ_ONCE(sqe->off);
+       req->sync.len = READ_ONCE(sqe->len);
+       req->sync.flags = READ_ONCE(sqe->sync_range_flags);
+       return 0;
 }
 
-static int io_sync_file_range(struct io_kiocb *req,
-                             const struct io_uring_sqe *sqe,
-                             struct io_kiocb **nxt,
-                             bool force_nonblock)
+static void io_sync_file_range_finish(struct io_wq_work **workptr)
 {
-       loff_t sqe_off;
-       loff_t sqe_len;
-       unsigned flags;
+       struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+       struct io_kiocb *nxt = NULL;
        int ret;
 
-       ret = io_prep_sfr(req, sqe);
-       if (ret)
-               return ret;
+       if (io_req_cancelled(req))
+               return;
+
+       ret = sync_file_range(req->file, req->sync.off, req->sync.len,
+                               req->sync.flags);
+       if (ret < 0)
+               req_set_fail_links(req);
+       io_cqring_add_event(req, ret);
+       io_put_req_find_next(req, &nxt);
+       if (nxt)
+               io_wq_assign_next(workptr, nxt);
+}
+
+static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
+                             bool force_nonblock)
+{
+       struct io_wq_work *work, *old_work;
 
        /* sync_file_range always requires a blocking context */
-       if (force_nonblock)
+       if (force_nonblock) {
+               io_put_req(req);
+               req->work.func = io_sync_file_range_finish;
                return -EAGAIN;
+       }
 
-       sqe_off = READ_ONCE(sqe->off);
-       sqe_len = READ_ONCE(sqe->len);
-       flags = READ_ONCE(sqe->sync_range_flags);
+       work = old_work = &req->work;
+       io_sync_file_range_finish(&work);
+       if (work && work != old_work)
+               *nxt = container_of(work, struct io_kiocb, work);
+       return 0;
+}
 
-       ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
+#if defined(CONFIG_NET)
+static void io_sendrecv_async(struct io_wq_work **workptr)
+{
+       struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+       struct iovec *iov = NULL;
 
-       if (ret < 0 && (req->flags & REQ_F_LINK))
-               req->flags |= REQ_F_FAIL_LINK;
-       io_cqring_add_event(req, ret);
-       io_put_req_find_next(req, nxt);
-       return 0;
+       if (req->io->rw.iov != req->io->rw.fast_iov)
+               iov = req->io->msg.iov;
+       io_wq_submit_work(workptr);
+       kfree(iov);
 }
+#endif
 
-static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct user_msghdr __user *msg;
-       unsigned flags;
+       struct io_sr_msg *sr = &req->sr_msg;
+       struct io_async_ctx *io = req->io;
+
+       sr->msg_flags = READ_ONCE(sqe->msg_flags);
+       sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 
-       flags = READ_ONCE(sqe->msg_flags);
-       msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
-       return sendmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.iov);
+       if (!io)
+               return 0;
+
+       io->msg.iov = io->msg.fast_iov;
+       return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+                                       &io->msg.iov);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
 
-static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                     struct io_kiocb **nxt, bool force_nonblock)
+static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
+                     bool force_nonblock)
 {
 #if defined(CONFIG_NET)
+       struct io_async_msghdr *kmsg = NULL;
        struct socket *sock;
        int ret;
 
@@ -2037,50 +2203,55 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        sock = sock_from_file(req->file, &ret);
        if (sock) {
-               struct io_async_ctx io, *copy;
+               struct io_async_ctx io;
                struct sockaddr_storage addr;
-               struct msghdr *kmsg;
                unsigned flags;
 
-               flags = READ_ONCE(sqe->msg_flags);
-               if (flags & MSG_DONTWAIT)
-                       req->flags |= REQ_F_NOWAIT;
-               else if (force_nonblock)
-                       flags |= MSG_DONTWAIT;
-
                if (req->io) {
-                       kmsg = &req->io->msg.msg;
-                       kmsg->msg_name = &addr;
+                       kmsg = &req->io->msg;
+                       kmsg->msg.msg_name = &addr;
+                       /* if iov is set, it's allocated already */
+                       if (!kmsg->iov)
+                               kmsg->iov = kmsg->fast_iov;
+                       kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
-                       kmsg = &io.msg.msg;
-                       kmsg->msg_name = &addr;
+                       struct io_sr_msg *sr = &req->sr_msg;
+
+                       kmsg = &io.msg;
+                       kmsg->msg.msg_name = &addr;
+
                        io.msg.iov = io.msg.fast_iov;
-                       ret = io_sendmsg_prep(req, &io);
+                       ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
+                                       sr->msg_flags, &io.msg.iov);
                        if (ret)
-                               goto out;
+                               return ret;
                }
 
-               ret = __sys_sendmsg_sock(sock, kmsg, flags);
+               flags = req->sr_msg.msg_flags;
+               if (flags & MSG_DONTWAIT)
+                       req->flags |= REQ_F_NOWAIT;
+               else if (force_nonblock)
+                       flags |= MSG_DONTWAIT;
+
+               ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
                if (force_nonblock && ret == -EAGAIN) {
-                       copy = kmalloc(sizeof(*copy), GFP_KERNEL);
-                       if (!copy) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       memcpy(&copy->msg, &io.msg, sizeof(copy->msg));
-                       req->io = copy;
-                       memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
-                       req->sqe = &req->io->sqe;
-                       return ret;
+                       if (req->io)
+                               return -EAGAIN;
+                       if (io_alloc_async_ctx(req))
+                               return -ENOMEM;
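+                       /* copy the on-stack msghdr so the async retry can reuse it */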
+                       memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
+                       req->work.func = io_sendrecv_async;
+                       return -EAGAIN;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
        }
 
-out:
+       if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
+               kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
-       if (ret < 0 && (req->flags & REQ_F_LINK))
-               req->flags |= REQ_F_FAIL_LINK;
+       if (ret < 0)
+               req_set_fail_links(req);
        io_put_req_find_next(req, nxt);
        return 0;
 #else
@@ -2088,26 +2259,32 @@ out:
 #endif
 }
 
-static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_recvmsg_prep(struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct user_msghdr __user *msg;
-       unsigned flags;
+       struct io_sr_msg *sr = &req->sr_msg;
+       struct io_async_ctx *io = req->io;
 
-       flags = READ_ONCE(sqe->msg_flags);
-       msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
-       return recvmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.uaddr,
-                                       &io->msg.iov);
+       sr->msg_flags = READ_ONCE(sqe->msg_flags);
+       sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
+       if (!io)
+               return 0;
+
+       io->msg.iov = io->msg.fast_iov;
+       return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+                                       &io->msg.uaddr, &io->msg.iov);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
 
-static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                     struct io_kiocb **nxt, bool force_nonblock)
+static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
+                     bool force_nonblock)
 {
 #if defined(CONFIG_NET)
+       struct io_async_msghdr *kmsg = NULL;
        struct socket *sock;
        int ret;
 
@@ -2116,53 +2293,57 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        sock = sock_from_file(req->file, &ret);
        if (sock) {
-               struct user_msghdr __user *msg;
-               struct io_async_ctx io, *copy;
+               struct io_async_ctx io;
                struct sockaddr_storage addr;
-               struct msghdr *kmsg;
                unsigned flags;
 
-               flags = READ_ONCE(sqe->msg_flags);
-               if (flags & MSG_DONTWAIT)
-                       req->flags |= REQ_F_NOWAIT;
-               else if (force_nonblock)
-                       flags |= MSG_DONTWAIT;
-
-               msg = (struct user_msghdr __user *) (unsigned long)
-                       READ_ONCE(sqe->addr);
                if (req->io) {
-                       kmsg = &req->io->msg.msg;
-                       kmsg->msg_name = &addr;
+                       kmsg = &req->io->msg;
+                       kmsg->msg.msg_name = &addr;
+                       /* if iov is set, it's allocated already */
+                       if (!kmsg->iov)
+                               kmsg->iov = kmsg->fast_iov;
+                       kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
-                       kmsg = &io.msg.msg;
-                       kmsg->msg_name = &addr;
+                       struct io_sr_msg *sr = &req->sr_msg;
+
+                       kmsg = &io.msg;
+                       kmsg->msg.msg_name = &addr;
+
                        io.msg.iov = io.msg.fast_iov;
-                       ret = io_recvmsg_prep(req, &io);
+                       ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
+                                       sr->msg_flags, &io.msg.uaddr,
+                                       &io.msg.iov);
                        if (ret)
-                               goto out;
+                               return ret;
                }
 
-               ret = __sys_recvmsg_sock(sock, kmsg, msg, io.msg.uaddr, flags);
+               flags = req->sr_msg.msg_flags;
+               if (flags & MSG_DONTWAIT)
+                       req->flags |= REQ_F_NOWAIT;
+               else if (force_nonblock)
+                       flags |= MSG_DONTWAIT;
+
+               ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
+                                               kmsg->uaddr, flags);
                if (force_nonblock && ret == -EAGAIN) {
-                       copy = kmalloc(sizeof(*copy), GFP_KERNEL);
-                       if (!copy) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       memcpy(copy, &io, sizeof(*copy));
-                       req->io = copy;
-                       memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
-                       req->sqe = &req->io->sqe;
-                       return ret;
+                       if (req->io)
+                               return -EAGAIN;
+                       if (io_alloc_async_ctx(req))
+                               return -ENOMEM;
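+                       /* copy the on-stack msghdr so the async retry can reuse it */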
+                       memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
+                       req->work.func = io_sendrecv_async;
+                       return -EAGAIN;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
        }
 
-out:
+       if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
+               kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
-       if (ret < 0 && (req->flags & REQ_F_LINK))
-               req->flags |= REQ_F_FAIL_LINK;
+       if (ret < 0)
+               req_set_fail_links(req);
        io_put_req_find_next(req, nxt);
        return 0;
 #else
@@ -2170,101 +2351,141 @@ out:
 #endif
 }
 
-static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                    struct io_kiocb **nxt, bool force_nonblock)
+static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       struct sockaddr __user *addr;
-       int __user *addr_len;
-       unsigned file_flags;
-       int flags, ret;
+       struct io_accept *accept = &req->accept;
 
        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
                return -EINVAL;
        if (sqe->ioprio || sqe->len || sqe->buf_index)
                return -EINVAL;
 
-       addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
-       addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
-       flags = READ_ONCE(sqe->accept_flags);
-       file_flags = force_nonblock ? O_NONBLOCK : 0;
+       accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+       accept->flags = READ_ONCE(sqe->accept_flags);
+       return 0;
+#else
+       return -EOPNOTSUPP;
+#endif
+}
+
+#if defined(CONFIG_NET)
+static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
+                      bool force_nonblock)
+{
+       struct io_accept *accept = &req->accept;
+       unsigned file_flags;
+       int ret;
+
+       file_flags = force_nonblock ? O_NONBLOCK : 0;
+       ret = __sys_accept4_file(req->file, file_flags, accept->addr,
+                                       accept->addr_len, accept->flags);
+       if (ret == -EAGAIN && force_nonblock)
+               return -EAGAIN;
+       if (ret == -ERESTARTSYS)
+               ret = -EINTR;
+       if (ret < 0)
+               req_set_fail_links(req);
+       io_cqring_add_event(req, ret);
+       io_put_req_find_next(req, nxt);
+       return 0;
+}
+
+static void io_accept_finish(struct io_wq_work **workptr)
+{
+       struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+       struct io_kiocb *nxt = NULL;
+
+       if (io_req_cancelled(req))
+               return;
+       __io_accept(req, &nxt, false);
+       if (nxt)
+               io_wq_assign_next(workptr, nxt);
+}
+#endif
+
+static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
+                    bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+       int ret;
 
-       ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
+       ret = __io_accept(req, nxt, force_nonblock);
        if (ret == -EAGAIN && force_nonblock) {
+               req->work.func = io_accept_finish;
                req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
+               io_put_req(req);
                return -EAGAIN;
        }
-       if (ret == -ERESTARTSYS)
-               ret = -EINTR;
-       if (ret < 0 && (req->flags & REQ_F_LINK))
-               req->flags |= REQ_F_FAIL_LINK;
-       io_cqring_add_event(req, ret);
-       io_put_req_find_next(req, nxt);
        return 0;
 #else
        return -EOPNOTSUPP;
 #endif
 }
 
-static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       struct sockaddr __user *addr;
-       int addr_len;
+       struct io_connect *conn = &req->connect;
+       struct io_async_ctx *io = req->io;
+
+       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+               return -EINVAL;
+
+       conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       conn->addr_len = READ_ONCE(sqe->addr2);
 
-       addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
-       addr_len = READ_ONCE(sqe->addr2);
-       return move_addr_to_kernel(addr, addr_len, &io->connect.address);
+       if (!io)
+               return 0;
+
+       return move_addr_to_kernel(conn->addr, conn->addr_len,
+                                       &io->connect.address);
 #else
-       return 0;
+       return -EOPNOTSUPP;
 #endif
 }
 
-static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                     struct io_kiocb **nxt, bool force_nonblock)
+static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
+                     bool force_nonblock)
 {
 #if defined(CONFIG_NET)
        struct io_async_ctx __io, *io;
        unsigned file_flags;
-       int addr_len, ret;
-
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
-               return -EINVAL;
-
-       addr_len = READ_ONCE(sqe->addr2);
-       file_flags = force_nonblock ? O_NONBLOCK : 0;
+       int ret;
 
        if (req->io) {
                io = req->io;
        } else {
-               ret = io_connect_prep(req, &__io);
+               ret = move_addr_to_kernel(req->connect.addr,
+                                               req->connect.addr_len,
+                                               &__io.connect.address);
                if (ret)
                        goto out;
                io = &__io;
        }
 
-       ret = __sys_connect_file(req->file, &io->connect.address, addr_len,
-                                       file_flags);
+       file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+       ret = __sys_connect_file(req->file, &io->connect.address,
+                                       req->connect.addr_len, file_flags);
        if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
-               io = kmalloc(sizeof(*io), GFP_KERNEL);
-               if (!io) {
+               if (req->io)
+                       return -EAGAIN;
+               if (io_alloc_async_ctx(req)) {
                        ret = -ENOMEM;
                        goto out;
                }
-               memcpy(&io->connect, &__io.connect, sizeof(io->connect));
-               req->io = io;
-               memcpy(&io->sqe, req->sqe, sizeof(*req->sqe));
-               req->sqe = &io->sqe;
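+               /* stash the copied address for the async retry */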
+               memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
                return -EAGAIN;
        }
        if (ret == -ERESTARTSYS)
                ret = -EINTR;
 out:
-       if (ret < 0 && (req->flags & REQ_F_LINK))
-               req->flags |= REQ_F_FAIL_LINK;
+       if (ret < 0)
+               req_set_fail_links(req);
        io_cqring_add_event(req, ret);
        io_put_req_find_next(req, nxt);
        return 0;
@@ -2279,8 +2500,8 @@ static void io_poll_remove_one(struct io_kiocb *req)
 
        spin_lock(&poll->head->lock);
        WRITE_ONCE(poll->canceled, true);
-       if (!list_empty(&poll->wait->entry)) {
-               list_del_init(&poll->wait->entry);
+       if (!list_empty(&poll->wait.entry)) {
+               list_del_init(&poll->wait.entry);
                io_queue_async_work(req);
        }
        spin_unlock(&poll->head->lock);
@@ -2320,28 +2541,37 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
        return -ENOENT;
 }
 
+static int io_poll_remove_prep(struct io_kiocb *req,
+                              const struct io_uring_sqe *sqe)
+{
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
+           sqe->poll_events)
+               return -EINVAL;
+
+       req->poll.addr = READ_ONCE(sqe->addr);
+       return 0;
+}
+
 /*
  * Find a running poll command that matches one specified in sqe->addr,
  * and remove it if found.
  */
-static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_poll_remove(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       u64 addr;
        int ret;
 
-       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
-           sqe->poll_events)
-               return -EINVAL;
-
+       addr = req->poll.addr;
        spin_lock_irq(&ctx->completion_lock);
-       ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
+       ret = io_poll_cancel(ctx, addr);
        spin_unlock_irq(&ctx->completion_lock);
 
        io_cqring_add_event(req, ret);
-       if (ret < 0 && (req->flags & REQ_F_LINK))
-               req->flags |= REQ_F_FAIL_LINK;
+       if (ret < 0)
+               req_set_fail_links(req);
        io_put_req(req);
        return 0;
 }
@@ -2351,7 +2581,6 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
        struct io_ring_ctx *ctx = req->ctx;
 
        req->poll.done = true;
-       kfree(req->poll.wait);
        if (error)
                io_cqring_fill_event(req, error);
        else
@@ -2389,7 +2618,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
         */
        spin_lock_irq(&ctx->completion_lock);
        if (!mask && ret != -ECANCELED) {
-               add_wait_queue(poll->head, poll->wait);
+               add_wait_queue(poll->head, &poll->wait);
                spin_unlock_irq(&ctx->completion_lock);
                return;
        }
@@ -2399,11 +2628,11 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 
        io_cqring_ev_posted(ctx);
 
-       if (ret < 0 && req->flags & REQ_F_LINK)
-               req->flags |= REQ_F_FAIL_LINK;
+       if (ret < 0)
+               req_set_fail_links(req);
        io_put_req_find_next(req, &nxt);
        if (nxt)
-               *workptr = &nxt->work;
+               io_wq_assign_next(workptr, nxt);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -2419,7 +2648,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        if (mask && !(mask & poll->events))
                return 0;
 
-       list_del_init(&poll->wait->entry);
+       list_del_init(&poll->wait.entry);
 
        /*
         * Run completion inline if we can. We're using trylock here because
@@ -2460,7 +2689,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 
        pt->error = 0;
        pt->req->poll.head = head;
-       add_wait_queue(head, pt->req->poll.wait);
+       add_wait_queue(head, &pt->req->poll.wait);
 }
 
 static void io_poll_req_insert(struct io_kiocb *req)
@@ -2472,14 +2701,9 @@ static void io_poll_req_insert(struct io_kiocb *req)
        hlist_add_head(&req->hash_node, list);
 }
 
-static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                      struct io_kiocb **nxt)
+static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_poll_iocb *poll = &req->poll;
-       struct io_ring_ctx *ctx = req->ctx;
-       struct io_poll_table ipt;
-       bool cancel = false;
-       __poll_t mask;
        u16 events;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -2489,14 +2713,20 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if (!poll->file)
                return -EBADF;
 
-       poll->wait = kmalloc(sizeof(*poll->wait), GFP_KERNEL);
-       if (!poll->wait)
-               return -ENOMEM;
-
-       req->io = NULL;
-       INIT_IO_WORK(&req->work, io_poll_complete_work);
        events = READ_ONCE(sqe->poll_events);
        poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
+       return 0;
+}
+
+static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
+{
+       struct io_poll_iocb *poll = &req->poll;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_poll_table ipt;
+       bool cancel = false;
+       __poll_t mask;
+
+       INIT_IO_WORK(&req->work, io_poll_complete_work);
        INIT_HLIST_NODE(&req->hash_node);
 
        poll->head = NULL;
@@ -2509,9 +2739,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
 
        /* initialize the list so that we can do list_empty checks */
-       INIT_LIST_HEAD(&poll->wait->entry);
-       init_waitqueue_func_entry(poll->wait, io_poll_wake);
-       poll->wait->private = poll;
+       INIT_LIST_HEAD(&poll->wait.entry);
+       init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+       poll->wait.private = poll;
 
        INIT_LIST_HEAD(&req->list);
 
@@ -2520,14 +2750,14 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        spin_lock_irq(&ctx->completion_lock);
        if (likely(poll->head)) {
                spin_lock(&poll->head->lock);
-               if (unlikely(list_empty(&poll->wait->entry))) {
+               if (unlikely(list_empty(&poll->wait.entry))) {
                        if (ipt.error)
                                cancel = true;
                        ipt.error = 0;
                        mask = 0;
                }
                if (mask || ipt.error)
-                       list_del_init(&poll->wait->entry);
+                       list_del_init(&poll->wait.entry);
                else if (cancel)
                        WRITE_ONCE(poll->canceled, true);
                else if (!poll->done) /* actually waiting for an event */
@@ -2567,7 +2797,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 
                /*
                 * Adjust the reqs sequence before the current one because it
-                * will consume a slot in the cq_ring and the the cq_tail
+                * will consume a slot in the cq_ring and the cq_tail
                 * pointer will be increased, otherwise other timeout reqs may
                 * return in advance without waiting for enough wait_nr.
                 */
@@ -2582,8 +2812,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        io_cqring_ev_posted(ctx);
-       if (req->flags & REQ_F_LINK)
-               req->flags |= REQ_F_FAIL_LINK;
+       req_set_fail_links(req);
        io_put_req(req);
        return HRTIMER_NORESTART;
 }
@@ -2608,48 +2837,52 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
        if (ret == -1)
                return -EALREADY;
 
-       if (req->flags & REQ_F_LINK)
-               req->flags |= REQ_F_FAIL_LINK;
+       req_set_fail_links(req);
        io_cqring_fill_event(req, -ECANCELED);
        io_put_req(req);
        return 0;
 }
 
+static int io_timeout_remove_prep(struct io_kiocb *req,
+                                 const struct io_uring_sqe *sqe)
+{
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
+               return -EINVAL;
+
+       req->timeout.addr = READ_ONCE(sqe->addr);
+       req->timeout.flags = READ_ONCE(sqe->timeout_flags);
+       if (req->timeout.flags)
+               return -EINVAL;
+
+       return 0;
+}
+
 /*
  * Remove or update an existing timeout command
  */
-static int io_timeout_remove(struct io_kiocb *req,
-                            const struct io_uring_sqe *sqe)
+static int io_timeout_remove(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       unsigned flags;
        int ret;
 
-       if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
-       if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
-               return -EINVAL;
-       flags = READ_ONCE(sqe->timeout_flags);
-       if (flags)
-               return -EINVAL;
-
        spin_lock_irq(&ctx->completion_lock);
-       ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
+       ret = io_timeout_cancel(ctx, req->timeout.addr);
 
        io_cqring_fill_event(req, ret);
        io_commit_cqring(ctx);
        spin_unlock_irq(&ctx->completion_lock);
        io_cqring_ev_posted(ctx);
-       if (ret < 0 && req->flags & REQ_F_LINK)
-               req->flags |= REQ_F_FAIL_LINK;
+       if (ret < 0)
+               req_set_fail_links(req);
        io_put_req(req);
        return 0;
 }
 
-static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
+static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                           bool is_timeout_link)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_timeout_data *data;
        unsigned flags;
 
@@ -2663,7 +2896,12 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
        if (flags & ~IORING_TIMEOUT_ABS)
                return -EINVAL;
 
-       data = &io->timeout;
+       req->timeout.count = READ_ONCE(sqe->off);
+
+       if (!req->io && io_alloc_async_ctx(req))
+               return -ENOMEM;
+
+       data = &req->io->timeout;
        data->req = req;
        req->flags |= REQ_F_TIMEOUT;
 
@@ -2676,32 +2914,17 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
                data->mode = HRTIMER_MODE_REL;
 
        hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
-       req->io = io;
        return 0;
 }
 
-static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_timeout(struct io_kiocb *req)
 {
        unsigned count;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_timeout_data *data;
-       struct io_async_ctx *io;
        struct list_head *entry;
        unsigned span = 0;
 
-       io = req->io;
-       if (!io) {
-               int ret;
-
-               io = kmalloc(sizeof(*io), GFP_KERNEL);
-               if (!io)
-                       return -ENOMEM;
-               ret = io_timeout_prep(req, io, false);
-               if (ret) {
-                       kfree(io);
-                       return ret;
-               }
-       }
        data = &req->io->timeout;
 
        /*
@@ -2709,7 +2932,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
         * timeout event to be satisfied. If it isn't set, then this is
         * a pure timeout request, sequence isn't used.
         */
-       count = READ_ONCE(sqe->off);
+       count = req->timeout.count;
        if (!count) {
                req->flags |= REQ_F_TIMEOUT_NOSEQ;
                spin_lock_irq(&ctx->completion_lock);
@@ -2822,89 +3045,109 @@ done:
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
        io_cqring_ev_posted(ctx);
 
-       if (ret < 0 && (req->flags & REQ_F_LINK))
-               req->flags |= REQ_F_FAIL_LINK;
+       if (ret < 0)
+               req_set_fail_links(req);
        io_put_req_find_next(req, nxt);
 }
 
-static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-                          struct io_kiocb **nxt)
+static int io_async_cancel_prep(struct io_kiocb *req,
+                               const struct io_uring_sqe *sqe)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-
-       if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
            sqe->cancel_flags)
                return -EINVAL;
 
-       io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0);
+       req->cancel.addr = READ_ONCE(sqe->addr);
        return 0;
 }
 
-static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
 {
-       struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct iov_iter iter;
-       ssize_t ret;
+       struct io_ring_ctx *ctx = req->ctx;
+
+       io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
+       return 0;
+}
 
-       memcpy(&io->sqe, req->sqe, sizeof(io->sqe));
-       req->sqe = &io->sqe;
+static int io_req_defer_prep(struct io_kiocb *req,
+                            const struct io_uring_sqe *sqe)
+{
+       ssize_t ret = 0;
 
-       switch (io->sqe.opcode) {
+       switch (req->opcode) {
+       case IORING_OP_NOP:
+               break;
        case IORING_OP_READV:
        case IORING_OP_READ_FIXED:
-               ret = io_read_prep(req, &iovec, &iter, true);
+               ret = io_read_prep(req, sqe, true);
                break;
        case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
-               ret = io_write_prep(req, &iovec, &iter, true);
+               ret = io_write_prep(req, sqe, true);
+               break;
+       case IORING_OP_POLL_ADD:
+               ret = io_poll_add_prep(req, sqe);
+               break;
+       case IORING_OP_POLL_REMOVE:
+               ret = io_poll_remove_prep(req, sqe);
+               break;
+       case IORING_OP_FSYNC:
+               ret = io_prep_fsync(req, sqe);
+               break;
+       case IORING_OP_SYNC_FILE_RANGE:
+               ret = io_prep_sfr(req, sqe);
                break;
        case IORING_OP_SENDMSG:
-               ret = io_sendmsg_prep(req, io);
+               ret = io_sendmsg_prep(req, sqe);
                break;
        case IORING_OP_RECVMSG:
-               ret = io_recvmsg_prep(req, io);
+               ret = io_recvmsg_prep(req, sqe);
                break;
        case IORING_OP_CONNECT:
-               ret = io_connect_prep(req, io);
+               ret = io_connect_prep(req, sqe);
                break;
        case IORING_OP_TIMEOUT:
-               return io_timeout_prep(req, io, false);
+               ret = io_timeout_prep(req, sqe, false);
+               break;
+       case IORING_OP_TIMEOUT_REMOVE:
+               ret = io_timeout_remove_prep(req, sqe);
+               break;
+       case IORING_OP_ASYNC_CANCEL:
+               ret = io_async_cancel_prep(req, sqe);
+               break;
        case IORING_OP_LINK_TIMEOUT:
-               return io_timeout_prep(req, io, true);
+               ret = io_timeout_prep(req, sqe, true);
+               break;
+       case IORING_OP_ACCEPT:
+               ret = io_accept_prep(req, sqe);
+               break;
        default:
-               req->io = io;
-               return 0;
+               printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
+                               req->opcode);
+               ret = -EINVAL;
+               break;
        }
 
-       if (ret < 0)
-               return ret;
-
-       req->io = io;
-       io_req_map_io(req, ret, iovec, inline_vecs, &iter);
-       return 0;
+       return ret;
 }
 
-static int io_req_defer(struct io_kiocb *req)
+static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_async_ctx *io;
        int ret;
 
        /* Still need to defer if there is a pending req in the defer list. */
        if (!req_need_defer(req) && list_empty(&ctx->defer_list))
                return 0;
 
-       io = kmalloc(sizeof(*io), GFP_KERNEL);
-       if (!io)
+       if (!req->io && io_alloc_async_ctx(req))
                return -EAGAIN;
 
-       ret = io_req_defer_prep(req, io);
-       if (ret < 0) {
-               kfree(io);
+       ret = io_req_defer_prep(req, sqe);
+       if (ret < 0)
                return ret;
-       }
 
        spin_lock_irq(&ctx->completion_lock);
        if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
@@ -2918,66 +3161,121 @@ static int io_req_defer(struct io_kiocb *req)
        return -EIOCBQUEUED;
 }
 
-__attribute__((nonnull))
-static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
-                       bool force_nonblock)
+static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                       struct io_kiocb **nxt, bool force_nonblock)
 {
-       int ret, opcode;
        struct io_ring_ctx *ctx = req->ctx;
+       int ret;
 
-       opcode = READ_ONCE(req->sqe->opcode);
-       switch (opcode) {
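+       /* sqe is NULL on async retry; the prep state already lives in req */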
+       switch (req->opcode) {
        case IORING_OP_NOP:
                ret = io_nop(req);
                break;
        case IORING_OP_READV:
-               if (unlikely(req->sqe->buf_index))
-                       return -EINVAL;
-               ret = io_read(req, nxt, force_nonblock);
-               break;
-       case IORING_OP_WRITEV:
-               if (unlikely(req->sqe->buf_index))
-                       return -EINVAL;
-               ret = io_write(req, nxt, force_nonblock);
-               break;
        case IORING_OP_READ_FIXED:
+               if (sqe) {
+                       ret = io_read_prep(req, sqe, force_nonblock);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_read(req, nxt, force_nonblock);
                break;
+       case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
+               if (sqe) {
+                       ret = io_write_prep(req, sqe, force_nonblock);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_write(req, nxt, force_nonblock);
                break;
        case IORING_OP_FSYNC:
-               ret = io_fsync(req, req->sqe, nxt, force_nonblock);
+               if (sqe) {
+                       ret = io_prep_fsync(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
+               ret = io_fsync(req, nxt, force_nonblock);
                break;
        case IORING_OP_POLL_ADD:
-               ret = io_poll_add(req, req->sqe, nxt);
+               if (sqe) {
+                       ret = io_poll_add_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
+               ret = io_poll_add(req, nxt);
                break;
        case IORING_OP_POLL_REMOVE:
-               ret = io_poll_remove(req, req->sqe);
+               if (sqe) {
+                       ret = io_poll_remove_prep(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
+               ret = io_poll_remove(req);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
-               ret = io_sync_file_range(req, req->sqe, nxt, force_nonblock);
+               if (sqe) {
+                       ret = io_prep_sfr(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
+               ret = io_sync_file_range(req, nxt, force_nonblock);
                break;
        case IORING_OP_SENDMSG:
-               ret = io_sendmsg(req, req->sqe, nxt, force_nonblock);
+               if (sqe) {
+                       ret = io_sendmsg_prep(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
+               ret = io_sendmsg(req, nxt, force_nonblock);
                break;
        case IORING_OP_RECVMSG:
-               ret = io_recvmsg(req, req->sqe, nxt, force_nonblock);
+               if (sqe) {
+                       ret = io_recvmsg_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
+               ret = io_recvmsg(req, nxt, force_nonblock);
                break;
        case IORING_OP_TIMEOUT:
-               ret = io_timeout(req, req->sqe);
+               if (sqe) {
+                       ret = io_timeout_prep(req, sqe, false);
+                       if (ret)
+                               break;
+               }
+               ret = io_timeout(req);
                break;
        case IORING_OP_TIMEOUT_REMOVE:
-               ret = io_timeout_remove(req, req->sqe);
+               if (sqe) {
+                       ret = io_timeout_remove_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
+               ret = io_timeout_remove(req);
                break;
        case IORING_OP_ACCEPT:
-               ret = io_accept(req, req->sqe, nxt, force_nonblock);
+               if (sqe) {
+                       ret = io_accept_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
+               ret = io_accept(req, nxt, force_nonblock);
                break;
        case IORING_OP_CONNECT:
-               ret = io_connect(req, req->sqe, nxt, force_nonblock);
+               if (sqe) {
+                       ret = io_connect_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
+               ret = io_connect(req, nxt, force_nonblock);
                break;
        case IORING_OP_ASYNC_CANCEL:
-               ret = io_async_cancel(req, req->sqe, nxt);
+               if (sqe) {
+                       ret = io_async_cancel_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
+               ret = io_async_cancel(req, nxt);
                break;
        default:
                ret = -EINVAL;
@@ -2988,29 +3286,24 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
                return ret;
 
        if (ctx->flags & IORING_SETUP_IOPOLL) {
+               const bool in_async = io_wq_current_is_worker();
+
                if (req->result == -EAGAIN)
                        return -EAGAIN;
 
                /* workqueue context doesn't hold uring_lock, grab it now */
-               if (req->in_async)
+               if (in_async)
                        mutex_lock(&ctx->uring_lock);
+
                io_iopoll_req_issued(req);
-               if (req->in_async)
+
+               if (in_async)
                        mutex_unlock(&ctx->uring_lock);
        }
 
        return 0;
 }
 
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
-       struct io_wq_work *work = *workptr;
-       struct io_kiocb *link = work->data;
-
-       io_queue_linked_timeout(link);
-       work->func = io_wq_submit_work;
-}
-
 static void io_wq_submit_work(struct io_wq_work **workptr)
 {
        struct io_wq_work *work = *workptr;
@@ -3018,9 +3311,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
        struct io_kiocb *nxt = NULL;
        int ret = 0;
 
-       /* Ensure we clear previously set non-block flag */
-       req->rw.ki_flags &= ~IOCB_NOWAIT;
-
        if (work->flags & IO_WQ_WORK_CANCEL)
                ret = -ECANCELED;
 
@@ -3028,7 +3318,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
                req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
                req->in_async = true;
                do {
-                       ret = io_issue_sqe(req, &nxt, false);
+                       ret = io_issue_sqe(req, NULL, &nxt, false);
                        /*
                         * We can get EAGAIN for polled IO even though we're
                         * forcing a sync submission from here, since we can't
@@ -3044,40 +3334,35 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
        io_put_req(req);
 
        if (ret) {
-               if (req->flags & REQ_F_LINK)
-                       req->flags |= REQ_F_FAIL_LINK;
+               req_set_fail_links(req);
                io_cqring_add_event(req, ret);
                io_put_req(req);
        }
 
        /* if a dependent link is ready, pass it back */
-       if (!ret && nxt) {
-               struct io_kiocb *link;
-
-               io_prep_async_work(nxt, &link);
-               *workptr = &nxt->work;
-               if (link) {
-                       nxt->work.flags |= IO_WQ_WORK_CB;
-                       nxt->work.func = io_link_work_cb;
-                       nxt->work.data = link;
-               }
-       }
+       if (!ret && nxt)
+               io_wq_assign_next(workptr, nxt);
 }
 
-static bool io_op_needs_file(const struct io_uring_sqe *sqe)
+static bool io_req_op_valid(int op)
 {
-       int op = READ_ONCE(sqe->opcode);
+       return op >= IORING_OP_NOP && op < IORING_OP_LAST;
+}
 
-       switch (op) {
+static int io_req_needs_file(struct io_kiocb *req)
+{
+       switch (req->opcode) {
        case IORING_OP_NOP:
        case IORING_OP_POLL_REMOVE:
        case IORING_OP_TIMEOUT:
        case IORING_OP_TIMEOUT_REMOVE:
        case IORING_OP_ASYNC_CANCEL:
        case IORING_OP_LINK_TIMEOUT:
-               return false;
+               return 0;
        default:
-               return true;
+               if (io_req_op_valid(req->opcode))
+                       return 1;
+               return -EINVAL;
        }
 }
 
@@ -3090,20 +3375,22 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
        return table->files[index & IORING_FILE_TABLE_MASK];
 }
 
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
        unsigned flags;
-       int fd;
+       int fd, ret;
 
-       flags = READ_ONCE(req->sqe->flags);
-       fd = READ_ONCE(req->sqe->fd);
+       flags = READ_ONCE(sqe->flags);
+       fd = READ_ONCE(sqe->fd);
 
        if (flags & IOSQE_IO_DRAIN)
                req->flags |= REQ_F_IO_DRAIN;
 
-       if (!io_op_needs_file(req->sqe))
-               return 0;
+       ret = io_req_needs_file(req);
+       if (ret <= 0)
+               return ret;
 
        if (flags & IOSQE_FIXED_FILE) {
                if (unlikely(!ctx->file_table ||
@@ -3179,8 +3466,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        if (prev) {
-               if (prev->flags & REQ_F_LINK)
-                       prev->flags |= REQ_F_FAIL_LINK;
+               req_set_fail_links(prev);
                io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
                                                -ETIME);
                io_put_req(prev);
@@ -3222,22 +3508,23 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
 
        nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
                                        link_list);
-       if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT)
+       if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
                return NULL;
 
        req->flags |= REQ_F_LINK_TIMEOUT;
        return nxt;
 }
 
-static void __io_queue_sqe(struct io_kiocb *req)
+static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+       struct io_kiocb *linked_timeout;
        struct io_kiocb *nxt = NULL;
        int ret;
 
-       ret = io_issue_sqe(req, &nxt, true);
-       if (nxt)
-               io_queue_async_work(nxt);
+again:
+       linked_timeout = io_prep_linked_timeout(req);
+
+       ret = io_issue_sqe(req, sqe, &nxt, true);
 
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -3256,7 +3543,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
                 * submit reference when the iocb is actually submitted.
                 */
                io_queue_async_work(req);
-               return;
+               goto done_req;
        }
 
 err:
@@ -3273,13 +3560,18 @@ err:
        /* and drop final reference, if we failed */
        if (ret) {
                io_cqring_add_event(req, ret);
-               if (req->flags & REQ_F_LINK)
-                       req->flags |= REQ_F_FAIL_LINK;
+               req_set_fail_links(req);
                io_put_req(req);
        }
+done_req:
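+       /* issue a ready dependent link inline instead of recursing */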
+       if (nxt) {
+               req = nxt;
+               nxt = NULL;
+               goto again;
+       }
 }
 
-static void io_queue_sqe(struct io_kiocb *req)
+static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        int ret;
 
@@ -3289,16 +3581,15 @@ static void io_queue_sqe(struct io_kiocb *req)
        }
        req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
 
-       ret = io_req_defer(req);
+       ret = io_req_defer(req, sqe);
        if (ret) {
                if (ret != -EIOCBQUEUED) {
                        io_cqring_add_event(req, ret);
-                       if (req->flags & REQ_F_LINK)
-                               req->flags |= REQ_F_FAIL_LINK;
+                       req_set_fail_links(req);
                        io_double_put_req(req);
                }
        } else
-               __io_queue_sqe(req);
+               __io_queue_sqe(req, sqe);
 }
 
 static inline void io_queue_link_head(struct io_kiocb *req)
@@ -3307,27 +3598,25 @@ static inline void io_queue_link_head(struct io_kiocb *req)
                io_cqring_add_event(req, -ECANCELED);
                io_double_put_req(req);
        } else
-               io_queue_sqe(req);
+               io_queue_sqe(req, NULL);
 }
 
+#define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
+                               IOSQE_IO_HARDLINK)
 
-#define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
-
-static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
-                         struct io_kiocb **link)
+static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                         struct io_submit_state *state, struct io_kiocb **link)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       req->user_data = req->sqe->user_data;
-
        /* enforce forwards compatibility on users */
-       if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
+       if (unlikely(sqe->flags & ~SQE_VALID_FLAGS)) {
                ret = -EINVAL;
                goto err_req;
        }
 
-       ret = io_req_set_file(state, req);
+       ret = io_req_set_file(state, req, sqe);
        if (unlikely(ret)) {
 err_req:
                io_cqring_add_event(req, ret);
@@ -3344,32 +3633,38 @@ err_req:
         */
        if (*link) {
                struct io_kiocb *prev = *link;
-               struct io_async_ctx *io;
 
-               if (req->sqe->flags & IOSQE_IO_DRAIN)
+               if (sqe->flags & IOSQE_IO_DRAIN)
                        (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
 
-               io = kmalloc(sizeof(*io), GFP_KERNEL);
-               if (!io) {
+               if (sqe->flags & IOSQE_IO_HARDLINK)
+                       req->flags |= REQ_F_HARDLINK;
+
+               if (io_alloc_async_ctx(req)) {
                        ret = -EAGAIN;
                        goto err_req;
                }
 
-               ret = io_req_defer_prep(req, io);
+               ret = io_req_defer_prep(req, sqe);
                if (ret) {
-                       kfree(io);
+                       /* fail even hard links since we don't submit */
                        prev->flags |= REQ_F_FAIL_LINK;
                        goto err_req;
                }
                trace_io_uring_link(ctx, req, prev);
                list_add_tail(&req->link_list, &prev->link_list);
-       } else if (req->sqe->flags & IOSQE_IO_LINK) {
+       } else if (sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
                req->flags |= REQ_F_LINK;
+               if (sqe->flags & IOSQE_IO_HARDLINK)
+                       req->flags |= REQ_F_HARDLINK;
 
                INIT_LIST_HEAD(&req->link_list);
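+               /* prep now: the head is queued later with a NULL sqe */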
+               ret = io_req_defer_prep(req, sqe);
+               if (ret)
+                       req->flags |= REQ_F_FAIL_LINK;
                *link = req;
        } else {
-               io_queue_sqe(req);
+               io_queue_sqe(req, sqe);
        }
 
        return true;
@@ -3414,14 +3709,15 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
 }
 
 /*
- * Fetch an sqe, if one is available. Note that s->sqe will point to memory
+ * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
  * that is mapped by userspace. This means that care needs to be taken to
  * ensure that reads are stable, as we cannot rely on userspace always
  * being a good citizen. If members of the sqe are validated and then later
  * used, it's important that those reads are done through READ_ONCE() to
  * prevent a re-load down the line.
  */
-static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                         const struct io_uring_sqe **sqe_ptr)
 {
        struct io_rings *rings = ctx->rings;
        u32 *sq_array = ctx->sq_array;
@@ -3448,7 +3744,9 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
                 * link list.
                 */
                req->sequence = ctx->cached_sq_head;
-               req->sqe = &ctx->sq_sqes[head];
+               *sqe_ptr = &ctx->sq_sqes[head];
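+               /* the sqe is in user-mapped memory: snapshot fields via READ_ONCE */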
+               req->opcode = READ_ONCE((*sqe_ptr)->opcode);
+               req->user_data = READ_ONCE((*sqe_ptr)->user_data);
                ctx->cached_sq_head++;
                return true;
        }
@@ -3480,6 +3778,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
        }
 
        for (i = 0; i < nr; i++) {
+               const struct io_uring_sqe *sqe;
                struct io_kiocb *req;
                unsigned int sqe_flags;
 
@@ -3489,12 +3788,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                                submitted = -EAGAIN;
                        break;
                }
-               if (!io_get_sqring(ctx, req)) {
+               if (!io_get_sqring(ctx, req, &sqe)) {
                        __io_free_req(req);
                        break;
                }
 
-               if (io_sqe_needs_user(req->sqe) && !*mm) {
+               if (io_req_needs_user(req) && !*mm) {
                        mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
                        if (!mm_fault) {
                                use_mm(ctx->sqo_mm);
@@ -3503,22 +3802,21 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                }
 
                submitted++;
-               sqe_flags = req->sqe->flags;
+               sqe_flags = sqe->flags;
 
                req->ring_file = ring_file;
                req->ring_fd = ring_fd;
                req->has_user = *mm != NULL;
                req->in_async = async;
                req->needs_fixed_file = async;
-               trace_io_uring_submit_sqe(ctx, req->sqe->user_data,
-                                         true, async);
-               if (!io_submit_sqe(req, statep, &link))
+               trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
+               if (!io_submit_sqe(req, sqe, statep, &link))
                        break;
                /*
                 * If previous wasn't linked and we have a linked command,
                 * that's the end of the chain. Submit the previous link.
                 */
-               if (!(sqe_flags & IOSQE_IO_LINK) && link) {
+               if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) && link) {
                        io_queue_link_head(link);
                        link = NULL;
                }
@@ -3647,7 +3945,9 @@ static int io_sq_thread(void *data)
                }
 
                to_submit = min(to_submit, ctx->sq_entries);
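+               /* io_submit_sqes() must be called with the uring_lock held */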
+               mutex_lock(&ctx->uring_lock);
                ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
+               mutex_unlock(&ctx->uring_lock);
                if (ret > 0)
                        inflight += ret;
        }
@@ -3676,7 +3976,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
        struct io_ring_ctx *ctx = iowq->ctx;
 
        /*
-        * Wake up if we have enough events, or if a timeout occured since we
+        * Wake up if we have enough events, or if a timeout occurred since we
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
@@ -4163,13 +4463,15 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
+       if (up.resv)
+               return -EINVAL;
        if (check_add_overflow(up.offset, nr_args, &done))
                return -EOVERFLOW;
        if (done > ctx->nr_user_files)
                return -EINVAL;
 
        done = 0;
-       fds = (__s32 __user *) up.fds;
+       fds = u64_to_user_ptr(up.fds);
        while (nr_args) {
                struct fixed_file_table *table;
                unsigned index;
@@ -4428,7 +4730,7 @@ static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
                if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
                        return -EFAULT;
 
-               dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
+               dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
                dst->iov_len = ciov.iov_len;
                return 0;
        }
@@ -4742,10 +5044,6 @@ static int io_uring_flush(struct file *file, void *data)
        struct io_ring_ctx *ctx = file->private_data;
 
        io_uring_cancel_files(ctx, data);
-       if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
-               io_cqring_overflow_flush(ctx, true);
-               io_wq_cancel_all(ctx->io_wq);
-       }
        return 0;
 }
 
@@ -4866,6 +5164,9 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
                                           &cur_mm, false);
                mutex_unlock(&ctx->uring_lock);
+
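+               /* on a partial submit, skip waiting for events and return the count */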
+               if (submitted != to_submit)
+                       goto out;
        }
        if (flags & IORING_ENTER_GETEVENTS) {
                unsigned nr_events = 0;
@@ -4879,6 +5180,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                }
        }
 
+out:
        percpu_ref_put(&ctx->refs);
 out_fput:
        fdput(f);
index 6970f55daf54341f307a5052eb44b9e2d039ead8..44b6da0328426ba0f133733114973f7d7a526e6a 100644 (file)
@@ -2853,7 +2853,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
        }
        if (inode) {
                /* userspace relies on this representation of dev_t */
-               seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
+               seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
                                MAJOR(inode->i_sb->s_dev),
                                MINOR(inode->i_sb->s_dev), inode->i_ino);
        } else {
index a63620cdb73a732d1a9f987c7889923c05830535..ccba3c4c44797b54b859acc2378d5f317e6c29be 100644 (file)
@@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
 {
        bio->bi_end_io = mpage_end_io;
        bio_set_op_attrs(bio, op, op_flags);
-       guard_bio_eod(op, bio);
+       guard_bio_eod(bio);
        submit_bio(bio);
        return NULL;
 }
index d6c91d1e88cb361f751f825bf84ace990629223b..4fb61e0754ed67f9459a86b5c55c28a3c374c7ff 100644 (file)
@@ -1001,7 +1001,8 @@ static int may_linkat(struct path *link)
  * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
  *                       should be allowed, or not, on files that already
  *                       exist.
- * @dir: the sticky parent directory
+ * @dir_mode: mode bits of directory
+ * @dir_uid: owner of directory
  * @inode: the inode of the file to open
  *
  * Block an O_CREAT open of a FIFO (or a regular file) when:
@@ -1017,18 +1018,18 @@ static int may_linkat(struct path *link)
  *
  * Returns 0 if the open is allowed, -ve on error.
  */
-static int may_create_in_sticky(struct dentry * const dir,
+static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
                                struct inode * const inode)
 {
        if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
            (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
-           likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
-           uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
+           likely(!(dir_mode & S_ISVTX)) ||
+           uid_eq(inode->i_uid, dir_uid) ||
            uid_eq(current_fsuid(), inode->i_uid))
                return 0;
 
-       if (likely(dir->d_inode->i_mode & 0002) ||
-           (dir->d_inode->i_mode & 0020 &&
+       if (likely(dir_mode & 0002) ||
+           (dir_mode & 0020 &&
             ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
              (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
                const char *operation = S_ISFIFO(inode->i_mode) ?
@@ -1232,6 +1233,7 @@ static int follow_managed(struct path *path, struct nameidata *nd)
                        BUG_ON(!path->dentry->d_op);
                        BUG_ON(!path->dentry->d_op->d_manage);
                        ret = path->dentry->d_op->d_manage(path, false);
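+                       /* d_flags may have changed while d_manage() ran; re-read it */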
+                       flags = smp_load_acquire(&path->dentry->d_flags);
                        if (ret < 0)
                                break;
                }
@@ -1649,17 +1651,15 @@ again:
        if (IS_ERR(dentry))
                return dentry;
        if (unlikely(!d_in_lookup(dentry))) {
-               if (!(flags & LOOKUP_NO_REVAL)) {
-                       int error = d_revalidate(dentry, flags);
-                       if (unlikely(error <= 0)) {
-                               if (!error) {
-                                       d_invalidate(dentry);
-                                       dput(dentry);
-                                       goto again;
-                               }
+               int error = d_revalidate(dentry, flags);
+               if (unlikely(error <= 0)) {
+                       if (!error) {
+                               d_invalidate(dentry);
                                dput(dentry);
-                               dentry = ERR_PTR(error);
+                               goto again;
                        }
+                       dput(dentry);
+                       dentry = ERR_PTR(error);
                }
        } else {
                old = inode->i_op->lookup(inode, dentry, flags);
@@ -2617,72 +2617,6 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
 }
 EXPORT_SYMBOL(user_path_at_empty);
 
-/**
- * mountpoint_last - look up last component for umount
- * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
- *
- * This is a special lookup_last function just for umount. In this case, we
- * need to resolve the path without doing any revalidation.
- *
- * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
- * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
- * in almost all cases, this lookup will be served out of the dcache. The only
- * cases where it won't are if nd->last refers to a symlink or the path is
- * bogus and it doesn't exist.
- *
- * Returns:
- * -error: if there was an error during lookup. This includes -ENOENT if the
- *         lookup found a negative dentry.
- *
- * 0:      if we successfully resolved nd->last and found it to not to be a
- *         symlink that needs to be followed.
- *
- * 1:      if we successfully resolved nd->last and found it to be a symlink
- *         that needs to be followed.
- */
-static int
-mountpoint_last(struct nameidata *nd)
-{
-       int error = 0;
-       struct dentry *dir = nd->path.dentry;
-       struct path path;
-
-       /* If we're in rcuwalk, drop out of it to handle last component */
-       if (nd->flags & LOOKUP_RCU) {
-               if (unlazy_walk(nd))
-                       return -ECHILD;
-       }
-
-       nd->flags &= ~LOOKUP_PARENT;
-
-       if (unlikely(nd->last_type != LAST_NORM)) {
-               error = handle_dots(nd, nd->last_type);
-               if (error)
-                       return error;
-               path.dentry = dget(nd->path.dentry);
-       } else {
-               path.dentry = d_lookup(dir, &nd->last);
-               if (!path.dentry) {
-                       /*
-                        * No cached dentry. Mounted dentries are pinned in the
-                        * cache, so that means that this dentry is probably
-                        * a symlink or the path doesn't actually point
-                        * to a mounted dentry.
-                        */
-                       path.dentry = lookup_slow(&nd->last, dir,
-                                            nd->flags | LOOKUP_NO_REVAL);
-                       if (IS_ERR(path.dentry))
-                               return PTR_ERR(path.dentry);
-               }
-       }
-       if (d_flags_negative(smp_load_acquire(&path.dentry->d_flags))) {
-               dput(path.dentry);
-               return -ENOENT;
-       }
-       path.mnt = nd->path.mnt;
-       return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0);
-}
-
 /**
  * path_mountpoint - look up a path to be umounted
  * @nd:                lookup context
@@ -2699,14 +2633,17 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
        int err;
 
        while (!(err = link_path_walk(s, nd)) &&
-               (err = mountpoint_last(nd)) > 0) {
+               (err = lookup_last(nd)) > 0) {
                s = trailing_symlink(nd);
        }
+       if (!err && (nd->flags & LOOKUP_RCU))
+               err = unlazy_walk(nd);
+       if (!err)
+               err = handle_lookup_down(nd);
        if (!err) {
                *path = nd->path;
                nd->path.mnt = NULL;
                nd->path.dentry = NULL;
-               follow_mount(path);
        }
        terminate_walk(nd);
        return err;
@@ -3265,6 +3202,8 @@ static int do_last(struct nameidata *nd,
                   struct file *file, const struct open_flags *op)
 {
        struct dentry *dir = nd->path.dentry;
+       kuid_t dir_uid = dir->d_inode->i_uid;
+       umode_t dir_mode = dir->d_inode->i_mode;
        int open_flag = op->open_flag;
        bool will_truncate = (open_flag & O_TRUNC) != 0;
        bool got_write = false;
@@ -3395,7 +3334,7 @@ finish_open:
                error = -EISDIR;
                if (d_is_dir(nd->path.dentry))
                        goto out;
-               error = may_create_in_sticky(dir,
+               error = may_create_in_sticky(dir_mode, dir_uid,
                                             d_backing_inode(nd->path.dentry));
                if (unlikely(error))
                        goto out;
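
The may_create_in_sticky() change above is about lifetime, not policy: do_last() now samples the parent's mode and uid while nd->path.dentry is still known-good, so the check no longer dereferences the parent dentry after the walk may have moved on. The decision logic itself is unchanged; a compilable userspace restatement of it follows (the function name, sysctl values, and uids are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Stand-ins for the sysctls; both protections enabled here for illustration. */
static int sysctl_protected_fifos = 2;
static int sysctl_protected_regular = 2;

static bool open_allowed(mode_t dir_mode, uid_t dir_uid,
                         mode_t inode_mode, uid_t inode_uid, uid_t fsuid)
{
        if ((!sysctl_protected_fifos && S_ISFIFO(inode_mode)) ||
            (!sysctl_protected_regular && S_ISREG(inode_mode)) ||
            !(dir_mode & S_ISVTX) ||        /* parent is not sticky */
            inode_uid == dir_uid ||          /* file owned by the dir owner */
            fsuid == inode_uid)              /* opener owns the file */
                return true;

        if ((dir_mode & 0002) ||             /* world-writable sticky dir */
            ((dir_mode & 0020) &&            /* or group-writable, per sysctl */
             ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode_mode)) ||
              (sysctl_protected_regular >= 2 && S_ISREG(inode_mode)))))
                return false;                /* the kernel returns -EACCES here */
        return true;
}

int main(void)
{
        /* /tmp-style sticky world-writable dir, FIFO owned by someone else */
        printf("%d\n", open_allowed(S_IFDIR | 01777, 0,
                                    S_IFIFO | 0666, 1000, 1001)); /* 0: blocked */
        return 0;
}
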
index 2fd0c8bcb8c147f28d83ddd883402e8a40a3b7cd..5e1bf611a9eb6922554c405d4aa753c866e81585 100644 (file)
@@ -1728,7 +1728,7 @@ static bool is_mnt_ns_file(struct dentry *dentry)
               dentry->d_fsdata == &mntns_operations;
 }
 
-struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
+static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
 {
        return container_of(ns, struct mnt_namespace, ns);
 }
@@ -3325,8 +3325,8 @@ struct dentry *mount_subtree(struct vfsmount *m, const char *name)
 }
 EXPORT_SYMBOL(mount_subtree);
 
-int ksys_mount(const char __user *dev_name, const char __user *dir_name,
-              const char __user *type, unsigned long flags, void __user *data)
+SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
+               char __user *, type, unsigned long, flags, void __user *, data)
 {
        int ret;
        char *kernel_type;
@@ -3359,12 +3359,6 @@ out_type:
        return ret;
 }
 
-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
-               char __user *, type, unsigned long, flags, void __user *, data)
-{
-       return ksys_mount(dev_name, dir_name, type, flags, data);
-}
-
 /*
  * Create a kernel mount representation for a new, prepared superblock
  * (specified by fs_fd) and attach to an open_tree-like file descriptor.
index f64a33d2a1d15f36d258e8db1579f5ed3080a984..2a82dcce5fc169d189f708de67d97b3bce450e63 100644 (file)
@@ -206,7 +206,6 @@ TRACE_DEFINE_ENUM(LOOKUP_AUTOMOUNT);
 TRACE_DEFINE_ENUM(LOOKUP_PARENT);
 TRACE_DEFINE_ENUM(LOOKUP_REVAL);
 TRACE_DEFINE_ENUM(LOOKUP_RCU);
-TRACE_DEFINE_ENUM(LOOKUP_NO_REVAL);
 TRACE_DEFINE_ENUM(LOOKUP_OPEN);
 TRACE_DEFINE_ENUM(LOOKUP_CREATE);
 TRACE_DEFINE_ENUM(LOOKUP_EXCL);
@@ -224,7 +223,6 @@ TRACE_DEFINE_ENUM(LOOKUP_DOWN);
                        { LOOKUP_PARENT, "PARENT" }, \
                        { LOOKUP_REVAL, "REVAL" }, \
                        { LOOKUP_RCU, "RCU" }, \
-                       { LOOKUP_NO_REVAL, "NO_REVAL" }, \
                        { LOOKUP_OPEN, "OPEN" }, \
                        { LOOKUP_CREATE, "CREATE" }, \
                        { LOOKUP_EXCL, "EXCL" }, \
index 3e77b728a22bd09f4c68f9e79cedd6059581edff..46f2255800091b76d9bf8f6de01cc3cf9690e097 100644 (file)
@@ -57,6 +57,9 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
                 * doing an __iget/iput with SB_ACTIVE clear would actually
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
+                * However, we should have been called /after/ evict_inodes
+                * removed all zero-refcount inodes in any case.  Test to
+                * be sure.
                 */
                if (!atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
@@ -77,6 +80,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
 
                iput_inode = inode;
 
+               cond_resched();
                spin_lock(&sb->s_inode_list_lock);
        }
        spin_unlock(&sb->s_inode_list_lock);
index a0431642c6b55f4d80f66ddb29d7f0f8f5314bf8..f75767bd623afcb5f5e67e2a61dbf95c51e442f7 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -3,6 +3,7 @@
 #include <linux/pseudo_fs.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/proc_fs.h>
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
 #include <linux/ktime.h>
@@ -11,6 +12,8 @@
 #include <linux/nsfs.h>
 #include <linux/uaccess.h>
 
+#include "internal.h"
+
 static struct vfsmount *nsfs_mnt;
 
 static long ns_ioctl(struct file *filp, unsigned int ioctl,
index 1c4c51f3df60fce4fb0ece07b7a46d821124430c..cda1027d08194242a77709d2340da0f4357864d7 100644 (file)
@@ -3282,6 +3282,7 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
 
        debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
                           &dlm_debug->d_filter_secs);
+       ocfs2_get_dlm_debug(dlm_debug);
 }
 
 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
index 1afe57f425a01e85639dd6192948bb4b6c3a9bbe..68ba354cf3610aa0808fccb4b9575e0c7a915581 100644 (file)
@@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
 
        ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
 
+       if (replayed) {
+               jbd2_journal_lock_updates(journal->j_journal);
+               status = jbd2_journal_flush(journal->j_journal);
+               jbd2_journal_unlock_updates(journal->j_journal);
+               if (status < 0)
+                       mlog_errno(status);
+       }
+
        status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
        if (status < 0) {
                mlog_errno(status);
index b801c635310098bae2ba074bd66d8056c2764140..6220642fe113b69e6303877898bf53b04330dd90 100644 (file)
@@ -227,13 +227,17 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
 struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper)
 {
        struct ovl_fh *fh;
-       int fh_type, fh_len, dwords;
-       void *buf;
+       int fh_type, dwords;
        int buflen = MAX_HANDLE_SZ;
        uuid_t *uuid = &real->d_sb->s_uuid;
+       int err;
 
-       buf = kmalloc(buflen, GFP_KERNEL);
-       if (!buf)
+       /* Make sure the real fid stays 32bit aligned */
+       BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4);
+       BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255);
+
+       fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL);
+       if (!fh)
                return ERR_PTR(-ENOMEM);
 
        /*
@@ -242,27 +246,19 @@ struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper)
         * the price of reconnecting the dentry.
         */
        dwords = buflen >> 2;
-       fh_type = exportfs_encode_fh(real, buf, &dwords, 0);
+       fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0);
        buflen = (dwords << 2);
 
-       fh = ERR_PTR(-EIO);
+       err = -EIO;
        if (WARN_ON(fh_type < 0) ||
            WARN_ON(buflen > MAX_HANDLE_SZ) ||
            WARN_ON(fh_type == FILEID_INVALID))
-               goto out;
+               goto out_err;
 
-       BUILD_BUG_ON(MAX_HANDLE_SZ + offsetof(struct ovl_fh, fid) > 255);
-       fh_len = offsetof(struct ovl_fh, fid) + buflen;
-       fh = kmalloc(fh_len, GFP_KERNEL);
-       if (!fh) {
-               fh = ERR_PTR(-ENOMEM);
-               goto out;
-       }
-
-       fh->version = OVL_FH_VERSION;
-       fh->magic = OVL_FH_MAGIC;
-       fh->type = fh_type;
-       fh->flags = OVL_FH_FLAG_CPU_ENDIAN;
+       fh->fb.version = OVL_FH_VERSION;
+       fh->fb.magic = OVL_FH_MAGIC;
+       fh->fb.type = fh_type;
+       fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN;
        /*
         * When we will want to decode an overlay dentry from this handle
         * and all layers are on the same fs, if we get a disconnected real
@@ -270,14 +266,15 @@ struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper)
         * it to upperdentry or to lowerstack is by checking this flag.
         */
        if (is_upper)
-               fh->flags |= OVL_FH_FLAG_PATH_UPPER;
-       fh->len = fh_len;
-       fh->uuid = *uuid;
-       memcpy(fh->fid, buf, buflen);
+               fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER;
+       fh->fb.len = sizeof(fh->fb) + buflen;
+       fh->fb.uuid = *uuid;
 
-out:
-       kfree(buf);
        return fh;
+
+out_err:
+       kfree(fh);
+       return ERR_PTR(err);
 }
 
 int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
@@ -300,8 +297,8 @@ int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
        /*
         * Do not fail when upper doesn't support xattrs.
         */
-       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh,
-                                fh ? fh->len : 0, 0);
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh->buf,
+                                fh ? fh->fb.len : 0, 0);
        kfree(fh);
 
        return err;
@@ -317,7 +314,7 @@ static int ovl_set_upper_fh(struct dentry *upper, struct dentry *index)
        if (IS_ERR(fh))
                return PTR_ERR(fh);
 
-       err = ovl_do_setxattr(index, OVL_XATTR_UPPER, fh, fh->len, 0);
+       err = ovl_do_setxattr(index, OVL_XATTR_UPPER, fh->buf, fh->fb.len, 0);
 
        kfree(fh);
        return err;
index 702aa63f6774d75416b245656887394914cb26f4..29abdb1d3b5c6cf235e99e81948ee55b6d676636 100644 (file)
@@ -1170,7 +1170,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
        if (newdentry == trap)
                goto out_dput;
 
-       if (WARN_ON(olddentry->d_inode == newdentry->d_inode))
+       if (olddentry->d_inode == newdentry->d_inode)
                goto out_dput;
 
        err = 0;
index 73c9775215b33ba043468f57d6cb618fbcffe477..70e55588aedc74d966208f1b819335f6750970f3 100644 (file)
@@ -211,10 +211,11 @@ static int ovl_check_encode_origin(struct dentry *dentry)
        return 1;
 }
 
-static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
+static int ovl_dentry_to_fid(struct dentry *dentry, u32 *fid, int buflen)
 {
        struct ovl_fh *fh = NULL;
        int err, enc_lower;
+       int len;
 
        /*
         * Check if we should encode a lower or upper file handle and maybe
@@ -231,11 +232,12 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
                return PTR_ERR(fh);
 
        err = -EOVERFLOW;
-       if (fh->len > buflen)
+       len = OVL_FH_LEN(fh);
+       if (len > buflen)
                goto fail;
 
-       memcpy(buf, (char *)fh, fh->len);
-       err = fh->len;
+       memcpy(fid, fh, len);
+       err = len;
 
 out:
        kfree(fh);
@@ -243,31 +245,16 @@ out:
 
 fail:
        pr_warn_ratelimited("overlayfs: failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
-                           dentry, err, buflen, fh ? (int)fh->len : 0,
-                           fh ? fh->type : 0);
+                           dentry, err, buflen, fh ? (int)fh->fb.len : 0,
+                           fh ? fh->fb.type : 0);
        goto out;
 }
 
-static int ovl_dentry_to_fh(struct dentry *dentry, u32 *fid, int *max_len)
-{
-       int res, len = *max_len << 2;
-
-       res = ovl_d_to_fh(dentry, (char *)fid, len);
-       if (res <= 0)
-               return FILEID_INVALID;
-
-       len = res;
-
-       /* Round up to dwords */
-       *max_len = (len + 3) >> 2;
-       return OVL_FILEID;
-}
-
 static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
                         struct inode *parent)
 {
        struct dentry *dentry;
-       int type;
+       int bytes = *max_len << 2;
 
        /* TODO: encode connectable file handles */
        if (parent)
@@ -277,10 +264,14 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
        if (WARN_ON(!dentry))
                return FILEID_INVALID;
 
-       type = ovl_dentry_to_fh(dentry, fid, max_len);
-
+       bytes = ovl_dentry_to_fid(dentry, fid, bytes);
        dput(dentry);
-       return type;
+       if (bytes <= 0)
+               return FILEID_INVALID;
+
+       *max_len = bytes >> 2;
+
+       return OVL_FILEID_V1;
 }
 
 /*
@@ -777,24 +768,45 @@ out_err:
        goto out;
 }
 
+static struct ovl_fh *ovl_fid_to_fh(struct fid *fid, int buflen, int fh_type)
+{
+       struct ovl_fh *fh;
+
+       /* If on-wire inner fid is aligned - nothing to do */
+       if (fh_type == OVL_FILEID_V1)
+               return (struct ovl_fh *)fid;
+
+       if (fh_type != OVL_FILEID_V0)
+               return ERR_PTR(-EINVAL);
+
+       fh = kzalloc(buflen, GFP_KERNEL);
+       if (!fh)
+               return ERR_PTR(-ENOMEM);
+
+       /* Copy unaligned inner fh into aligned buffer */
+       memcpy(&fh->fb, fid, buflen - OVL_FH_WIRE_OFFSET);
+       return fh;
+}
+
 static struct dentry *ovl_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                       int fh_len, int fh_type)
 {
        struct dentry *dentry = NULL;
-       struct ovl_fh *fh = (struct ovl_fh *) fid;
+       struct ovl_fh *fh = NULL;
        int len = fh_len << 2;
        unsigned int flags = 0;
        int err;
 
-       err = -EINVAL;
-       if (fh_type != OVL_FILEID)
+       fh = ovl_fid_to_fh(fid, len, fh_type);
+       err = PTR_ERR(fh);
+       if (IS_ERR(fh))
                goto out_err;
 
        err = ovl_check_fh_len(fh, len);
        if (err)
                goto out_err;
 
-       flags = fh->flags;
+       flags = fh->fb.flags;
        dentry = (flags & OVL_FH_FLAG_PATH_UPPER) ?
                 ovl_upper_fh_to_d(sb, fh) :
                 ovl_lower_fh_to_d(sb, fh);
@@ -802,12 +814,18 @@ static struct dentry *ovl_fh_to_dentry(struct super_block *sb, struct fid *fid,
        if (IS_ERR(dentry) && err != -ESTALE)
                goto out_err;
 
+out:
+       /* We may have needed to re-align OVL_FILEID_V0 */
+       if (!IS_ERR_OR_NULL(fh) && fh != (void *)fid)
+               kfree(fh);
+
        return dentry;
 
 out_err:
        pr_warn_ratelimited("overlayfs: failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n",
-                           len, fh_type, flags, err);
-       return ERR_PTR(err);
+                           fh_len, fh_type, flags, err);
+       dentry = ERR_PTR(err);
+       goto out;
 }
 
 static struct dentry *ovl_fh_to_parent(struct super_block *sb, struct fid *fid,
index bc14781886bf0735fe9cf32681ab87a3f2703efc..b045cf1826fc4cb26d577d05916ab3545577061f 100644 (file)
@@ -200,8 +200,14 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
                        if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
                            (!ovl_verify_lower(dentry->d_sb) &&
                             (is_dir || lowerstat.nlink == 1))) {
-                               stat->ino = lowerstat.ino;
                                lower_layer = ovl_layer_lower(dentry);
+                               /*
+                                * Cannot use the origin st_dev:st_ino because
+                                * origin inode content may differ from overlay
+                                * inode content.
+                                */
+                               if (samefs || lower_layer->fsid)
+                                       stat->ino = lowerstat.ino;
                        }
 
                        /*
index c269d603352532057d278e88bcdcd2cf6ec33861..76ff6633917350672caa413964da6700811e9af1 100644 (file)
@@ -84,21 +84,21 @@ static int ovl_acceptable(void *ctx, struct dentry *dentry)
  * Return -ENODATA for "origin unknown".
  * Return <0 for an invalid file handle.
  */
-int ovl_check_fh_len(struct ovl_fh *fh, int fh_len)
+int ovl_check_fb_len(struct ovl_fb *fb, int fb_len)
 {
-       if (fh_len < sizeof(struct ovl_fh) || fh_len < fh->len)
+       if (fb_len < sizeof(struct ovl_fb) || fb_len < fb->len)
                return -EINVAL;
 
-       if (fh->magic != OVL_FH_MAGIC)
+       if (fb->magic != OVL_FH_MAGIC)
                return -EINVAL;
 
        /* Treat larger version and unknown flags as "origin unknown" */
-       if (fh->version > OVL_FH_VERSION || fh->flags & ~OVL_FH_FLAG_ALL)
+       if (fb->version > OVL_FH_VERSION || fb->flags & ~OVL_FH_FLAG_ALL)
                return -ENODATA;
 
        /* Treat endianness mismatch as "origin unknown" */
-       if (!(fh->flags & OVL_FH_FLAG_ANY_ENDIAN) &&
-           (fh->flags & OVL_FH_FLAG_BIG_ENDIAN) != OVL_FH_FLAG_CPU_ENDIAN)
+       if (!(fb->flags & OVL_FH_FLAG_ANY_ENDIAN) &&
+           (fb->flags & OVL_FH_FLAG_BIG_ENDIAN) != OVL_FH_FLAG_CPU_ENDIAN)
                return -ENODATA;
 
        return 0;
@@ -119,15 +119,15 @@ static struct ovl_fh *ovl_get_fh(struct dentry *dentry, const char *name)
        if (res == 0)
                return NULL;
 
-       fh = kzalloc(res, GFP_KERNEL);
+       fh = kzalloc(res + OVL_FH_WIRE_OFFSET, GFP_KERNEL);
        if (!fh)
                return ERR_PTR(-ENOMEM);
 
-       res = vfs_getxattr(dentry, name, fh, res);
+       res = vfs_getxattr(dentry, name, fh->buf, res);
        if (res < 0)
                goto fail;
 
-       err = ovl_check_fh_len(fh, res);
+       err = ovl_check_fb_len(&fh->fb, res);
        if (err < 0) {
                if (err == -ENODATA)
                        goto out;
@@ -158,12 +158,12 @@ struct dentry *ovl_decode_real_fh(struct ovl_fh *fh, struct vfsmount *mnt,
         * Make sure that the stored uuid matches the uuid of the lower
         * layer where file handle will be decoded.
         */
-       if (!uuid_equal(&fh->uuid, &mnt->mnt_sb->s_uuid))
+       if (!uuid_equal(&fh->fb.uuid, &mnt->mnt_sb->s_uuid))
                return NULL;
 
-       bytes = (fh->len - offsetof(struct ovl_fh, fid));
-       real = exportfs_decode_fh(mnt, (struct fid *)fh->fid,
-                                 bytes >> 2, (int)fh->type,
+       bytes = (fh->fb.len - offsetof(struct ovl_fb, fid));
+       real = exportfs_decode_fh(mnt, (struct fid *)fh->fb.fid,
+                                 bytes >> 2, (int)fh->fb.type,
                                  connected ? ovl_acceptable : NULL, mnt);
        if (IS_ERR(real)) {
                /*
@@ -173,7 +173,7 @@ struct dentry *ovl_decode_real_fh(struct ovl_fh *fh, struct vfsmount *mnt,
                 * index entries correctly.
                 */
                if (real == ERR_PTR(-ESTALE) &&
-                   !(fh->flags & OVL_FH_FLAG_PATH_UPPER))
+                   !(fh->fb.flags & OVL_FH_FLAG_PATH_UPPER))
                        real = NULL;
                return real;
        }
@@ -323,6 +323,14 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
        int i;
 
        for (i = 0; i < ofs->numlower; i++) {
+               /*
+                * If lower fs uuid is not unique among lower fs we cannot match
+                * fh->uuid to layer.
+                */
+               if (ofs->lower_layers[i].fsid &&
+                   ofs->lower_layers[i].fs->bad_uuid)
+                       continue;
+
                origin = ovl_decode_real_fh(fh, ofs->lower_layers[i].mnt,
                                            connected);
                if (origin)
@@ -400,7 +408,7 @@ static int ovl_verify_fh(struct dentry *dentry, const char *name,
        if (IS_ERR(ofh))
                return PTR_ERR(ofh);
 
-       if (fh->len != ofh->len || memcmp(fh, ofh, fh->len))
+       if (fh->fb.len != ofh->fb.len || memcmp(&fh->fb, &ofh->fb, fh->fb.len))
                err = -ESTALE;
 
        kfree(ofh);
@@ -431,7 +439,7 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name,
 
        err = ovl_verify_fh(dentry, name, fh);
        if (set && err == -ENODATA)
-               err = ovl_do_setxattr(dentry, name, fh, fh->len, 0);
+               err = ovl_do_setxattr(dentry, name, fh->buf, fh->fb.len, 0);
        if (err)
                goto fail;
 
@@ -505,20 +513,20 @@ int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index)
                goto fail;
 
        err = -EINVAL;
-       if (index->d_name.len < sizeof(struct ovl_fh)*2)
+       if (index->d_name.len < sizeof(struct ovl_fb)*2)
                goto fail;
 
        err = -ENOMEM;
        len = index->d_name.len / 2;
-       fh = kzalloc(len, GFP_KERNEL);
+       fh = kzalloc(len + OVL_FH_WIRE_OFFSET, GFP_KERNEL);
        if (!fh)
                goto fail;
 
        err = -EINVAL;
-       if (hex2bin((u8 *)fh, index->d_name.name, len))
+       if (hex2bin(fh->buf, index->d_name.name, len))
                goto fail;
 
-       err = ovl_check_fh_len(fh, len);
+       err = ovl_check_fb_len(&fh->fb, len);
        if (err)
                goto fail;
 
@@ -597,11 +605,11 @@ static int ovl_get_index_name_fh(struct ovl_fh *fh, struct qstr *name)
 {
        char *n, *s;
 
-       n = kcalloc(fh->len, 2, GFP_KERNEL);
+       n = kcalloc(fh->fb.len, 2, GFP_KERNEL);
        if (!n)
                return -ENOMEM;
 
-       s  = bin2hex(n, fh, fh->len);
+       s  = bin2hex(n, fh->buf, fh->fb.len);
        *name = (struct qstr) QSTR_INIT(n, s - n);
 
        return 0;
index 6934bcf030f0b53ff7292561cb25fe14b7295158..f283b1d69a9ede9f33e65b61177c27fd9dd049ef 100644 (file)
@@ -71,20 +71,36 @@ enum ovl_entry_flag {
 #error Endianness not defined
 #endif
 
-/* The type returned by overlay exportfs ops when encoding an ovl_fh handle */
-#define OVL_FILEID     0xfb
+/* The type formerly returned by overlay exportfs for a misaligned fid */
+#define OVL_FILEID_V0  0xfb
+/* The type returned by overlay exportfs for a 32bit-aligned fid */
+#define OVL_FILEID_V1  0xf8
 
-/* On-disk and in-memeory format for redirect by file handle */
-struct ovl_fh {
+/* On-disk format for "origin" file handle */
+struct ovl_fb {
        u8 version;     /* 0 */
        u8 magic;       /* 0xfb */
        u8 len;         /* size of this header + size of fid */
        u8 flags;       /* OVL_FH_FLAG_* */
        u8 type;        /* fid_type of fid */
        uuid_t uuid;    /* uuid of filesystem */
-       u8 fid[0];      /* file identifier */
+       u32 fid[0];     /* file identifier should be 32bit aligned in-memory */
 } __packed;
 
+/* In-memory and on-wire format for overlay file handle */
+struct ovl_fh {
+       u8 padding[3];  /* make sure fb.fid is 32bit aligned */
+       union {
+               struct ovl_fb fb;
+               u8 buf[0];
+       };
+} __packed;
+
+#define OVL_FH_WIRE_OFFSET     offsetof(struct ovl_fh, fb)
+#define OVL_FH_LEN(fh)         (OVL_FH_WIRE_OFFSET + (fh)->fb.len)
+#define OVL_FH_FID_OFFSET      (OVL_FH_WIRE_OFFSET + \
+                                offsetof(struct ovl_fb, fid))
+
 static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
 {
        int err = vfs_rmdir(dir, dentry);
@@ -302,7 +318,13 @@ static inline void ovl_inode_unlock(struct inode *inode)
 
 
 /* namei.c */
-int ovl_check_fh_len(struct ovl_fh *fh, int fh_len);
+int ovl_check_fb_len(struct ovl_fb *fb, int fb_len);
+
+static inline int ovl_check_fh_len(struct ovl_fh *fh, int fh_len)
+{
+       return ovl_check_fb_len(&fh->fb, fh_len - OVL_FH_WIRE_OFFSET);
+}
+
 struct dentry *ovl_decode_real_fh(struct ovl_fh *fh, struct vfsmount *mnt,
                                  bool connected);
 int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
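
The overlayfs.h rework above is the heart of this series: the on-disk/on-wire header (now struct ovl_fb) is packed and 21 bytes long, so three bytes of in-memory padding in struct ovl_fh put fb.fid on a 4-byte boundary. A userspace mirror of the layout, with uuid_t simplified to 16 raw bytes; this needs GCC or Clang, since zero-length arrays in this position are an extension:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ovl_fb {
        uint8_t version, magic, len, flags, type;
        uint8_t uuid[16];              /* uuid_t is 16 bytes */
        uint32_t fid[0];               /* file identifier, wants 32bit alignment */
} __attribute__((packed));

struct ovl_fh {
        uint8_t padding[3];            /* make fb.fid land on a 4-byte boundary */
        union {
                struct ovl_fb fb;
                uint8_t buf[0];
        };
} __attribute__((packed));

/* plays the role of BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4) in the patch */
_Static_assert((offsetof(struct ovl_fh, fb) +
                offsetof(struct ovl_fb, fid)) % 4 == 0,
               "fid must stay 32bit aligned");

int main(void)
{
        printf("fid offset = %zu\n",
               offsetof(struct ovl_fh, fb) + offsetof(struct ovl_fb, fid)); /* 24 */
        return 0;
}
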
index a8279280e88dd89a5acdbda5f7acd179d5b81569..28348c44ea5b2a808277598bc6206168955d4c50 100644 (file)
@@ -22,6 +22,8 @@ struct ovl_config {
 struct ovl_sb {
        struct super_block *sb;
        dev_t pseudo_dev;
+       /* Unusable (conflicting) uuid */
+       bool bad_uuid;
 };
 
 struct ovl_layer {
index afbcb116a7f1b8d1f26eafcd94c7064bd58b56ef..7621ff176d15ce6920170f6c5064e63f07816522 100644 (file)
@@ -1255,7 +1255,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
 {
        unsigned int i;
 
-       if (!ofs->config.nfs_export && !(ofs->config.index && ofs->upper_mnt))
+       if (!ofs->config.nfs_export && !ofs->upper_mnt)
                return true;
 
        for (i = 0; i < ofs->numlowerfs; i++) {
@@ -1263,9 +1263,13 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
                 * We use uuid to associate an overlay lower file handle with a
                 * lower layer, so we can accept lower fs with null uuid as long
                 * as all lower layers with null uuid are on the same fs.
+                * If we detect multiple lower fs with the same uuid, we
+                * disable lower file handle decoding on all of them.
                 */
-               if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid))
+               if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) {
+                       ofs->lower_fs[i].bad_uuid = true;
                        return false;
+               }
        }
        return true;
 }
@@ -1277,6 +1281,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
        unsigned int i;
        dev_t dev;
        int err;
+       bool bad_uuid = false;
 
        /* fsid 0 is reserved for upper fs even with non upper overlay */
        if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb)
@@ -1288,11 +1293,15 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
        }
 
        if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
-               ofs->config.index = false;
-               ofs->config.nfs_export = false;
-               pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
-                       uuid_is_null(&sb->s_uuid) ? "null" : "conflicting",
-                       path->dentry);
+               bad_uuid = true;
+               if (ofs->config.index || ofs->config.nfs_export) {
+                       ofs->config.index = false;
+                       ofs->config.nfs_export = false;
+                       pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
+                               uuid_is_null(&sb->s_uuid) ? "null" :
+                                                           "conflicting",
+                               path->dentry);
+               }
        }
 
        err = get_anon_bdev(&dev);
@@ -1303,6 +1312,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 
        ofs->lower_fs[ofs->numlowerfs].sb = sb;
        ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev;
+       ofs->lower_fs[ofs->numlowerfs].bad_uuid = bad_uuid;
        ofs->numlowerfs++;
 
        return ofs->numlowerfs;
index 87109e761fa5e3f8e994612e774bda07ef5c58c6..57502c3c0fba1bd8fba06693a2c0401054d5c00e 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -364,17 +364,39 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
                        ret = -EAGAIN;
                        break;
                }
-               if (signal_pending(current)) {
-                       if (!ret)
-                               ret = -ERESTARTSYS;
-                       break;
-               }
                __pipe_unlock(pipe);
-               if (was_full) {
+
+               /*
+                * We only get here if we didn't actually read anything.
+                *
+                * However, we could have seen (and removed) a zero-sized
+                * pipe buffer, and might have made space in the buffers
+                * that way.
+                *
+                * You can't make zero-sized pipe buffers by doing an empty
+                * write (not even in packet mode), but they can happen if
+                * the writer gets an EFAULT when trying to fill a buffer
+                * that already got allocated and inserted in the buffer
+                * array.
+                *
+                * So we still need to wake up any pending writers in the
+                * _very_ unlikely case that the pipe was full, but we got
+                * no data.
+                */
+               if (unlikely(was_full)) {
                        wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                }
-               wait_event_interruptible(pipe->wait, pipe_readable(pipe));
+
+               /*
+                * But because we didn't read anything, at this point we can
+                * just return directly with -ERESTARTSYS if we're interrupted,
+                * since we've done any required wakeups and there's no need
+                * to mark anything accessed. And we've dropped the lock.
+                */
+               if (wait_event_interruptible(pipe->wait, pipe_readable(pipe)) < 0)
+                       return -ERESTARTSYS;
+
                __pipe_lock(pipe);
                was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
        }
@@ -559,7 +581,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
                }
                wait_event_interruptible(pipe->wait, pipe_writable(pipe));
                __pipe_lock(pipe);
-               was_empty = pipe_empty(head, pipe->tail);
+               was_empty = pipe_empty(pipe->head, pipe->tail);
        }
 out:
        __pipe_unlock(pipe);
index 84ad1c90d535db8fa333ca77b1c1073f5914e4a1..249672bf54fe7b3d973c485019e1d112989c741f 100644 (file)
@@ -631,12 +631,15 @@ EXPORT_SYMBOL_GPL(posix_acl_create);
 
 /**
  * posix_acl_update_mode  -  update mode in set_acl
+ * @inode: target inode
+ * @mode_p: mode (pointer) for update
+ * @acl: acl pointer
  *
  * Update the file mode when setting an ACL: compute the new file permission
  * bits based on the ACL.  In addition, if the ACL is equivalent to the new
- * file mode, set *acl to NULL to indicate that no ACL should be set.
+ * file mode, set *@acl to NULL to indicate that no ACL should be set.
  *
- * As with chmod, clear the setgit bit if the caller is not in the owning group
+ * As with chmod, clear the setgid bit if the caller is not in the owning group
  * or capable of CAP_FSETID (see inode_change_ok).
  *
  * Called from set_acl inode operations.
index 733881a6387b1358a71d311cc70078cdc3aacb72..27ef84d99f59c007e24246fa92ad3917ad674422 100644 (file)
@@ -103,3 +103,7 @@ config PROC_CHILDREN
 config PROC_PID_ARCH_STATUS
        def_bool n
        depends on PROC_FS
+
+config PROC_CPU_RESCTRL
+       def_bool n
+       depends on PROC_FS
index ebea9501afb84cd8969d2a4a4a25361a91e353c3..915686772f0e56fcdcaca8e0ef0193af66b291a3 100644 (file)
@@ -94,6 +94,8 @@
 #include <linux/sched/debug.h>
 #include <linux/sched/stat.h>
 #include <linux/posix-timers.h>
+#include <linux/time_namespace.h>
+#include <linux/resctrl.h>
 #include <trace/events/oom.h>
 #include "internal.h"
 #include "fd.h"
@@ -1533,6 +1535,96 @@ static const struct file_operations proc_pid_sched_autogroup_operations = {
 
 #endif /* CONFIG_SCHED_AUTOGROUP */
 
+#ifdef CONFIG_TIME_NS
+static int timens_offsets_show(struct seq_file *m, void *v)
+{
+       struct task_struct *p;
+
+       p = get_proc_task(file_inode(m->file));
+       if (!p)
+               return -ESRCH;
+       proc_timens_show_offsets(p, m);
+
+       put_task_struct(p);
+
+       return 0;
+}
+
+static ssize_t timens_offsets_write(struct file *file, const char __user *buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct inode *inode = file_inode(file);
+       struct proc_timens_offset offsets[2];
+       char *kbuf = NULL, *pos, *next_line;
+       struct task_struct *p;
+       int ret, noffsets;
+
+       /* Only allow < page size writes at the beginning of the file */
+       if ((*ppos != 0) || (count >= PAGE_SIZE))
+               return -EINVAL;
+
+       /* Slurp in the user data */
+       kbuf = memdup_user_nul(buf, count);
+       if (IS_ERR(kbuf))
+               return PTR_ERR(kbuf);
+
+       /* Parse the user data */
+       ret = -EINVAL;
+       noffsets = 0;
+       for (pos = kbuf; pos; pos = next_line) {
+               struct proc_timens_offset *off = &offsets[noffsets];
+               int err;
+
+               /* Find the end of line and ensure we don't look past it */
+               next_line = strchr(pos, '\n');
+               if (next_line) {
+                       *next_line = '\0';
+                       next_line++;
+                       if (*next_line == '\0')
+                               next_line = NULL;
+               }
+
+               err = sscanf(pos, "%u %lld %lu", &off->clockid,
+                               &off->val.tv_sec, &off->val.tv_nsec);
+               if (err != 3 || off->val.tv_nsec >= NSEC_PER_SEC)
+                       goto out;
+               noffsets++;
+               if (noffsets == ARRAY_SIZE(offsets)) {
+                       if (next_line)
+                               count = next_line - kbuf;
+                       break;
+               }
+       }
+
+       ret = -ESRCH;
+       p = get_proc_task(inode);
+       if (!p)
+               goto out;
+       ret = proc_timens_set_offset(file, p, offsets, noffsets);
+       put_task_struct(p);
+       if (ret)
+               goto out;
+
+       ret = count;
+out:
+       kfree(kbuf);
+       return ret;
+}
+
+static int timens_offsets_open(struct inode *inode, struct file *filp)
+{
+       return single_open(filp, timens_offsets_show, inode);
+}
+
+static const struct file_operations proc_timens_offsets_operations = {
+       .open           = timens_offsets_open,
+       .read           = seq_read,
+       .write          = timens_offsets_write,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+#endif /* CONFIG_TIME_NS */
+
 static ssize_t comm_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *offset)
 {
@@ -3015,6 +3107,9 @@ static const struct pid_entry tgid_base_stuff[] = {
 #endif
 #ifdef CONFIG_SCHED_AUTOGROUP
        REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
+#endif
+#ifdef CONFIG_TIME_NS
+       REG("timens_offsets",  S_IRUGO|S_IWUSR, proc_timens_offsets_operations),
 #endif
        REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
@@ -3060,6 +3155,9 @@ static const struct pid_entry tgid_base_stuff[] = {
 #endif
 #ifdef CONFIG_CGROUPS
        ONE("cgroup",  S_IRUGO, proc_cgroup_show),
+#endif
+#ifdef CONFIG_PROC_CPU_RESCTRL
+       ONE("cpu_resctrl_groups", S_IRUGO, proc_resctrl_show),
 #endif
        ONE("oom_score",  S_IRUGO, proc_oom_score),
        REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
@@ -3460,6 +3558,9 @@ static const struct pid_entry tid_base_stuff[] = {
 #endif
 #ifdef CONFIG_CGROUPS
        ONE("cgroup",  S_IRUGO, proc_cgroup_show),
+#endif
+#ifdef CONFIG_PROC_CPU_RESCTRL
+       ONE("cpu_resctrl_groups", S_IRUGO, proc_resctrl_show),
 #endif
        ONE("oom_score", S_IRUGO, proc_oom_score),
        REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
index dd2b35f78b09c1fd7d341224773ace3e61650474..8b5c720fe5d749854df263b828e0bc2ae8e1a909 100644 (file)
@@ -33,6 +33,10 @@ static const struct proc_ns_operations *ns_entries[] = {
 #ifdef CONFIG_CGROUPS
        &cgroupns_operations,
 #endif
+#ifdef CONFIG_TIME_NS
+       &timens_operations,
+       &timens_for_children_operations,
+#endif
 };
 
 static const char *proc_ns_get_link(struct dentry *dentry,
index 37bdbec5b402008c89413fec3c8ab84e92e006ff..fd931d3e77be5438ce2fd287a0170a5d18adfd70 100644 (file)
@@ -134,7 +134,7 @@ static int show_stat(struct seq_file *p, void *v)
                softirq         += cpustat[CPUTIME_SOFTIRQ];
                steal           += cpustat[CPUTIME_STEAL];
                guest           += cpustat[CPUTIME_GUEST];
-               guest_nice      += cpustat[CPUTIME_USER];
+               guest_nice      += cpustat[CPUTIME_GUEST_NICE];
                sum             += kstat_cpu_irqs_sum(i);
                sum             += arch_irq_stat_cpu(i);
 
@@ -175,7 +175,7 @@ static int show_stat(struct seq_file *p, void *v)
                softirq         = cpustat[CPUTIME_SOFTIRQ];
                steal           = cpustat[CPUTIME_STEAL];
                guest           = cpustat[CPUTIME_GUEST];
-               guest_nice      = cpustat[CPUTIME_USER];
+               guest_nice      = cpustat[CPUTIME_GUEST_NICE];
                seq_printf(p, "cpu%d", i);
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
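
The /proc/stat hunks above fix a copy-and-paste slip: both per-cpu loops accumulated CPUTIME_USER into guest_nice, so the guest_nice column silently mirrored user time. A reduced illustration of the bug class (values hypothetical):

#include <stdio.h>

enum { CPUTIME_USER, CPUTIME_NICE, CPUTIME_GUEST, CPUTIME_GUEST_NICE, NR_STATS };

int main(void)
{
        long long cpustat[NR_STATS] = { 100, 20, 7, 3 };

        printf("buggy: %lld\n", cpustat[CPUTIME_USER]);        /* 100 */
        printf("fixed: %lld\n", cpustat[CPUTIME_GUEST_NICE]);  /* 3 */
        return 0;
}
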
index a4c2791ab70baf8c5124a5e00d0e4a374652d13f..5a1b228964fb760020e1d9198f4139a15fe40931 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/time.h>
+#include <linux/time_namespace.h>
 #include <linux/kernel_stat.h>
 
 static int uptime_proc_show(struct seq_file *m, void *v)
@@ -20,6 +21,8 @@ static int uptime_proc_show(struct seq_file *m, void *v)
                nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
 
        ktime_get_boottime_ts64(&uptime);
+       timens_add_boottime(&uptime);
+
        idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
        idle.tv_nsec = rem;
        seq_printf(m, "%lu.%02lu %lu.%02lu\n",
index 8caff834f002668eb79d84d2adf8c108dbc2c640..013486b5125ed78f80f10024c7c1f3d67fe8d647 100644 (file)
@@ -407,6 +407,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record)
 
        prz = cxt->dprzs[cxt->dump_write_cnt];
 
+       /*
+        * Since this is a new crash dump, we need to reset the buffer in
+        * case it still has an old dump present. Without this, the new dump
+        * will get appended, which would seriously confuse anything trying
+        * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
+        * expects to find a dump header in the beginning of buffer data, so
+        * expects to find a dump header at the beginning of buffer data, so
+        * we must reset the buffer values in order to ensure that the
+        */
+       persistent_ram_zap(prz);
+
        /* Build header and append record contents. */
        hlen = ramoops_write_kmsg_hdr(prz, record);
        if (!hlen)
@@ -572,6 +583,7 @@ static int ramoops_init_przs(const char *name,
                prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
                                               &cxt->ecc_info,
                                               cxt->memtype, flags, label);
+               kfree(label);
                if (IS_ERR(prz_ar[i])) {
                        err = PTR_ERR(prz_ar[i]);
                        dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
@@ -617,6 +629,7 @@ static int ramoops_init_prz(const char *name,
        label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
        *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
                                  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
+       kfree(label);
        if (IS_ERR(*prz)) {
                int err = PTR_ERR(*prz);
 
index 8823f65888f034cd5997a68898f2c7792d8ae69c..1f4d8c06f9be61d8f95747aa1d1b2dd730d0a36d 100644 (file)
@@ -574,7 +574,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
        /* Initialize general buffer state. */
        raw_spin_lock_init(&prz->buffer_lock);
        prz->flags = flags;
-       prz->label = label;
+       prz->label = kstrdup(label, GFP_KERNEL);
 
        ret = persistent_ram_buffer_map(start, size, prz, memtype);
        if (ret)
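
The two pstore hunks above fix a label leak by moving ownership of the string into persistent_ram_new(), which now keeps its own kstrdup() copy; the callers then free theirs on every path, success or failure. A userspace analogue of that ownership rule (all names hypothetical):

#include <stdlib.h>
#include <string.h>

struct zone { char *label; };

static struct zone *zone_new(const char *label)
{
        struct zone *z = calloc(1, sizeof(*z));

        if (!z)
                return NULL;
        z->label = strdup(label);      /* own copy, like kstrdup() in the patch */
        return z;
}

int main(void)
{
        char *label = strdup("ramoops:dmesg");   /* caller's allocation */
        struct zone *z = zone_new(label);

        free(label);                   /* always safe now, mirrors kfree(label) */
        if (z) {
                free(z->label);
                free(z);
        }
        return 0;
}
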
index b0688c02dc90df44295a52a497948c31e3db7d57..b6a4f692d3457e18f281cf2cc9ebf8a3215a1ef5 100644 (file)
@@ -984,6 +984,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
                 * later.
                 */
                old_inode = inode;
+               cond_resched();
                spin_lock(&sb->s_inode_list_lock);
        }
        spin_unlock(&sb->s_inode_list_lock);
index d26d5ea4de7b8958ea3d48069cd873b50398cd88..de2eceffdee8baf97f584bf644872822fdef8ef3 100644 (file)
@@ -102,10 +102,14 @@ EXPORT_SYMBOL(iterate_dir);
  * filename length, and the above "soft error" worry means
  * that it's probably better left alone until we have that
  * issue clarified.
+ *
+ * Note the PATH_MAX check - it's arbitrary, but it is the real
+ * kernel limit on a possible path component, not NAME_MAX,
+ * which is the technical standard limit.
  */
 static int verify_dirent_name(const char *name, int len)
 {
-       if (!len)
+       if (len <= 0 || len >= PATH_MAX)
                return -EIO;
        if (memchr(name, '/', len))
                return -EIO;
@@ -206,7 +210,7 @@ struct linux_dirent {
 struct getdents_callback {
        struct dir_context ctx;
        struct linux_dirent __user * current_dir;
-       struct linux_dirent __user * previous;
+       int prev_reclen;
        int count;
        int error;
 };
@@ -214,12 +218,13 @@ struct getdents_callback {
 static int filldir(struct dir_context *ctx, const char *name, int namlen,
                   loff_t offset, u64 ino, unsigned int d_type)
 {
-       struct linux_dirent __user * dirent;
+       struct linux_dirent __user *dirent, *prev;
        struct getdents_callback *buf =
                container_of(ctx, struct getdents_callback, ctx);
        unsigned long d_ino;
        int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
                sizeof(long));
+       int prev_reclen;
 
        buf->error = verify_dirent_name(name, namlen);
        if (unlikely(buf->error))
@@ -232,28 +237,24 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
                buf->error = -EOVERFLOW;
                return -EOVERFLOW;
        }
-       dirent = buf->previous;
-       if (dirent && signal_pending(current))
+       prev_reclen = buf->prev_reclen;
+       if (prev_reclen && signal_pending(current))
                return -EINTR;
-
-       /*
-        * Note! This range-checks 'previous' (which may be NULL).
-        * The real range was checked in getdents
-        */
-       if (!user_access_begin(dirent, sizeof(*dirent)))
-               goto efault;
-       if (dirent)
-               unsafe_put_user(offset, &dirent->d_off, efault_end);
        dirent = buf->current_dir;
+       prev = (void __user *) dirent - prev_reclen;
+       if (!user_access_begin(prev, reclen + prev_reclen))
+               goto efault;
+
+       /* This might be 'dirent->d_off', but if so it will get overwritten */
+       unsafe_put_user(offset, &prev->d_off, efault_end);
        unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
        unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
        unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
        unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
        user_access_end();
 
-       buf->previous = dirent;
-       dirent = (void __user *)dirent + reclen;
-       buf->current_dir = dirent;
+       buf->current_dir = (void __user *)dirent + reclen;
+       buf->prev_reclen = reclen;
        buf->count -= reclen;
        return 0;
 efault_end:
@@ -267,7 +268,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
                struct linux_dirent __user *, dirent, unsigned int, count)
 {
        struct fd f;
-       struct linux_dirent __user * lastdirent;
        struct getdents_callback buf = {
                .ctx.actor = filldir,
                .count = count,
@@ -285,8 +285,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
-       lastdirent = buf.previous;
-       if (lastdirent) {
+       if (buf.prev_reclen) {
+               struct linux_dirent __user * lastdirent;
+               lastdirent = (void __user *)buf.current_dir - buf.prev_reclen;
+
                if (put_user(buf.ctx.pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
@@ -299,7 +301,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
 struct getdents_callback64 {
        struct dir_context ctx;
        struct linux_dirent64 __user * current_dir;
-       struct linux_dirent64 __user * previous;
+       int prev_reclen;
        int count;
        int error;
 };
@@ -307,11 +309,12 @@ struct getdents_callback64 {
 static int filldir64(struct dir_context *ctx, const char *name, int namlen,
                     loff_t offset, u64 ino, unsigned int d_type)
 {
-       struct linux_dirent64 __user *dirent;
+       struct linux_dirent64 __user *dirent, *prev;
        struct getdents_callback64 *buf =
                container_of(ctx, struct getdents_callback64, ctx);
        int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
                sizeof(u64));
+       int prev_reclen;
 
        buf->error = verify_dirent_name(name, namlen);
        if (unlikely(buf->error))
@@ -319,30 +322,27 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
-       dirent = buf->previous;
-       if (dirent && signal_pending(current))
+       prev_reclen = buf->prev_reclen;
+       if (prev_reclen && signal_pending(current))
                return -EINTR;
-
-       /*
-        * Note! This range-checks 'previous' (which may be NULL).
-        * The real range was checked in getdents
-        */
-       if (!user_access_begin(dirent, sizeof(*dirent)))
-               goto efault;
-       if (dirent)
-               unsafe_put_user(offset, &dirent->d_off, efault_end);
        dirent = buf->current_dir;
+       prev = (void __user *)dirent - prev_reclen;
+       if (!user_access_begin(prev, reclen + prev_reclen))
+               goto efault;
+
+       /* This might be 'dirent->d_off', but if so it will get overwritten */
+       unsafe_put_user(offset, &prev->d_off, efault_end);
        unsafe_put_user(ino, &dirent->d_ino, efault_end);
        unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
        unsafe_put_user(d_type, &dirent->d_type, efault_end);
        unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
        user_access_end();
 
-       buf->previous = dirent;
-       dirent = (void __user *)dirent + reclen;
-       buf->current_dir = dirent;
+       buf->prev_reclen = reclen;
+       buf->current_dir = (void __user *)dirent + reclen;
        buf->count -= reclen;
        return 0;
+
 efault_end:
        user_access_end();
 efault:
@@ -354,7 +354,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
                    unsigned int count)
 {
        struct fd f;
-       struct linux_dirent64 __user * lastdirent;
        struct getdents_callback64 buf = {
                .ctx.actor = filldir64,
                .count = count,
@@ -372,9 +371,11 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
-       lastdirent = buf.previous;
-       if (lastdirent) {
+       if (buf.prev_reclen) {
+               struct linux_dirent64 __user * lastdirent;
                typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+
+               lastdirent = (void __user *) buf.current_dir - buf.prev_reclen;
                if (__put_user(d_off, &lastdirent->d_off))
                        error = -EFAULT;
                else
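
The filldir()/filldir64() rework above replaces the saved user pointer with prev_reclen, so a single user_access_begin() window can cover both the previous record's d_off fixup and the new record. The user-visible record layout is unchanged; a short consumer showing the d_reclen stride and the d_off cookie that the final record gets patched with (it reads the current directory):

#define _GNU_SOURCE
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        char buf[4096] __attribute__((aligned(8)));
        int fd = open(".", O_RDONLY | O_DIRECTORY);
        long n = syscall(SYS_getdents64, fd, buf, sizeof(buf));

        for (long off = 0; n > 0 && off < n; ) {
                struct dirent64 *d = (struct dirent64 *)(buf + off);

                printf("%-20s d_off=%lld\n", d->d_name, (long long)d->d_off);
                off += d->d_reclen;    /* the stride filldir64() wrote */
        }
        if (fd >= 0)
                close(fd);
        return 0;
}
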
index 62b40df36c98cb0e890ce3c58845c8af4be7f2bf..28b241cd6987053bf95c945d699b933b0222e1cf 100644 (file)
@@ -319,8 +319,12 @@ static int reiserfs_for_each_xattr(struct inode *inode,
 out_dir:
        dput(dir);
 out:
-       /* -ENODATA isn't an error */
-       if (err == -ENODATA)
+       /*
+        * -ENODATA: this object doesn't have any xattrs.
+        * -EOPNOTSUPP: this file system doesn't have xattrs enabled on disk.
+        * Neither is an error.
+        */
+       if (err == -ENODATA || err == -EOPNOTSUPP)
                err = 0;
        return err;
 }
index 4ef2c056269d54c58b18da04235ffffa4a459b1f..c9830924eb125b7671c702c4625270d6b83dad13 100644 (file)
@@ -23,7 +23,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
 
        /*
         * But on 32-bit, we ought to make an effort to keep the two halves of
-        * i_blocks in sync despite SMP or PREEMPT - though stat's
+        * i_blocks in sync despite SMP or PREEMPTION - though stat's
         * generic_fillattr() doesn't bother, and we won't be applying quotas
         * (where i_blocks does become important) at the upper level.
         *
@@ -38,14 +38,14 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
                spin_unlock(&src->i_lock);
 
        /*
-        * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for
+        * If CONFIG_SMP or CONFIG_PREEMPTION on 32-bit, it's vital for
         * fsstack_copy_inode_size() to hold some lock around
         * i_size_write(), otherwise i_size_read() may spin forever (see
         * include/linux/fs.h).  We don't necessarily hold i_mutex when this
         * is called, so take i_lock for that case.
         *
         * And if on 32-bit, continue our effort to keep the two halves of
-        * i_blocks in sync despite SMP or PREEMPT: use i_lock  for that case
+        * i_blocks in sync despite SMP or PREEMPTION: use i_lock for that case
         * too, and do both at once by combining the tests.
         *
         * There is none of this locking overhead in the 64-bit case.
index cfadab2cbf35fdfd67139e6c341840308adb93b9..cd352530eca906eb64d722592c85e2fa3ef02886 100644 (file)
@@ -448,10 +448,12 @@ void generic_shutdown_super(struct super_block *sb)
                sync_filesystem(sb);
                sb->s_flags &= ~SB_ACTIVE;
 
-               fsnotify_sb_delete(sb);
                cgroup_writeback_umount();
 
+               /* evict all inodes with zero refcount */
                evict_inodes(sb);
+               /* only nonzero refcount inodes can have marks */
+               fsnotify_sb_delete(sb);
 
                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
index ac7f59a58f947d13df41d8cb1c577c8c6c4ce4f2..c5509d2448e386f9869bf15169e211ee2744b703 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/syscalls.h>
 #include <linux/compat.h>
 #include <linux/rcupdate.h>
+#include <linux/time_namespace.h>
 
 struct timerfd_ctx {
        union {
@@ -196,6 +197,8 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
        }
 
        if (texp != 0) {
+               if (flags & TFD_TIMER_ABSTIME)
+                       texp = timens_ktime_to_host(clockid, texp);
                if (isalarm(ctx)) {
                        if (flags & TFD_TIMER_ABSTIME)
                                alarm_start(&ctx->t.alarm, texp);
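
The timerfd hunk above translates TFD_TIMER_ABSTIME expiries from the caller's time namespace back to the host clock, so absolute timers keep firing at the intended moment inside a namespace. The call pattern from userspace is unchanged either way; a minimal sketch (error handling elided):

#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        int fd = timerfd_create(CLOCK_MONOTONIC, 0);
        struct itimerspec its = { 0 };
        uint64_t expirations;

        clock_gettime(CLOCK_MONOTONIC, &its.it_value);
        its.it_value.tv_sec += 1;                 /* absolute: now + 1s */
        timerfd_settime(fd, TFD_TIMER_ABSTIME, &its, NULL);
        read(fd, &expirations, sizeof(expirations));  /* blocks ~1s */
        printf("expired %llu time(s)\n", (unsigned long long)expirations);
        close(fd);
        return 0;
}
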
index eabc6ac1990641fc6b4c54108d32328a9bee95c6..b79e3fd19d1152388c1972191f0a525a71a8c7f6 100644 (file)
@@ -315,7 +315,7 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
        if (arg.block_size != PAGE_SIZE)
                return -EINVAL;
 
-       if (arg.salt_size > FIELD_SIZEOF(struct fsverity_descriptor, salt))
+       if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt))
                return -EMSGSIZE;
 
        if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
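
The fs/verity hunk above is part of a tree-wide rename of FIELD_SIZEOF() to sizeof_field(); the semantics are identical. For illustration, a userspace equivalent of the macro applied to a stand-in struct (the *_like type is hypothetical):

#include <stdio.h>

#define sizeof_field(TYPE, MEMBER) sizeof(((TYPE *)0)->MEMBER)

struct fsverity_descriptor_like { unsigned char salt[32]; };

int main(void)
{
        printf("%zu\n", sizeof_field(struct fsverity_descriptor_like, salt)); /* 32 */
        return 0;
}
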
index c284e10af491c0777b1db7e8cdb64e78ad3a3699..fc93fd88ec89f825ead506fe2787f90d742d1a62 100644 (file)
@@ -2248,24 +2248,32 @@ xfs_alloc_longest_free_extent(
        return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
 }
 
+/*
+ * Compute the minimum length of the AGFL in the given AG.  If @pag is NULL,
+ * return the largest possible minimum length.
+ */
 unsigned int
 xfs_alloc_min_freelist(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag)
 {
+       /* AG btrees have at least 1 level. */
+       static const uint8_t    fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
+       const uint8_t           *levels = pag ? pag->pagf_levels : fake_levels;
        unsigned int            min_free;
 
+       ASSERT(mp->m_ag_maxlevels > 0);
+
        /* space needed by-bno freespace btree */
-       min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
+       min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
                                       mp->m_ag_maxlevels);
        /* space needed by-size freespace btree */
-       min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
+       min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
                                       mp->m_ag_maxlevels);
        /* space needed reverse mapping used space btree */
        if (xfs_sb_version_hasrmapbt(&mp->m_sb))
-               min_free += min_t(unsigned int,
-                                 pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
-                                 mp->m_rmap_maxlevels);
+               min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
+                                               mp->m_rmap_maxlevels);
 
        return min_free;
 }
index a9ad1f991ba36412ad47e216cb967092368f4b10..4c2e046fbfaded8a22ffe2e17cf150eb9d27947a 100644 (file)
@@ -4561,7 +4561,7 @@ xfs_bmapi_convert_delalloc(
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        struct xfs_bmalloca     bma = { NULL };
-       u16                     flags = 0;
+       uint16_t                flags = 0;
        struct xfs_trans        *tp;
        int                     error;
 
@@ -5972,8 +5972,7 @@ xfs_bmap_insert_extents(
                goto del_cursor;
        }
 
-       if (XFS_IS_CORRUPT(mp,
-                          stop_fsb >= got.br_startoff + got.br_blockcount)) {
+       if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
                error = -EFSCORRUPTED;
                goto del_cursor;
        }
index 0aa87cbde49e8819bfd8c7e0ec10edba585c4c7d..dd6fcaaea318a84e7fd999a8e412ae1161705894 100644 (file)
@@ -724,3 +724,24 @@ xfs_dir2_namecheck(
        /* There shouldn't be any slashes or nulls here */
        return !memchr(name, '/', length) && !memchr(name, 0, length);
 }
+
+xfs_dahash_t
+xfs_dir2_hashname(
+       struct xfs_mount        *mp,
+       struct xfs_name         *name)
+{
+       if (unlikely(xfs_sb_version_hasasciici(&mp->m_sb)))
+               return xfs_ascii_ci_hashname(name);
+       return xfs_da_hashname(name->name, name->len);
+}
+
+enum xfs_dacmp
+xfs_dir2_compname(
+       struct xfs_da_args      *args,
+       const unsigned char     *name,
+       int                     len)
+{
+       if (unlikely(xfs_sb_version_hasasciici(&args->dp->i_mount->m_sb)))
+               return xfs_ascii_ci_compname(args, name, len);
+       return xfs_da_compname(args, name, len);
+}
index c031c53d0f0d06798a66de7d7bb5621af32added..01ee0b9265721d52688ddcfff6a57f53deae400f 100644 (file)
@@ -175,6 +175,12 @@ extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
 extern xfs_failaddr_t xfs_dir2_sf_verify(struct xfs_inode *ip);
+int xfs_dir2_sf_entsize(struct xfs_mount *mp,
+               struct xfs_dir2_sf_hdr *hdr, int len);
+void xfs_dir2_sf_put_ino(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *hdr,
+               struct xfs_dir2_sf_entry *sfep, xfs_ino_t ino);
+void xfs_dir2_sf_put_ftype(struct xfs_mount *mp,
+               struct xfs_dir2_sf_entry *sfep, uint8_t ftype);
 
 /* xfs_dir2_readdir.c */
 extern int xfs_readdir(struct xfs_trans *tp, struct xfs_inode *dp,
@@ -194,25 +200,8 @@ xfs_dir2_data_entsize(
        return round_up(len, XFS_DIR2_DATA_ALIGN);
 }
 
-static inline xfs_dahash_t
-xfs_dir2_hashname(
-       struct xfs_mount        *mp,
-       struct xfs_name         *name)
-{
-       if (unlikely(xfs_sb_version_hasasciici(&mp->m_sb)))
-               return xfs_ascii_ci_hashname(name);
-       return xfs_da_hashname(name->name, name->len);
-}
-
-static inline enum xfs_dacmp
-xfs_dir2_compname(
-       struct xfs_da_args      *args,
-       const unsigned char     *name,
-       int                     len)
-{
-       if (unlikely(xfs_sb_version_hasasciici(&args->dp->i_mount->m_sb)))
-               return xfs_ascii_ci_compname(args, name, len);
-       return xfs_da_compname(args, name, len);
-}
+xfs_dahash_t xfs_dir2_hashname(struct xfs_mount *mp, struct xfs_name *name);
+enum xfs_dacmp xfs_dir2_compname(struct xfs_da_args *args,
+               const unsigned char *name, int len);
 
 #endif /* __XFS_DIR2_PRIV_H__ */
index 8b94d33d232f5097fdce280bc22358c81f2446ad..7b7f6fb2ea3b2549f325b7a333cd59a9f6d91c05 100644 (file)
@@ -37,7 +37,7 @@ static void xfs_dir2_sf_check(xfs_da_args_t *args);
 static void xfs_dir2_sf_toino4(xfs_da_args_t *args);
 static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
 
-static int
+int
 xfs_dir2_sf_entsize(
        struct xfs_mount        *mp,
        struct xfs_dir2_sf_hdr  *hdr,
@@ -84,7 +84,7 @@ xfs_dir2_sf_get_ino(
        return get_unaligned_be64(from) & XFS_MAXINUMBER;
 }
 
-static void
+void
 xfs_dir2_sf_put_ino(
        struct xfs_mount                *mp,
        struct xfs_dir2_sf_hdr          *hdr,
@@ -145,7 +145,7 @@ xfs_dir2_sf_get_ftype(
        return XFS_DIR3_FT_UNKNOWN;
 }
 
-static void
+void
 xfs_dir2_sf_put_ftype(
        struct xfs_mount        *mp,
        struct xfs_dir2_sf_entry *sfep,
index 988cde7744e69dc0a007c861a9eabfc35e959555..5b759af4d1652af97cc5870b68184d741e6401ae 100644 (file)
@@ -2909,3 +2909,67 @@ xfs_ialloc_setup_geometry(
        else
                igeo->ialloc_align = 0;
 }
+
+/* Compute the location of the root directory inode that is laid out by mkfs. */
+xfs_ino_t
+xfs_ialloc_calc_rootino(
+       struct xfs_mount        *mp,
+       int                     sunit)
+{
+       struct xfs_ino_geometry *igeo = M_IGEO(mp);
+       xfs_agblock_t           first_bno;
+
+       /*
+        * Pre-calculate the geometry of AG 0.  We know what it looks like
+        * because libxfs knows how to create allocation groups now.
+        *
+        * first_bno is the first block in which mkfs could possibly have
+        * allocated the root directory inode, once we factor in the metadata
+        * that mkfs formats before it.  Namely, the four AG headers...
+        */
+       first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
+
+       /* ...the two free space btree roots... */
+       first_bno += 2;
+
+       /* ...the inode btree root... */
+       first_bno += 1;
+
+       /* ...the initial AGFL... */
+       first_bno += xfs_alloc_min_freelist(mp, NULL);
+
+       /* ...the free inode btree root... */
+       if (xfs_sb_version_hasfinobt(&mp->m_sb))
+               first_bno++;
+
+       /* ...the reverse mapping btree root... */
+       if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+               first_bno++;
+
+       /* ...the reference count btree... */
+       if (xfs_sb_version_hasreflink(&mp->m_sb))
+               first_bno++;
+
+       /*
+        * ...and the log, if it is allocated in the first allocation group.
+        *
+        * This can happen with filesystems that only have a single
+        * allocation group, or very odd geometries created by old mkfs
+        * versions on very small filesystems.
+        */
+       if (mp->m_sb.sb_logstart &&
+           XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
+                first_bno += mp->m_sb.sb_logblocks;
+
+       /*
+        * Now round first_bno up to whatever allocation alignment is given
+        * by the filesystem or was passed in.
+        */
+       if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0)
+               first_bno = roundup(first_bno, sunit);
+       else if (xfs_sb_version_hasalign(&mp->m_sb) &&
+                       mp->m_sb.sb_inoalignmt > 1)
+               first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);
+
+       return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
+}
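
To make the accounting concrete, here is the same walk as standalone arithmetic for one hypothetical geometry (512-byte sectors, 4096-byte blocks, no finobt/rmapbt/reflink, log outside AG 0, no alignment rounding; the AGFL term is the 4 blocks that xfs_alloc_min_freelist(mp, NULL) yields for two single-level free space btrees):

    #include <stdio.h>

    int main(void)
    {
            unsigned int sectsize = 512, blocksize = 4096;
            unsigned int first_bno;

            first_bno  = (4 * sectsize + blocksize - 1) / blocksize; /* AG headers: 1 */
            first_bno += 2;  /* by-bno and by-size free space btree roots */
            first_bno += 1;  /* inode btree root */
            first_bno += 4;  /* minimal AGFL for this geometry (assumed) */
            printf("root inode expected at AG block %u\n", first_bno); /* 8 */
            return 0;
    }
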
index 323592d563d520f9f940fc1913d475bb6ce90708..72b3468b97b152aeb364e2a933fbfe06fd7dee15 100644 (file)
@@ -152,5 +152,6 @@ int xfs_inobt_insert_rec(struct xfs_btree_cur *cur, uint16_t holemask,
 
 int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
 void xfs_ialloc_setup_geometry(struct xfs_mount *mp);
+xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit);
 
 #endif /* __XFS_IALLOC_H__ */
index c55cd9a3dec94ea749accd4a136b0e91703b2f11..7a9c04920505af327cd7a4e490273051b93479d5 100644 (file)
@@ -196,6 +196,24 @@ xfs_calc_inode_chunk_res(
        return res;
 }
 
+/*
+ * Per-extent log reservation for the btree changes involved in freeing or
+ * allocating a realtime extent.  We have to be able to log as many rtbitmap
+ * blocks as needed to mark MAXEXTLEN blocks' worth of realtime extents as
+ * in use, as well as the realtime summary block.
+ */
+static unsigned int
+xfs_rtalloc_log_count(
+       struct xfs_mount        *mp,
+       unsigned int            num_ops)
+{
+       unsigned int            blksz = XFS_FSB_TO_B(mp, 1);
+       unsigned int            rtbmp_bytes;
+
+       rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY;
+       return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
+}
+
 /*
  * Various log reservation values.
  *
@@ -218,13 +236,21 @@ xfs_calc_inode_chunk_res(
 
 /*
  * In a write transaction we can allocate a maximum of 2
- * extents.  This gives:
+ * extents.  This gives (t1):
  *    the inode getting the new extents: inode size
  *    the inode's bmap btree: max depth * block size
  *    the agfs of the ags from which the extents are allocated: 2 * sector
  *    the superblock free block counter: sector size
  *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- * And the bmap_finish transaction can free bmap blocks in a join:
+ * Or, if we're writing to a realtime file (t2):
+ *    the inode getting the new extents: inode size
+ *    the inode's bmap btree: max depth * block size
+ *    the agfs of the ags from which the extents are allocated: 2 * sector
+ *    the superblock free block counter: sector size
+ *    the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes
+ *    the realtime summary: 1 block
+ *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ * And the bmap_finish transaction can free bmap blocks in a join (t3):
  *    the agfs of the ags containing the blocks: 2 * sector size
  *    the agfls of the ags containing the blocks: 2 * sector size
  *    the super block free block counter: sector size
@@ -234,40 +260,72 @@ STATIC uint
 xfs_calc_write_reservation(
        struct xfs_mount        *mp)
 {
-       return XFS_DQUOT_LOGRES(mp) +
-               max((xfs_calc_inode_res(mp, 1) +
+       unsigned int            t1, t2, t3;
+       unsigned int            blksz = XFS_FSB_TO_B(mp, 1);
+
+       t1 = xfs_calc_inode_res(mp, 1) +
+            xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
+            xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+            xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+
+       if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
+               t2 = xfs_calc_inode_res(mp, 1) +
                     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
-                                     XFS_FSB_TO_B(mp, 1)) +
+                                    blksz) +
                     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
-                                     XFS_FSB_TO_B(mp, 1))),
-                   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
-                                     XFS_FSB_TO_B(mp, 1))));
+                    xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) +
+                    xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz);
+       } else {
+               t2 = 0;
+       }
+
+       t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+            xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+
+       return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
 }
 
 /*
- * In truncating a file we free up to two extents at once.  We can modify:
+ * In truncating a file we free up to two extents at once.  We can modify (t1):
  *    the inode being truncated: inode size
  *    the inode's bmap btree: (max depth + 1) * block size
- * And the bmap_finish transaction can free the blocks and bmap blocks:
+ * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
  *    the agf for each of the ags: 4 * sector size
  *    the agfl for each of the ags: 4 * sector size
  *    the super block to reflect the freed blocks: sector size
  *    worst case split in allocation btrees per extent assuming 4 extents:
  *             4 exts * 2 trees * (2 * max depth - 1) * block size
+ * Or, if it's a realtime file (t3):
+ *    the agf for each of the ags: 2 * sector size
+ *    the agfl for each of the ags: 2 * sector size
+ *    the super block to reflect the freed blocks: sector size
+ *    the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes
+ *    the realtime summary: 2 exts * 1 block
+ *    worst case split in allocation btrees per extent assuming 2 extents:
+ *             2 exts * 2 trees * (2 * max depth - 1) * block size
  */
 STATIC uint
 xfs_calc_itruncate_reservation(
        struct xfs_mount        *mp)
 {
-       return XFS_DQUOT_LOGRES(mp) +
-               max((xfs_calc_inode_res(mp, 1) +
-                    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
-                                     XFS_FSB_TO_B(mp, 1))),
-                   (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
-                                     XFS_FSB_TO_B(mp, 1))));
+       unsigned int            t1, t2, t3;
+       unsigned int            blksz = XFS_FSB_TO_B(mp, 1);
+
+       t1 = xfs_calc_inode_res(mp, 1) +
+            xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);
+
+       t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
+            xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz);
+
+       if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
+               t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+                    xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) +
+                    xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
+       } else {
+               t3 = 0;
+       }
+
+       return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
 }
 
 /*
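
The rtbitmap term in xfs_rtalloc_log_count() above can be sanity-checked by hand. A standalone rendering of the arithmetic for one hypothetical geometry (4096-byte blocks, realtime extent size of 16 blocks; MAXEXTLEN, the XFS per-extent length cap of 0x1fffff blocks, and NBBY are copied in as constants):

    #include <stdio.h>

    #define MAXEXTLEN 0x001fffffU  /* XFS maximum extent length, in blocks */
    #define NBBY 8                 /* number of bits per byte */

    int main(void)
    {
            unsigned int blksz = 4096, rextsize = 16, num_ops = 1;
            unsigned int rtbmp_bytes = (MAXEXTLEN / rextsize) / NBBY;
            unsigned int res = ((rtbmp_bytes + blksz - 1) / blksz + 1) * num_ops;

            /* 16383 bitmap bytes -> 4 bitmap blocks + 1 summary = 5 blocks */
            printf("%u bytes -> %u reserved log blocks\n", rtbmp_bytes, res);
            return 0;
    }
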
index 3362bae28b46bfa53262dbd51d9421670a9146ac..096203119934fbc6ca3947730e5b54116e9810af 100644 (file)
@@ -329,7 +329,7 @@ TRACE_EVENT(xchk_btree_op_error,
                __field(int, level)
                __field(xfs_agnumber_t, agno)
                __field(xfs_agblock_t, bno)
-               __field(int, ptr);
+               __field(int, ptr)
                __field(int, error)
                __field(void *, ret_ip)
        ),
@@ -414,7 +414,7 @@ TRACE_EVENT(xchk_btree_error,
                __field(int, level)
                __field(xfs_agnumber_t, agno)
                __field(xfs_agblock_t, bno)
-               __field(int, ptr);
+               __field(int, ptr)
                __field(void *, ret_ip)
        ),
        TP_fast_assign(
@@ -452,7 +452,7 @@ TRACE_EVENT(xchk_ifork_btree_error,
                __field(int, level)
                __field(xfs_agnumber_t, agno)
                __field(xfs_agblock_t, bno)
-               __field(int, ptr);
+               __field(int, ptr)
                __field(void *, ret_ip)
        ),
        TP_fast_assign(
index 2efd78a9719eac099390cd7a7cda286f0853e5fb..e62fb5216341c25cd99288937881f6ceb1434e8c 100644 (file)
@@ -992,6 +992,7 @@ xfs_prepare_shift(
        struct xfs_inode        *ip,
        loff_t                  offset)
 {
+       struct xfs_mount        *mp = ip->i_mount;
        int                     error;
 
        /*
@@ -1004,6 +1005,17 @@ xfs_prepare_shift(
                        return error;
        }
 
+       /*
+        * Shift operations must stabilize the start block offset boundary along
+        * with the full range of the operation. If we don't, a COW writeback
+        * completion could race with an insert, front merge with the start
+        * extent (after split) during the shift and corrupt the file. Start
+        * with the block just prior to the start to stabilize the boundary.
+        */
+       offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
+       if (offset)
+               offset -= (1 << mp->m_sb.sb_blocklog);
+
        /*
         * Writeback and invalidate cache for the remainder of the file as we're
         * about to shift down every extent from offset to EOF.
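
The two added lines widen the stabilized range by exactly one filesystem block. With an assumed sb_blocklog of 12 (4096-byte blocks) and a shift starting at byte offset 10000, the same arithmetic as standalone C:

    #include <stdio.h>

    int main(void)
    {
            unsigned int blocklog = 12;           /* assumed 4096-byte blocks */
            long long offset = 10000;

            offset &= ~((1LL << blocklog) - 1);   /* round_down()    -> 8192 */
            if (offset)
                    offset -= 1LL << blocklog;    /* one block prior -> 4096 */
            printf("flush/invalidate starts at %lld\n", offset);
            return 0;
    }
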
index 3458a1264a3f6d0d05ae48130ddfc44d2985b784..3984779e59110e69033f558de18017c9ae539fd0 100644 (file)
@@ -956,7 +956,7 @@ xfs_buf_item_relse(
        struct xfs_buf_log_item *bip = bp->b_log_item;
 
        trace_xfs_buf_item_relse(bp, _RET_IP_);
-       ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
+       ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
 
        bp->b_log_item = NULL;
        if (list_empty(&bp->b_li_list))
index fca65109cf242dd946b0585e116043f213018978..56efe140c923816b9233f7f9008024198dc7f9c5 100644 (file)
@@ -31,7 +31,7 @@
 #include "xfs_reflink.h"
 #include "xfs_extent_busy.h"
 #include "xfs_health.h"
-
+#include "xfs_trace.h"
 
 static DEFINE_MUTEX(xfs_uuid_table_mutex);
 static int xfs_uuid_table_size;
@@ -360,66 +360,119 @@ release_buf:
 }
 
 /*
- * Update alignment values based on mount options and sb values
+ * If the sunit/swidth change would move the precomputed root inode value, we
+ * must reject the ondisk change because repair will stumble over that.
+ * However, we allow the mount to proceed because we never rejected this
+ * combination before.  Returns true to update the sb, false otherwise.
+ */
+static inline int
+xfs_check_new_dalign(
+       struct xfs_mount        *mp,
+       int                     new_dalign,
+       bool                    *update_sb)
+{
+       struct xfs_sb           *sbp = &mp->m_sb;
+       xfs_ino_t               calc_ino;
+
+       calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
+       trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);
+
+       if (sbp->sb_rootino == calc_ino) {
+               *update_sb = true;
+               return 0;
+       }
+
+       xfs_warn(mp,
+"Cannot change stripe alignment; would require moving root inode.");
+
+       /*
+        * XXX: Next time we add a new incompat feature, this should start
+        * returning -EINVAL to fail the mount.  Until then, spit out a warning
+        * that we're ignoring the administrator's instructions.
+        */
+       xfs_warn(mp, "Skipping superblock stripe alignment update.");
+       *update_sb = false;
+       return 0;
+}
+
+/*
+ * If we were provided with new sunit/swidth values as mount options, make sure
+ * that they pass basic alignment and superblock feature checks, and convert
+ * them into the same units (FSB) that everything else expects.  This step
+ * /must/ be done before computing the inode geometry.
  */
 STATIC int
-xfs_update_alignment(xfs_mount_t *mp)
+xfs_validate_new_dalign(
+       struct xfs_mount        *mp)
 {
-       xfs_sb_t        *sbp = &(mp->m_sb);
+       if (mp->m_dalign == 0)
+               return 0;
 
-       if (mp->m_dalign) {
+       /*
+        * If stripe unit and stripe width are not multiples
+        * of the fs blocksize turn off alignment.
+        */
+       if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
+           (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
+               xfs_warn(mp,
+       "alignment check failed: sunit/swidth vs. blocksize(%d)",
+                       mp->m_sb.sb_blocksize);
+               return -EINVAL;
+       } else {
                /*
-                * If stripe unit and stripe width are not multiples
-                * of the fs blocksize turn off alignment.
+                * Convert the stripe unit and width to FSBs.
                 */
-               if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
-                   (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
+               mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
+               if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
                        xfs_warn(mp,
-               "alignment check failed: sunit/swidth vs. blocksize(%d)",
-                               sbp->sb_blocksize);
+               "alignment check failed: sunit/swidth vs. agsize(%d)",
+                                mp->m_sb.sb_agblocks);
                        return -EINVAL;
-               } else {
-                       /*
-                        * Convert the stripe unit and width to FSBs.
-                        */
-                       mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
-                       if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
-                               xfs_warn(mp,
-                       "alignment check failed: sunit/swidth vs. agsize(%d)",
-                                        sbp->sb_agblocks);
-                               return -EINVAL;
-                       } else if (mp->m_dalign) {
-                               mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
-                       } else {
-                               xfs_warn(mp,
-                       "alignment check failed: sunit(%d) less than bsize(%d)",
-                                        mp->m_dalign, sbp->sb_blocksize);
-                               return -EINVAL;
-                       }
-               }
-
-               /*
-                * Update superblock with new values
-                * and log changes
-                */
-               if (xfs_sb_version_hasdalign(sbp)) {
-                       if (sbp->sb_unit != mp->m_dalign) {
-                               sbp->sb_unit = mp->m_dalign;
-                               mp->m_update_sb = true;
-                       }
-                       if (sbp->sb_width != mp->m_swidth) {
-                               sbp->sb_width = mp->m_swidth;
-                               mp->m_update_sb = true;
-                       }
+               } else if (mp->m_dalign) {
+                       mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
                } else {
                        xfs_warn(mp,
-       "cannot change alignment: superblock does not support data alignment");
+               "alignment check failed: sunit(%d) less than bsize(%d)",
+                                mp->m_dalign, mp->m_sb.sb_blocksize);
                        return -EINVAL;
                }
+       }
+
+       if (!xfs_sb_version_hasdalign(&mp->m_sb)) {
+               xfs_warn(mp,
+"cannot change alignment: superblock does not support data alignment");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Update alignment values based on mount options and sb values. */
+STATIC int
+xfs_update_alignment(
+       struct xfs_mount        *mp)
+{
+       struct xfs_sb           *sbp = &mp->m_sb;
+
+       if (mp->m_dalign) {
+               bool            update_sb;
+               int             error;
+
+               if (sbp->sb_unit == mp->m_dalign &&
+                   sbp->sb_width == mp->m_swidth)
+                       return 0;
+
+               error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
+               if (error || !update_sb)
+                       return error;
+
+               sbp->sb_unit = mp->m_dalign;
+               sbp->sb_width = mp->m_swidth;
+               mp->m_update_sb = true;
        } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
                    xfs_sb_version_hasdalign(&mp->m_sb)) {
-                       mp->m_dalign = sbp->sb_unit;
-                       mp->m_swidth = sbp->sb_width;
+               mp->m_dalign = sbp->sb_unit;
+               mp->m_swidth = sbp->sb_width;
        }
 
        return 0;
@@ -648,12 +701,12 @@ xfs_mountfs(
        }
 
        /*
-        * Check if sb_agblocks is aligned at stripe boundary
-        * If sb_agblocks is NOT aligned turn off m_dalign since
-        * allocator alignment is within an ag, therefore ag has
-        * to be aligned at stripe boundary.
+        * If we were given new sunit/swidth options, do some basic validation
+        * checks and convert the incore dalign and swidth values to the
+        * same units (FSB) that everything else uses.  This /must/ happen
+        * before computing the inode geometry.
         */
-       error = xfs_update_alignment(mp);
+       error = xfs_validate_new_dalign(mp);
        if (error)
                goto out;
 
@@ -664,6 +717,17 @@ xfs_mountfs(
        xfs_rmapbt_compute_maxlevels(mp);
        xfs_refcountbt_compute_maxlevels(mp);
 
+       /*
+        * Check if sb_agblocks is aligned at the stripe boundary.  If it is
+        * NOT aligned, turn off m_dalign, since allocator alignment is within
+        * an AG and the AG therefore has to be aligned at the stripe boundary.
+        * Note that we must compute the free space and rmap btree geometry
+        * before doing this.
+        */
+       error = xfs_update_alignment(mp);
+       if (error)
+               goto out;
+
        /* enable fail_at_unmount as default */
        mp->m_fail_unmount = true;
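
Condensed, the reworked section of xfs_mountfs() now runs in this order (a sketch assembled from the hunks above, not a verbatim excerpt): unit validation first, then the btree geometry that the root inode check depends on, then the superblock update decision:

    error = xfs_validate_new_dalign(mp);   /* sanity checks, BB -> FSB units */
    if (error)
            goto out;

    xfs_rmapbt_compute_maxlevels(mp);      /* btree geometry needed by ... */
    xfs_refcountbt_compute_maxlevels(mp);

    error = xfs_update_alignment(mp);      /* ... xfs_ialloc_calc_rootino() */
    if (error)
            goto out;
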
 
index c13bb3655e489038d5fbdcea328afcc1a012cee9..e242988f57fb4c0ad5f14b46e7017b9dacf7ead9 100644 (file)
@@ -218,8 +218,8 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(xfs_ino_t, ino)
-               __field(void *, leaf);
-               __field(int, pos);
+               __field(void *, leaf)
+               __field(int, pos)
                __field(xfs_fileoff_t, startoff)
                __field(xfs_fsblock_t, startblock)
                __field(xfs_filblks_t, blockcount)
@@ -3573,6 +3573,27 @@ DEFINE_KMEM_EVENT(kmem_alloc_large);
 DEFINE_KMEM_EVENT(kmem_realloc);
 DEFINE_KMEM_EVENT(kmem_zone_alloc);
 
+TRACE_EVENT(xfs_check_new_dalign,
+       TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino),
+       TP_ARGS(mp, new_dalign, calc_rootino),
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(int, new_dalign)
+               __field(xfs_ino_t, sb_rootino)
+               __field(xfs_ino_t, calc_rootino)
+       ),
+       TP_fast_assign(
+               __entry->dev = mp->m_super->s_dev;
+               __entry->new_dalign = new_dalign;
+               __entry->sb_rootino = mp->m_sb.sb_rootino;
+               __entry->calc_rootino = calc_rootino;
+       ),
+       TP_printk("dev %d:%d new_dalign %d sb_rootino %llu calc_rootino %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->new_dalign, __entry->sb_rootino,
+                 __entry->calc_rootino)
+)
+
 #endif /* _TRACE_XFS_H */
 
 #undef TRACE_INCLUDE_PATH
index c2acd29f973d51c8e5c370d3b58a038a27ba8d60..531c1e9a7d105653d77531fb1b0d696b808048de 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acbuffer.h - Support for buffers returned by ACPI predefined names
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 42d573172e5396babc64bc0663e31562befa1c22..5940a3c68a9607493d45ad5ca56efc46bc96d8b4 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acconfig.h - Global configuration constants
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 233a72f169bb7e02b6de42d8c2350c9371163f78..436cd1411c3a57bdc1850b3b570b5069da5e2cea 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acexcep.h - Exception codes returned by the ACPI subsystem
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 8b3eae96706a72a739bb96becb8808c1647ff72c..8922edb32730a9d19c9abb18c114206cd9035784 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acnames.h - Global names and strings
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index c50542dc71e0b24912571eaa09039a98d3a6ef9f..c5d900c0ecda3a1a7aaedf8a44a91f4bf2fa015a 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acoutput.h -- debug output
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index bc7d39ecf57432583ca0785421165d4b7572d9be..e3e8051d4812b1ed5199b191d549606e34d1c213 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acpi.h - Master public include file used to interface to ACPICA
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2e63b7b390f5a77ed39901ad11f8b084565d7603..33bb8c9a089d24fd36e976a4f68836f4de081daa 100644 (file)
@@ -5,7 +5,7 @@
  *                    interfaces must be implemented by OSL to interface the
  *                    ACPI components to the host operating system.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 18790b9e16b592f51a4dace991dbdd25e769acde..00994b1b8681a32f25b3dfaa0e9d4b9755b9f445 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acpixf.h - External interfaces to the ACPI subsystem
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20191018
+#define ACPI_CA_VERSION                 0x20200110
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
index 62930583219fb2291b809d0e34d9497966d6be49..d3521894ce6a9f571038b47c891eee1bcbe12b88 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acrestyp.h - Defines, types, and structures for resource descriptors
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d568128025df8e848119ce5a247d5071c35ea745..5007c41f4d54c206534b13925f3cc9220ea9c84e 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: actbl.h - Basic ACPI Table Definitions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 22c039ebc6c55551097bee4fe65d10ef81d9f34c..02d06b79e1cd2e54dacf1fa36c2ac51c909f3ac9 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: actbl1.h - Additional ACPI table definitions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index e45ced27f4c30fa4dadd06e2a3d5a166ad0b0c59..b818ba60e19d55a1299788714dde4a13d70c72c9 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 7a58c10ce4216c2238a7d9d5263c15a503089829..2bf3baf819bbed9afc9ac7fdebb91a63afa1bc7e 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: actbl3.h - ACPI Table Definitions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2f3f28c7cea369a99c6ecab1847cc6feba129d5c..a2583c2bc0548c8da7ec53f6c535c636661e8ced 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: actypes.h - Common data types for the entire ACPI subsystem
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 23262cab047a70d69f15b2d1e6ba818937664872..9dd4689a39cf8427dfee12e72fae06d5fc823b26 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acuuid.h - ACPI-related UUID/GUID definitions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 35ab3f87cc2918977b7714b4c35d23894e4c902a..8f6b2654c0b3538c7623e4ad1a2bd79092dfa8ea 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acenv.h - Host and compiler configuration
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
 #endif
 
 
+/*
+ * acpisrc CR/LF support
+ * Unix file line endings do not include the carriage return.
+ * If the acpisrc utility is built with a Microsoft compiler, it will run on
+ * a Windows machine and its output is expected to have CR/LF newlines.  If
+ * acpisrc is built with anything else, it will likely run on a system with
+ * LF newlines.  This flag tells the acpisrc utility that newlines will be
+ * in the LF format.
+ */
+#define ACPI_SRC_OS_LF_ONLY 0
+
 /*! [Begin] no source code translation */
 
 /******************************************************************************
index 2e36c83448974ded46a9f3b88847bef2ddf046de..c3facf5f849502a554284e93d198b903ee6f8d69 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acenvex.h - Extra host and compiler configuration
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 6a0705b433d2c5ee3cecac32f0e10cbb47a4ba9f..7d63d03cf5077a0d92718cf062e38ad745dac45c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acgcc.h - GCC specific defines, etc.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 8dda2856aca1cfe52b2fdbfa3c51e36506a8b207..7c88fd1de95596e194df4102af265c591f709d07 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acgccex.h - Extra GCC specific defines, etc.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d2cc247248cb234b1f4a1b45ed989b247f1c7cca..e7fd5e71be62bdb2f52c183fad226522278357ca 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: acintel.h - VC specific defines, etc.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 310501994c028561c2dda7feeed24efc9ecddad2..987e2af7c3356857298fe68902d923054af478e0 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: aclinux.h - OS specific defines, etc. for Linux
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index cc4f1eb53cba25a4c99fb02f71adaa1a55fb3f58..04f88f2de7816cb419bd16d36e7824d1ef9f0281 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index a950a22c489041e78c05094553bb254313c9d636..cac7404b2bdd2cdcd1c4041f496aecbf4e9245fe 100644 (file)
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
+#ifndef flush_cache_all
 static inline void flush_cache_all(void)
 {
 }
+#endif
 
+#ifndef flush_cache_mm
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_dup_mm
 static inline void flush_cache_dup_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_range
 static inline void flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start,
                                     unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_page
 static inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long vmaddr,
                                    unsigned long pfn)
 {
 }
+#endif
 
+#ifndef flush_dcache_page
 static inline void flush_dcache_page(struct page *page)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_lock
 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_unlock
 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_icache_range
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_icache_page
 static inline void flush_icache_page(struct vm_area_struct *vma,
                                     struct page *page)
 {
 }
+#endif
 
+#ifndef flush_icache_user_range
 static inline void flush_icache_user_range(struct vm_area_struct *vma,
                                           struct page *page,
                                           unsigned long addr, int len)
 {
 }
+#endif
 
+#ifndef flush_cache_vmap
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)     \
        do { \
                memcpy(dst, src, len); \
                flush_icache_user_range(vma, page, vaddr, len); \
        } while (0)
+#endif
+
+#ifndef copy_from_user_page
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)
+#endif
 
 #endif /* __ASM_CACHEFLUSH_H */
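
With every stub now wrapped in an #ifndef guard, an architecture can override individual hooks and fall through to the generic no-ops for the rest. A hypothetical per-arch header might look like this (the arch name, guard, and prototype are illustrative; the define-masks-the-stub pattern is the mechanism the guards enable):

    /* arch/foo/include/asm/cacheflush.h -- hypothetical override */
    #ifndef __ASM_FOO_CACHEFLUSH_H
    #define __ASM_FOO_CACHEFLUSH_H

    struct page;
    void flush_dcache_page(struct page *page);   /* arch implementation */
    #define flush_dcache_page flush_dcache_page  /* mask the generic stub */

    #include <asm-generic/cacheflush.h>          /* everything else: no-ops */

    #endif
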
index 325fc98cc9ff75109702c8e9bcf8f26fb82fa5c5..d39ac997dda8d3b36b2bbb3f3736a27e022e8f45 100644 (file)
@@ -960,10 +960,6 @@ static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
 }
 #endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
 
-#ifndef ioremap_nocache
-#define ioremap_nocache ioremap
-#endif
-
 #ifndef ioremap_wc
 #define ioremap_wc ioremap
 #endif
index a008f504a2d0ace885b760fcdfd9ba2367906b11..9d28a5e82f73149390ccc08715768a49893d6eca 100644 (file)
@@ -94,11 +94,11 @@ extern void ioport_unmap(void __iomem *);
 #endif
 
 #ifndef ARCH_HAS_IOREMAP_WC
-#define ioremap_wc ioremap_nocache
+#define ioremap_wc ioremap
 #endif
 
 #ifndef ARCH_HAS_IOREMAP_WT
-#define ioremap_wt ioremap_nocache
+#define ioremap_wt ioremap
 #endif
 
 #ifdef CONFIG_PCI
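
With ioremap_nocache() gone (its fallback define was removed in the previous hunk), plain ioremap() is the uncached default, so the write-combining and write-through fallbacks alias it directly. Converting a caller is a one-word change, e.g. in hypothetical driver code:

    static void __iomem *map_device_regs(phys_addr_t base, size_t len)
    {
            return ioremap(base, len);   /* was: ioremap_nocache(base, len) */
    }
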
index ce41032086193dfb98e9ad1b444624d0dee7a207..cec543d9e87bf99304efea9f7771d745630d3f69 100644 (file)
@@ -12,9 +12,9 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
 #endif /* __arch_get_k_vdso_data */
 
 #ifndef __arch_update_vdso_data
-static __always_inline int __arch_update_vdso_data(void)
+static __always_inline bool __arch_update_vdso_data(void)
 {
-       return 0;
+       return true;
 }
 #endif /* __arch_update_vdso_data */
 
index 553e539469f04cb74db1b36a213dec794fad10b2..34eef083c9882fafbd3bf5cc25a0dd5fdb38b06d 100644 (file)
@@ -30,7 +30,7 @@ extern void hv_stimer_global_cleanup(void);
 extern void hv_stimer0_isr(void);
 
 #ifdef CONFIG_HYPERV_TIMER
-extern struct clocksource *hyperv_cs;
+extern u64 (*hv_read_reference_counter)(void);
 extern void hv_init_clocksource(void);
 
 extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
index d5fc90b304876697740c7d994a78ea8692e9f85b..c1bda7030e2de8ad3e904471b0264c9effccfcde 100644 (file)
@@ -605,6 +605,12 @@ struct drm_dp_mst_topology_mgr {
         * &drm_dp_sideband_msg_tx.state once they are queued
         */
        struct mutex qlock;
+
+       /**
+        * @is_waiting_for_dwn_reply: indicates whether we are waiting for a
+        * down reply
+        */
+       bool is_waiting_for_dwn_reply;
+
        /**
         * @tx_msg_downq: List of pending down replies.
         */
diff --git a/include/dt-bindings/dma/x1830-dma.h b/include/dt-bindings/dma/x1830-dma.h
new file mode 100644 (file)
index 0000000..35bcb89
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X1830 DMA bindings.
+ *
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X1830_DMA_H__
+#define __DT_BINDINGS_DMA_X1830_DMA_H__
+
+/*
+ * Request type numbers for the X1830 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X1830_DMA_I2S0_TX      0x6
+#define X1830_DMA_I2S0_RX      0x7
+#define X1830_DMA_AUTO         0x8
+#define X1830_DMA_SADC_RX      0x9
+#define X1830_DMA_UART1_TX     0x12
+#define X1830_DMA_UART1_RX     0x13
+#define X1830_DMA_UART0_TX     0x14
+#define X1830_DMA_UART0_RX     0x15
+#define X1830_DMA_SSI0_TX      0x16
+#define X1830_DMA_SSI0_RX      0x17
+#define X1830_DMA_SSI1_TX      0x18
+#define X1830_DMA_SSI1_RX      0x19
+#define X1830_DMA_MSC0_TX      0x1a
+#define X1830_DMA_MSC0_RX      0x1b
+#define X1830_DMA_MSC1_TX      0x1c
+#define X1830_DMA_MSC1_RX      0x1d
+#define X1830_DMA_DMIC_RX      0x21
+#define X1830_DMA_SMB0_TX      0x24
+#define X1830_DMA_SMB0_RX      0x25
+#define X1830_DMA_SMB1_TX      0x26
+#define X1830_DMA_SMB1_RX      0x27
+#define X1830_DMA_DES_TX       0x2e
+#define X1830_DMA_DES_RX       0x2f
+
+#endif /* __DT_BINDINGS_DMA_X1830_DMA_H__ */
diff --git a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
new file mode 100644 (file)
index 0000000..f315d5a
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_
+
+#define ASPEED_SCU_IC_VGA_CURSOR_CHANGE                        0
+#define ASPEED_SCU_IC_VGA_SCRATCH_REG_CHANGE           1
+
+#define ASPEED_AST2500_SCU_IC_PCIE_RESET_LO_TO_HI      2
+#define ASPEED_AST2500_SCU_IC_PCIE_RESET_HI_TO_LO      3
+#define ASPEED_AST2500_SCU_IC_LPC_RESET_LO_TO_HI       4
+#define ASPEED_AST2500_SCU_IC_LPC_RESET_HI_TO_LO       5
+#define ASPEED_AST2500_SCU_IC_ISSUE_MSI                        6
+
+#define ASPEED_AST2600_SCU_IC0_PCIE_PERST_LO_TO_HI     2
+#define ASPEED_AST2600_SCU_IC0_PCIE_PERST_HI_TO_LO     3
+#define ASPEED_AST2600_SCU_IC0_PCIE_RCRST_LO_TO_HI     4
+#define ASPEED_AST2600_SCU_IC0_PCIE_RCRST_HI_TO_LO     5
+
+#define ASPEED_AST2600_SCU_IC1_LPC_RESET_LO_TO_HI      0
+#define ASPEED_AST2600_SCU_IC1_LPC_RESET_HI_TO_LO      1
+
+#endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_ */
index c614438bcbdb89508432da77c6d55b356c950625..fbc524a900da118619aa3683c7992a1df56627a1 100644 (file)
@@ -46,9 +46,9 @@
 #define RESET_VD_RMEM                  64
 #define RESET_AUDIN                    65
 #define RESET_DBLK                     66
-#define RESET_PIC_DC                   66
-#define RESET_PSC                      66
-#define RESET_NAND                     66
+#define RESET_PIC_DC                   67
+#define RESET_PSC                      68
+#define RESET_NAND                     69
 #define RESET_GE2D                     70
 #define RESET_PARSER_REG               71
 #define RESET_PARSER_FETCH             72
index 0f37a7d5fa7748c91fccf4778f093f863907850a..0f24d701fbdc9f09a9a1e706ad168ae37ed50e49 100644 (file)
@@ -279,6 +279,21 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
 
 /* Validate the processor object's proc_id */
 bool acpi_duplicate_processor_id(int proc_id);
+/* Processor _CST control */
+struct acpi_processor_power;
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+bool acpi_processor_claim_cst_control(void);
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+                               struct acpi_processor_power *info);
+#else
+static inline bool acpi_processor_claim_cst_control(void) { return false; }
+static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+                                             struct acpi_processor_power *info)
+{
+       return -ENODEV;
+}
+#endif
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
index 6782f0d45ebe5dce4687f1f5db4ab39f3673c3a0..49e5383d4222257bed2e103fa6c2ec29b836bf00 100644 (file)
@@ -19,6 +19,8 @@ struct ahci_host_priv;
 struct platform_device;
 struct scsi_host_template;
 
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
 int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
 void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
 int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
index 74748e306f4b8f5ad8e184d9385221e9a61e9538..05e758b8b894541c1388b8ec97300dc8573fa317 100644 (file)
@@ -60,7 +60,11 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
 u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
 ktime_t alarm_expires_remaining(const struct alarm *alarm);
 
+#ifdef CONFIG_RTC_CLASS
 /* Provide way to access the rtc device being used by alarmtimers */
 struct rtc_device *alarmtimer_get_rtcdev(void);
+#else
+static inline struct rtc_device *alarmtimer_get_rtcdev(void) { return NULL; }
+#endif
 
 #endif
index 3cdb84cdc48843bd970f41f6221e1467fd8b86ff..853d92ceee64ed5d89f8900f2235af22b1b6fbe1 100644 (file)
@@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
 
 static inline void zero_fill_bio(struct bio *bio)
 {
index 19394c77ed9955ac0b949f8a536f9ed4fbbace08..e4a6949fd17165979616c4f361d2cf4948703007 100644 (file)
@@ -188,7 +188,6 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
-void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
@@ -720,7 +719,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
 static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
 { return NULL; }
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
index 47eb22a3b7f99ed14d2caafeada6bce779b5ee29..4c636c42ad6864523fcf637ddf28ce66ee0934b2 100644 (file)
@@ -328,6 +328,7 @@ struct queue_limits {
        unsigned int            max_sectors;
        unsigned int            max_segment_size;
        unsigned int            physical_block_size;
+       unsigned int            logical_block_size;
        unsigned int            alignment_offset;
        unsigned int            io_min;
        unsigned int            io_opt;
@@ -338,7 +339,6 @@ struct queue_limits {
        unsigned int            discard_granularity;
        unsigned int            discard_alignment;
 
-       unsigned short          logical_block_size;
        unsigned short          max_segments;
        unsigned short          max_integrity_segments;
        unsigned short          max_discard_segments;
@@ -1077,7 +1077,7 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q,
                unsigned int max_write_same_sectors);
 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
@@ -1291,7 +1291,7 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
        return q->limits.max_segment_size;
 }
 
-static inline unsigned short queue_logical_block_size(const struct request_queue *q)
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
 {
        int retval = 512;
 
@@ -1301,7 +1301,7 @@ static inline unsigned short queue_logical_block_size(const struct request_queue
        return retval;
 }
 
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
 {
        return queue_logical_block_size(bdev_get_queue(bdev));
 }
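
The widening from unsigned short matters once a device reports a 64 KiB logical block size: 65536 does not fit in 16 bits and silently wraps to 0. A standalone demonstration of the truncation the change avoids:

    #include <stdio.h>

    int main(void)
    {
            unsigned short as_short = 65536U;  /* wraps mod 2^16 -> 0 */
            unsigned int   as_int   = 65536U;  /* preserved */

            printf("short: %u, int: %u\n", as_short, as_int);
            return 0;
    }
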
index 169fd25f6bc2d697f078abd3e9e430499fdb9aa7..9be71c195d7450ea40631e464c5f08beee8a0878 100644 (file)
@@ -157,8 +157,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
-int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
-void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
+void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
 
 int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
@@ -360,9 +360,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
 
 static inline void bpf_cgroup_storage_set(
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
-static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
+static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
-static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
+static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
                                              struct bpf_map *map) {}
 static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
index 35903f148be5968ad1b85807d0bfb7c06c249897..085a59afba85751a8513ba70d275b769154e03a0 100644 (file)
@@ -461,6 +461,7 @@ struct bpf_trampoline {
        struct {
                struct btf_func_model model;
                void *addr;
+               bool ftrace_managed;
        } func;
        /* list of BPF programs using this trampoline */
        struct hlist_head progs_hlist[BPF_TRAMP_MAX];
@@ -817,6 +818,8 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 int __bpf_prog_charge(struct user_struct *user, u32 pages);
 void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+                         struct bpf_map **used_maps, u32 len);
 
 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
index 679a42253170bf6325cbdfebadfb283986878d2e..a81c13ac19728c233de45d6ef9fbfae4e4a94e06 100644 (file)
@@ -153,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec,
        }
 }
 
-/*
- * Get the last single-page segment from the multi-page bvec and store it
- * in @seg
- */
-static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
-                                       struct bio_vec *seg)
-{
-       unsigned total = bvec->bv_offset + bvec->bv_len;
-       unsigned last_page = (total - 1) / PAGE_SIZE;
-
-       seg->bv_page = bvec->bv_page + last_page;
-
-       /* the whole segment is inside the last page */
-       if (bvec->bv_offset >= last_page * PAGE_SIZE) {
-               seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
-               seg->bv_len = bvec->bv_len;
-       } else {
-               seg->bv_offset = 0;
-               seg->bv_len = total - last_page * PAGE_SIZE;
-       }
-}
-
 #endif /* __LINUX_BVEC_ITER_H */
index 9b3c720a31b189b2ccf6637636ad02ecc3503f70..5e3d45525bd358ebefc999945c09ff4d5a124ffa 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/can/error.h>
 #include <linux/can/led.h>
 #include <linux/can/netlink.h>
+#include <linux/can/skb.h>
 #include <linux/netdevice.h>
 
 /*
@@ -91,6 +92,36 @@ struct can_priv {
 #define get_can_dlc(i)         (min_t(__u8, (i), CAN_MAX_DLC))
 #define get_canfd_dlc(i)       (min_t(__u8, (i), CANFD_MAX_DLC))
 
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+                                         struct sk_buff *skb)
+{
+       /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+       if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+               return false;
+
+       /* af_packet does not apply CAN skb specific settings */
+       if (skb->ip_summed == CHECKSUM_NONE) {
+               /* init headroom */
+               can_skb_prv(skb)->ifindex = dev->ifindex;
+               can_skb_prv(skb)->skbcnt = 0;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               /* perform proper loopback on capable devices */
+               if (dev->flags & IFF_ECHO)
+                       skb->pkt_type = PACKET_LOOPBACK;
+               else
+                       skb->pkt_type = PACKET_HOST;
+
+               skb_reset_mac_header(skb);
+               skb_reset_network_header(skb);
+               skb_reset_transport_header(skb);
+       }
+
+       return true;
+}
+
 /* Drop a given socketbuffer if it does not contain a valid CAN frame. */
 static inline bool can_dropped_invalid_skb(struct net_device *dev,
                                          struct sk_buff *skb)
@@ -108,6 +139,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev,
        } else
                goto inval_skb;
 
+       if (!can_skb_headroom_valid(dev, skb))
+               goto inval_skb;
+
        return false;
 
 inval_skb:
index 92d5fdc8154ee9f1e2ec38b8a78b9b486240bd24..31b1b0e03df8ccb55b33f644ec0d2d9a8427f81d 100644 (file)
@@ -595,17 +595,6 @@ struct governor_attr {
                         size_t count);
 };
 
-static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
-{
-       /*
-        * Allow remote callbacks if:
-        * - dvfs_possible_from_any_cpu flag is set
-        * - the local and remote CPUs share cpufreq policy
-        */
-       return policy->dvfs_possible_from_any_cpu ||
-               cpumask_test_cpu(smp_processor_id(), policy->cpus);
-}
-
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
index 1dabe36bd01192bab2a060d5726eb27e63782677..ec2ef63771f08944f06fe597af058a0ba118a7e4 100644 (file)
@@ -77,6 +77,7 @@ struct cpuidle_state {
 #define CPUIDLE_FLAG_COUPLED   BIT(1) /* state applies to multiple cpus */
 #define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
 #define CPUIDLE_FLAG_UNUSABLE  BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF       BIT(4) /* disable this state by default */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;
@@ -115,7 +116,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
 struct cpuidle_driver {
        const char              *name;
        struct module           *owner;
-       int                     refcnt;
 
         /* used by the cpuidle framework to setup the broadcast timer */
        unsigned int            bctimer:1;
@@ -147,8 +147,6 @@ extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
 
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 extern struct cpuidle_driver *cpuidle_get_driver(void);
-extern struct cpuidle_driver *cpuidle_driver_ref(void);
-extern void cpuidle_driver_unref(void);
 extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
                                        bool disable);
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
@@ -186,8 +184,6 @@ static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
-static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
-static inline void cpuidle_driver_unref(void) {}
 static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
                                               int idx, bool disable) { }
 static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
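
A hedged sketch of how a cpuidle driver could mark a deep state as disabled by default with the new flag; the state parameters and my_enter_* callbacks are illustrative only:

    static struct cpuidle_driver my_idle_driver = {
            .name  = "my_idle",
            .owner = THIS_MODULE,
            .states = {
                    { .name = "C1", .exit_latency = 2,
                      .target_residency = 2, .enter = my_enter_c1 },
                    /* off by default, can be enabled via
                     * /sys/.../cpuidle/stateN/disable */
                    { .name = "C6", .exit_latency = 100,
                      .target_residency = 400, .enter = my_enter_c6,
                      .flags = CPUIDLE_FLAG_OFF },
            },
            .state_count = 2,
    };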
index 2bae9ed3c7831cfe1a22e1e33e411165c520d87a..c6f82d4bec9f465b711d59dbf80587d75fd04a4c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
 
 #define DEVFREQ_NAME_LEN 16
 
@@ -106,6 +107,20 @@ struct devfreq_dev_profile {
        unsigned int max_state;
 };
 
+/**
+ * struct devfreq_stats - Statistics of devfreq device behavior
+ * @total_trans:       Number of devfreq transitions.
+ * @trans_table:       Statistics of devfreq transitions.
+ * @time_in_state:     Statistics of devfreq states.
+ * @last_update:       The last time stats were updated.
+ */
+struct devfreq_stats {
+       unsigned int total_trans;
+       unsigned int *trans_table;
+       u64 *time_in_state;
+       u64 last_update;
+};
+
 /**
  * struct devfreq - Device devfreq structure
  * @node:      list node - contains the devices with devfreq that have been
@@ -121,23 +136,23 @@ struct devfreq_dev_profile {
  *             devfreq.nb to the corresponding register notifier call chain.
  * @work:      delayed work for load monitoring.
  * @previous_freq:     previously configured frequency value.
+ * @last_status:       devfreq user device info, performance statistics
  * @data:      Private data of the governor. The devfreq framework does not
  *             touch this.
- * @min_freq:  Limit minimum frequency requested by user (0: none)
- * @max_freq:  Limit maximum frequency requested by user (0: none)
+ * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
+ * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs)
  * @scaling_min_freq:  Limit minimum frequency requested by OPP interface
  * @scaling_max_freq:  Limit maximum frequency requested by OPP interface
  * @stop_polling:       devfreq polling status of a device.
  * @suspend_freq:       frequency of a device set during suspend phase.
  * @resume_freq:        frequency of a device set in resume phase.
  * @suspend_count:      suspend requests counter for a device.
- * @total_trans:       Number of devfreq transitions
- * @trans_table:       Statistics of devfreq transitions
- * @time_in_state:     Statistics of devfreq states
- * @last_stat_updated: The last time stat updated
+ * @stats:     Statistics of devfreq device behavior
  * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ * @nb_min:            Notifier block for DEV_PM_QOS_MIN_FREQUENCY
+ * @nb_max:            Notifier block for DEV_PM_QOS_MAX_FREQUENCY
  *
- * This structure stores the devfreq information for a give device.
+ * This structure stores the devfreq information for a given device.
  *
  * Note that when a governor accesses entries in struct devfreq in its
  * functions except for the context of callbacks defined in struct
@@ -161,8 +176,8 @@ struct devfreq {
 
        void *data; /* private data for governors */
 
-       unsigned long min_freq;
-       unsigned long max_freq;
+       struct dev_pm_qos_request user_min_freq_req;
+       struct dev_pm_qos_request user_max_freq_req;
        unsigned long scaling_min_freq;
        unsigned long scaling_max_freq;
        bool stop_polling;
@@ -171,13 +186,13 @@ struct devfreq {
        unsigned long resume_freq;
        atomic_t suspend_count;
 
-       /* information for device frequency transition */
-       unsigned int total_trans;
-       unsigned int *trans_table;
-       unsigned long *time_in_state;
-       unsigned long last_stat_updated;
+       /* information for device frequency transitions */
+       struct devfreq_stats stats;
 
        struct srcu_notifier_head transition_notifier_list;
+
+       struct notifier_block nb_min;
+       struct notifier_block nb_max;
 };
 
 struct devfreq_freqs {
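
With the user limits now expressed as PM QoS requests, a sysfs store handler would update them roughly as below (a sketch; assumes the core added the request with dev_pm_qos_add_request() at init and that value is in kHz):

    /* e.g. in the min_freq store handler */
    ret = dev_pm_qos_update_request(&df->user_min_freq_req, value);
    if (ret < 0)
            return ret;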
index e226030c1df3c3f1c333f2acba8955a116921cf6..96ff76731e93df766d09394f8936254849e5b9b8 100644 (file)
@@ -1666,11 +1666,11 @@ extern bool kill_device(struct device *dev);
 #ifdef CONFIG_DEVTMPFS
 extern int devtmpfs_create_node(struct device *dev);
 extern int devtmpfs_delete_node(struct device *dev);
-extern int devtmpfs_mount(const char *mntdir);
+extern int devtmpfs_mount(void);
 #else
 static inline int devtmpfs_create_node(struct device *dev) { return 0; }
 static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
-static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
+static inline int devtmpfs_mount(void) { return 0; }
 #endif
 
 /* drivers/base/power/shutdown.c */
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
new file mode 100644 (file)
index 0000000..61d5cc0
--- /dev/null
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_H_
+#define K3_PSIL_H_
+
+#include <linux/types.h>
+
+#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000
+
+struct device;
+
+/**
+ * enum udma_tp_level - Channel Throughput Levels
+ * @UDMA_TP_NORMAL:    Normal channel
+ * @UDMA_TP_HIGH:      High Throughput channel
+ * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel
+ */
+enum udma_tp_level {
+       UDMA_TP_NORMAL = 0,
+       UDMA_TP_HIGH,
+       UDMA_TP_ULTRAHIGH,
+       UDMA_TP_LAST,
+};
+
+/**
+ * enum psil_endpoint_type - PSI-L Endpoint type
+ * @PSIL_EP_NATIVE:    Normal channel
+ * @PSIL_EP_PDMA_XY:   XY mode PDMA
+ * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA
+ * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA
+ */
+enum psil_endpoint_type {
+       PSIL_EP_NATIVE = 0,
+       PSIL_EP_PDMA_XY,
+       PSIL_EP_PDMA_MCAN,
+       PSIL_EP_PDMA_AASRC,
+};
+
+/**
+ * struct psil_endpoint_config - PSI-L Endpoint configuration
+ * @ep_type:           PSI-L endpoint type
+ * @pkt_mode:          If set, the channel must be in Packet mode, otherwise in
+ *                     TR mode
+ * @notdpkt:           TDCM must be suppressed on the TX channel
+ * @needs_epib:                Endpoint needs EPIB
+ * @psd_size:          If set, PSdata is used by the endpoint
+ * @channel_tpl:       Desired throughput level for the channel
+ * @pdma_acc32:                ACC32 must be enabled on the PDMA side
+ * @pdma_burst:                BURST must be enabled on the PDMA side
+ */
+struct psil_endpoint_config {
+       enum psil_endpoint_type ep_type;
+
+       unsigned pkt_mode:1;
+       unsigned notdpkt:1;
+       unsigned needs_epib:1;
+       u32 psd_size;
+       enum udma_tp_level channel_tpl;
+
+       /* PDMA properties, valid for PSIL_EP_PDMA_* */
+       unsigned pdma_acc32:1;
+       unsigned pdma_burst:1;
+};
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+                          struct psil_endpoint_config *ep_config);
+
+#endif /* K3_PSIL_H_ */
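
A hedged usage sketch for the new interface; the endpoint name and configuration values are placeholders, not taken from a real board:

    static struct psil_endpoint_config my_ep_cfg = {
            .ep_type     = PSIL_EP_PDMA_XY,
            .pkt_mode    = 1,
            .needs_epib  = 1,
            .psd_size    = 16,
            .channel_tpl = UDMA_TP_NORMAL,
    };

    ret = psil_set_new_ep_config(dev, "my-thread", &my_ep_cfg);
    if (ret)
            dev_err(dev, "PSI-L endpoint config failed: %d\n", ret);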
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
new file mode 100644 (file)
index 0000000..caadbab
--- /dev/null
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_GLUE_H_
+#define K3_UDMA_GLUE_H_
+
+#include <linux/types.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+
+struct k3_udma_glue_tx_channel_cfg {
+       struct k3_ring_cfg tx_cfg;
+       struct k3_ring_cfg txcq_cfg;
+
+       bool tx_pause_on_err;
+       bool tx_filt_einfo;
+       bool tx_filt_pswords;
+       bool tx_supr_tdpkt;
+       u32  swdata_size;
+};
+
+struct k3_udma_glue_tx_channel;
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+               const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                            struct cppi5_host_desc_t *desc_tx,
+                            dma_addr_t desc_dma);
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                           dma_addr_t *desc_dma);
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+                              bool sync);
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+               void *data, void (*cleanup)(void *data, dma_addr_t desc_dma));
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+
+enum {
+       K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
+       K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
+       K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
+       K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
+};
+
+/**
+ * k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
+ *
+ * @rx_cfg:            RX ring configuration
+ * @rxfdq_cfg:         RX free Host PD ring configuration
+ * @ring_rxq_id:       RX ring id (or -1 for any)
+ * @ring_rxfdq0_id:    RX free Host PD ring (FDQ) id (or -1 for any)
+ * @rx_error_handling: Rx Error Handling Mode (0 - drop, 1 - re-try)
+ * @src_tag_lo_sel:    Rx Source Tag Low Byte Selector in Host PD
+ */
+struct k3_udma_glue_rx_flow_cfg {
+       struct k3_ring_cfg rx_cfg;
+       struct k3_ring_cfg rxfdq_cfg;
+       int ring_rxq_id;
+       int ring_rxfdq0_id;
+       bool rx_error_handling;
+       int src_tag_lo_sel;
+};
+
+/**
+ * k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
+ *
+ * @swdata_size:       SW Data size, in bytes, present in each Host PD
+ * @flow_id_base:      first flow_id used by channel.
+ *                     if @flow_id_base = -1 - range of GP rflows will be
+ *                     allocated dynamically.
+ * @flow_id_num:       number of RX flows used by channel
+ * @flow_id_use_rxchan_id:     use RX channel id as flow id,
+ *                             used only if @flow_id_num = 1
+ * @remote:            indication that the RX channel is remote - some remote
+ *                     CPU core owns and controls the RX channel. The Linux
+ *                     host is only allowed to attach and configure RX flows
+ *                     within the channel; if set, no RX channel operations
+ *                     will be performed by the K3 NAVSS DMA glue interface.
+ * @def_flow_cfg:      default RX flow configuration,
+ *                     used only if @flow_id_num = 1
+ */
+struct k3_udma_glue_rx_channel_cfg {
+       u32  swdata_size;
+       int  flow_id_base;
+       int  flow_id_num;
+       bool flow_id_use_rxchan_id;
+       bool remote;
+
+       struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
+};
+
+struct k3_udma_glue_rx_channel;
+
+struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
+               struct device *dev,
+               const char *name,
+               struct k3_udma_glue_rx_channel_cfg *cfg);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+                              bool sync);
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_num, struct cppi5_host_desc_t *desc_tx,
+               dma_addr_t desc_dma);
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_num, dma_addr_t *desc_dma);
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+                                   u32 flow_idx);
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+                           u32 flow_num);
+void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
+                            u32 flow_num);
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+               u32 flow_num, void *data,
+               void (*cleanup)(void *data, dma_addr_t desc_dma),
+               bool skip_fdq);
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+                               u32 flow_idx);
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+                                u32 flow_idx);
+
+#endif /* K3_UDMA_GLUE_H_ */
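
A condensed sketch of the intended TX call flow (ring configuration and descriptor setup elided; the channel name and variables are placeholders):

    struct k3_udma_glue_tx_channel_cfg cfg = {
            .swdata_size = 16,
            /* .tx_cfg / .txcq_cfg ring parameters elided */
    };
    struct k3_udma_glue_tx_channel *tx_chn;

    tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0-mem2dev", &cfg);
    if (IS_ERR(tx_chn))
            return PTR_ERR(tx_chn);

    k3_udma_glue_enable_tx_chn(tx_chn);
    k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma); /* queue one packet */
    /* completion handler: k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma); */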
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
new file mode 100644 (file)
index 0000000..579356a
--- /dev/null
@@ -0,0 +1,1059 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPPI5 descriptors interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __TI_CPPI5_H__
+#define __TI_CPPI5_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+/**
+ * struct cppi5_desc_hdr_t - Descriptor header, present in all types of
+ *                          descriptors
+ * @pkt_info0:         Packet info word 0 (n/a in Buffer desc)
+ * @pkt_info1:         Packet info word 1 (n/a in Buffer desc)
+ * @pkt_info2:         Packet info word 2 (n/a in Buffer desc)
+ * @src_dst_tag:       Packet info word 3 (n/a in Buffer desc)
+ */
+struct cppi5_desc_hdr_t {
+       u32 pkt_info0;
+       u32 pkt_info1;
+       u32 pkt_info2;
+       u32 src_dst_tag;
+} __packed;
+
+/**
+ * struct cppi5_host_desc_t - Host-mode packet and buffer descriptor definition
+ * @hdr:               Descriptor header
+ * @next_desc:         word 4/5: Linking word
+ * @buf_ptr:           word 6/7: Buffer pointer
+ * @buf_info1:         word 8: Buffer valid data length
+ * @org_buf_len:       word 9: Original buffer length
+ * @org_buf_ptr:       word 10/11: Original buffer pointer
+ * @epib[0]:           Extended Packet Info Data (optional, 4 words), and/or
+ *                     Protocol Specific Data (optional, 0-128 bytes in
+ *                     multiples of 4), and/or
+ *                     Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_host_desc_t {
+       struct cppi5_desc_hdr_t hdr;
+       u64 next_desc;
+       u64 buf_ptr;
+       u32 buf_info1;
+       u32 org_buf_len;
+       u64 org_buf_ptr;
+       u32 epib[0];
+} __packed;
+
+#define CPPI5_DESC_MIN_ALIGN                   (16U)
+
+#define CPPI5_INFO0_HDESC_EPIB_SIZE            (16U)
+#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE      (128U)
+
+#define CPPI5_INFO0_HDESC_TYPE_SHIFT           (30U)
+#define CPPI5_INFO0_HDESC_TYPE_MASK            GENMASK(31, 30)
+#define   CPPI5_INFO0_DESC_TYPE_VAL_HOST       (1U)
+#define   CPPI5_INFO0_DESC_TYPE_VAL_MONO       (2U)
+#define   CPPI5_INFO0_DESC_TYPE_VAL_TR         (3U)
+#define CPPI5_INFO0_HDESC_EPIB_PRESENT         BIT(29)
+/*
+ * Protocol Specific Words location:
+ * 0 = located in the descriptor,
+ * 1 = located in the SOP Buffer immediately prior to the data.
+ */
+#define CPPI5_INFO0_HDESC_PSINFO_LOCATION      BIT(28)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT    (22U)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK     GENMASK(27, 22)
+#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT         (0)
+#define CPPI5_INFO0_HDESC_PKTLEN_MASK          GENMASK(21, 0)
+
+#define CPPI5_INFO1_DESC_PKTERROR_SHIFT                (28U)
+#define CPPI5_INFO1_DESC_PKTERROR_MASK         GENMASK(31, 28)
+#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT         (24U)
+#define CPPI5_INFO1_HDESC_PSFLGS_MASK          GENMASK(27, 24)
+#define CPPI5_INFO1_DESC_PKTID_SHIFT           (14U)
+#define CPPI5_INFO1_DESC_PKTID_MASK            GENMASK(23, 14)
+#define CPPI5_INFO1_DESC_FLOWID_SHIFT          (0)
+#define CPPI5_INFO1_DESC_FLOWID_MASK           GENMASK(13, 0)
+#define CPPI5_INFO1_DESC_FLOWID_DEFAULT                CPPI5_INFO1_DESC_FLOWID_MASK
+
+#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT                (27U)
+#define CPPI5_INFO2_HDESC_PKTTYPE_MASK         GENMASK(31, 27)
+/* Return Policy: 0 = Entire packet, 1 = Each buffer */
+#define CPPI5_INFO2_HDESC_RETPOLICY            BIT(18)
+/*
+ * Early Return:
+ * 0 = desc pointers should be returned after all reads have been completed
+ * 1 = desc pointers should be returned immediately upon fetching
+ * the descriptor and beginning to transfer data.
+ */
+#define CPPI5_INFO2_HDESC_EARLYRET             BIT(17)
+/*
+ * Return Push Policy:
+ * 0 = Descriptor must be returned to tail of queue
+ * 1 = Descriptor must be returned to head of queue
+ */
+#define CPPI5_INFO2_DESC_RETPUSHPOLICY         BIT(16)
+#define CPPI5_INFO2_DESC_RETP_MASK             GENMASK(18, 16)
+
+#define CPPI5_INFO2_DESC_RETQ_SHIFT            (0)
+#define CPPI5_INFO2_DESC_RETQ_MASK             GENMASK(15, 0)
+
+#define CPPI5_INFO3_DESC_SRCTAG_SHIFT          (16U)
+#define CPPI5_INFO3_DESC_SRCTAG_MASK           GENMASK(31, 16)
+#define CPPI5_INFO3_DESC_DSTTAG_SHIFT          (0)
+#define CPPI5_INFO3_DESC_DSTTAG_MASK           GENMASK(15, 0)
+
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT    (0)
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK     GENMASK(27, 0)
+
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT    (0)
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK     GENMASK(27, 0)
+
+/**
+ * struct cppi5_desc_epib_t - Host Packet Descriptor Extended Packet Info Block
+ * @timestamp:         word 0: application specific timestamp
+ * @sw_info0:          word 1: Software Info 0
+ * @sw_info1:          word 2: Software Info 1
+ * @sw_info2:          word 3: Software Info 2
+ */
+struct cppi5_desc_epib_t {
+       u32 timestamp;  /* w0: application specific timestamp */
+       u32 sw_info0;   /* w1: Software Info 0 */
+       u32 sw_info1;   /* w2: Software Info 1 */
+       u32 sw_info2;   /* w3: Software Info 2 */
+};
+
+/**
+ * struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor
+ * @hdr:               Descriptor header
+ * @epib[0]:           Extended Packet Info Data (optional, 4 words), and/or
+ *                     Protocol Specific Data (optional, 0-128 bytes in
+ *                     multiples of 4), and/or
+ *                     Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_monolithic_desc_t {
+       struct cppi5_desc_hdr_t hdr;
+       u32 epib[0];
+};
+
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT    (18U)
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK     GENMASK(26, 18)
+
+/*
+ * Reload Count:
+ * 0 = Finish the packet and place the descriptor back on the return queue
+ * 1-0x1ff = Vector to the Reload Index and resume processing
+ * 0x1ff indicates perpetual loop, infinite reload until the channel is stopped
+ */
+#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT                (20U)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MASK         GENMASK(28, 20)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MAX          (0x1ff)
+#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE     CPPI5_INFO0_TRDESC_RLDCNT_MAX
+#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT                (14U)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MASK         GENMASK(19, 14)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MAX          (0x3f)
+#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT       (0)
+#define CPPI5_INFO0_TRDESC_LASTIDX_MASK                GENMASK(13, 0)
+
+#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT       (24U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_MASK                GENMASK(26, 24)
+#define   CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B   (0)
+#define   CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B   (1U)
+#define   CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B   (2U)
+#define   CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B  (3U)
+
+static inline void cppi5_desc_dump(void *desc, u32 size)
+{
+       print_hex_dump(KERN_ERR, "dump udmap_desc: ", DUMP_PREFIX_NONE,
+                      32, 4, desc, size, false);
+}
+
+#define CPPI5_TDCM_MARKER                      (0x1)
+/**
+ * cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message
+ * @paddr: Physical address of the packet popped from the ring
+ *
+ * Returns true if the address indicates TDCM
+ */
+static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
+{
+       return (paddr & CPPI5_TDCM_MARKER) ? true : false;
+}
+
+/**
+ * cppi5_desc_get_type - get descriptor type
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns descriptor type:
+ * CPPI5_INFO0_DESC_TYPE_VAL_HOST
+ * CPPI5_INFO0_DESC_TYPE_VAL_MONO
+ * CPPI5_INFO0_DESC_TYPE_VAL_TR
+ */
+static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
+{
+       return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >>
+               CPPI5_INFO0_HDESC_TYPE_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_errflags - get Error Flags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ *
+ * Returns Error Flags from Packet/TR Descriptor
+ */
+static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
+{
+       return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >>
+               CPPI5_INFO1_DESC_PKTERROR_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_pktids - get Packet and Flow ids from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ *
+ * Returns Packet and Flow ids from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+                                        u32 *pkt_id, u32 *flow_id)
+{
+       *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >>
+                  CPPI5_INFO1_DESC_PKTID_SHIFT;
+       *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >>
+                   CPPI5_INFO1_DESC_FLOWID_SHIFT;
+}
+
+/**
+ * cppi5_desc_set_pktids - set Packet and Flow ids in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ */
+static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+                                        u32 pkt_id, u32 flow_id)
+{
+       desc_hdr->pkt_info1 &= ~(CPPI5_INFO1_DESC_PKTID_MASK |
+                                CPPI5_INFO1_DESC_FLOWID_MASK);
+       desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) &
+                               CPPI5_INFO1_DESC_PKTID_MASK;
+       desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) &
+                               CPPI5_INFO1_DESC_FLOWID_MASK;
+}
+
+/**
+ * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @flags: flags, supported values
+ *  CPPI5_INFO2_HDESC_RETPOLICY
+ *  CPPI5_INFO2_HDESC_EARLYRET
+ *  CPPI5_INFO2_DESC_RETPUSHPOLICY
+ * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved
+ */
+static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr,
+                                           u32 flags, u32 return_ring_id)
+{
+       desc_hdr->pkt_info2 &= ~(CPPI5_INFO2_DESC_RETP_MASK |
+                                CPPI5_INFO2_DESC_RETQ_MASK);
+       desc_hdr->pkt_info2 |= flags & CPPI5_INFO2_DESC_RETP_MASK;
+       desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK;
+}
+
+/**
+ * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Returns Packet Src/Dst Tags from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+                                          u32 *src_tag_id, u32 *dst_tag_id)
+{
+       if (src_tag_id)
+               *src_tag_id = (desc_hdr->src_dst_tag &
+                             CPPI5_INFO3_DESC_SRCTAG_MASK) >>
+                             CPPI5_INFO3_DESC_SRCTAG_SHIFT;
+       if (dst_tag_id)
+               *dst_tag_id = desc_hdr->src_dst_tag &
+                             CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Sets Packet Src/Dst Tags in the packet/TR descriptor
+ */
+static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+                                          u32 src_tag_id, u32 dst_tag_id)
+{
+       desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) &
+                               CPPI5_INFO3_DESC_SRCTAG_MASK;
+       desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size
+ * @epib: is EPIB present
+ * @psdata_size: PSDATA size
+ * @sw_data_size: SWDATA size
+ *
+ * Returns required Host Packet Descriptor size
+ * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
+ */
+static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
+                                       u32 sw_data_size)
+{
+       u32 desc_size;
+
+       if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE)
+               return 0;
+
+       desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size +
+                   sw_data_size;
+
+       if (epib)
+               desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+       return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN);
+}
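
A worked example of the calculation, assuming the 48-byte packed struct cppi5_host_desc_t defined above:

    /* EPIB present, 16 bytes PSdata, 16 bytes SWdata:
     * 48 (fixed part) + 16 (PSdata) + 16 (SWdata) + 16 (EPIB) = 96,
     * already a multiple of CPPI5_DESC_MIN_ALIGN (16).
     */
    u32 size = cppi5_hdesc_calc_size(true, 16, 16); /* -> 96 */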
+
+/**
+ * cppi5_hdesc_init - Init Host Packet Descriptor
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ *     CPPI5_INFO0_HDESC_EPIB_PRESENT
+ *     CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ * @psdata_size: PSDATA size
+ *
+ * Initializes the descriptor header and clears the linking word.
+ */
+static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags,
+                                   u32 psdata_size)
+{
+       desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST <<
+                              CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags);
+       desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+                               CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+                               CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+       desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_update_flags - Replace descriptor flags
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ *     CPPI5_INFO0_HDESC_EPIB_PRESENT
+ *     CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ */
+static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc,
+                                           u32 flags)
+{
+       desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+                                CPPI5_INFO0_HDESC_PSINFO_LOCATION);
+       desc->hdr.pkt_info0 |= flags;
+}
+
+/**
+ * cppi5_hdesc_update_psdata_size - Replace PSdata size
+ * @desc: Host packet descriptor
+ * @psdata_size: PSDATA size
+ */
+static inline void
+cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size)
+{
+       desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+       desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+                               CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+                               CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_psdata_size - get PSdata size in bytes
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
+{
+       u32 psdata_size = 0;
+
+       if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+               psdata_size = (desc->hdr.pkt_info0 &
+                              CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+                              CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+       return (psdata_size << 2);
+}
+
+/**
+ * cppi5_hdesc_get_pktlen - get Packet Length from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Packet Length from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
+{
+       return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_set_pktlen - set Packet Length in HDesc
+ * @desc: Host packet descriptor
+ */
+static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
+                                         u32 pkt_len)
+{
+       desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PKTLEN_MASK;
+       desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Protocol Specific Flags from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
+{
+       return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >>
+               CPPI5_INFO1_HDESC_PSFLGS_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
+ * @desc: Host packet descriptor
+ */
+static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
+                                          u32 ps_flags)
+{
+       desc->hdr.pkt_info1 &= ~CPPI5_INFO1_HDESC_PSFLGS_MASK;
+       desc->hdr.pkt_info1 |= (ps_flags <<
+                               CPPI5_INFO1_HDESC_PSFLGS_SHIFT) &
+                               CPPI5_INFO1_HDESC_PSFLGS_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_pkttype - get Packet Type from HDesc
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
+{
+       return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >>
+               CPPI5_INFO2_HDESC_PKTTYPE_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_pkttype - set Packet Type in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_type: Packet Type
+ */
+static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc,
+                                          u32 pkt_type)
+{
+       desc->hdr.pkt_info2 &= ~CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+       desc->hdr.pkt_info2 |=
+                       (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) &
+                        CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+}
+
+/**
+ * cppi5_hdesc_attach_buf - attach buffer to HDesc
+ * @desc: Host packet descriptor
+ * @buf: Buffer physical address
+ * @buf_data_len: Buffer length
+ * @obuf: Original Buffer physical address
+ * @obuf_len: Original Buffer length
+ *
+ * Attaches buffer to Host Packet Descriptor
+ */
+static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc,
+                                         dma_addr_t buf, u32 buf_data_len,
+                                         dma_addr_t obuf, u32 obuf_len)
+{
+       desc->buf_ptr = buf;
+       desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK;
+       desc->org_buf_ptr = obuf;
+       desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
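
A sketch of a typical TX host-descriptor setup using the helpers above; desc, buf_dma, len and tx_ret_ring are placeholders:

    cppi5_hdesc_init(desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, 0);
    cppi5_hdesc_set_pkttype(desc, 0x7); /* packet type is illustrative */
    cppi5_hdesc_set_pktlen(desc, len);
    cppi5_desc_set_retpolicy(&desc->hdr, 0, tx_ret_ring);
    cppi5_hdesc_attach_buf(desc, buf_dma, len, buf_dma, len);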
+
+static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc,
+                                       dma_addr_t *obuf, u32 *obuf_len)
+{
+       *obuf = desc->org_buf_ptr;
+       *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
+static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
+{
+       desc->buf_ptr = desc->org_buf_ptr;
+       desc->buf_info1 = desc->org_buf_len;
+}
+
+/**
+ * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
+ * @desc: Host Packet Descriptor
+ * @buf_desc: Host Buffer Descriptor physical address
+ *
+ * add and link Host Buffer Descriptor to HDesc
+ */
+static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc,
+                                          dma_addr_t hbuf_desc)
+{
+       desc->next_desc = hbuf_desc;
+}
+
+static inline dma_addr_t
+cppi5_hdesc_get_next_hbdesc(struct cppi5_host_desc_t *desc)
+{
+       return (dma_addr_t)desc->next_desc;
+}
+
+static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
+{
+       desc->hdr = (struct cppi5_desc_hdr_t) { 0 };
+       desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_epib_present -  check if EPIB present
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns true if EPIB present in the packet
+ */
+static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
+{
+       return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT);
+}
+
+/**
+ * cppi5_hdesc_get_psdata - Get pointer to PSDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to the PSDATA within the HDesc, or NULL if the PSdata
+ * is placed at the start of the data buffer.
+ */
+static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
+{
+       u32 psdata_size;
+       void *psdata;
+
+       if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)
+               return NULL;
+
+       psdata_size = (desc->hdr.pkt_info0 &
+                      CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+                      CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+       if (!psdata_size)
+               return NULL;
+
+       psdata = &desc->epib;
+
+       if (cppi5_hdesc_epib_present(&desc->hdr))
+               psdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+       return psdata;
+}
+
+/**
+ * cppi5_hdesc_get_swdata - Get pointer to SWDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to the SWDATA within the HDesc.
+ * NOTE: it is the caller's responsibility to ensure the hdesc actually has
+ * swdata.
+ */
+static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
+{
+       u32 psdata_size = 0;
+       void *swdata;
+
+       if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+               psdata_size = (desc->hdr.pkt_info0 &
+                              CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+                              CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+       swdata = &desc->epib;
+
+       if (cppi5_hdesc_epib_present(&desc->hdr))
+               swdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+       swdata += (psdata_size << 2);
+
+       return swdata;
+}
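
On the RX completion side, unpacking a popped descriptor could look like this sketch (placeholders throughout; swdata is only valid if it was reserved at alloc time):

    dma_addr_t buf_dma;
    u32 buf_len, pkt_len;
    void *swdata;

    pkt_len = cppi5_hdesc_get_pktlen(desc);
    cppi5_hdesc_get_obuf(desc, &buf_dma, &buf_len);
    swdata = cppi5_hdesc_get_swdata(desc);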
+
+/* ================================== TR ================================== */
+
+#define CPPI5_TR_TYPE_SHIFT                    (0U)
+#define CPPI5_TR_TYPE_MASK                     GENMASK(3, 0)
+#define CPPI5_TR_STATIC                                BIT(4)
+#define CPPI5_TR_WAIT                          BIT(5)
+#define CPPI5_TR_EVENT_SIZE_SHIFT              (6U)
+#define CPPI5_TR_EVENT_SIZE_MASK               GENMASK(7, 6)
+#define CPPI5_TR_TRIGGER0_SHIFT                        (8U)
+#define CPPI5_TR_TRIGGER0_MASK                 GENMASK(9, 8)
+#define CPPI5_TR_TRIGGER0_TYPE_SHIFT           (10U)
+#define CPPI5_TR_TRIGGER0_TYPE_MASK            GENMASK(11, 10)
+#define CPPI5_TR_TRIGGER1_SHIFT                        (12U)
+#define CPPI5_TR_TRIGGER1_MASK                 GENMASK(13, 12)
+#define CPPI5_TR_TRIGGER1_TYPE_SHIFT           (14U)
+#define CPPI5_TR_TRIGGER1_TYPE_MASK            GENMASK(15, 14)
+#define CPPI5_TR_CMD_ID_SHIFT                  (16U)
+#define CPPI5_TR_CMD_ID_MASK                   GENMASK(23, 16)
+#define CPPI5_TR_CSF_FLAGS_SHIFT               (24U)
+#define CPPI5_TR_CSF_FLAGS_MASK                        GENMASK(31, 24)
+#define   CPPI5_TR_CSF_SA_INDIRECT             BIT(0)
+#define   CPPI5_TR_CSF_DA_INDIRECT             BIT(1)
+#define   CPPI5_TR_CSF_SUPR_EVT                        BIT(2)
+#define   CPPI5_TR_CSF_EOL_ADV_SHIFT           (4U)
+#define   CPPI5_TR_CSF_EOL_ADV_MASK            GENMASK(6, 4)
+#define   CPPI5_TR_CSF_EOP                     BIT(7)
+
+/**
+ * enum cppi5_tr_types - TR types
+ * @CPPI5_TR_TYPE0:    One dimensional data move
+ * @CPPI5_TR_TYPE1:    Two dimensional data move
+ * @CPPI5_TR_TYPE2:    Three dimensional data move
+ * @CPPI5_TR_TYPE3:    Four dimensional data move
+ * @CPPI5_TR_TYPE4:    Four dimensional data move with data formatting
+ * @CPPI5_TR_TYPE5:    Four dimensional Cache Warm
+ * @CPPI5_TR_TYPE8:    Four Dimensional Block Move
+ * @CPPI5_TR_TYPE9:    Four Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE10:   Two Dimensional Block Move
+ * @CPPI5_TR_TYPE11:   Two Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE15:   Four Dimensional Block Move with Repacking and
+ *                     Indirection
+ */
+enum cppi5_tr_types {
+       CPPI5_TR_TYPE0 = 0,
+       CPPI5_TR_TYPE1,
+       CPPI5_TR_TYPE2,
+       CPPI5_TR_TYPE3,
+       CPPI5_TR_TYPE4,
+       CPPI5_TR_TYPE5,
+       /* type6-7: Reserved */
+       CPPI5_TR_TYPE8 = 8,
+       CPPI5_TR_TYPE9,
+       CPPI5_TR_TYPE10,
+       CPPI5_TR_TYPE11,
+       /* type12-14: Reserved */
+       CPPI5_TR_TYPE15 = 15,
+       CPPI5_TR_TYPE_MAX
+};
+
+/**
+ * enum cppi5_tr_event_size - TR Flags EVENT_SIZE field specifies when an event
+ *                           is generated for each TR.
+ * @CPPI5_TR_EVENT_SIZE_COMPLETION:    When TR is complete and all status for
+ *                                     the TR has been received
+ * @CPPI5_TR_EVENT_SIZE_ICNT1_DEC:     Type 0: when the last data transaction
+ *                                     is sent for the TR
+ *                                     Type 1-11: when ICNT1 is decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT2_DEC:     Type 0-1,10-11: when the last data
+ *                                     transaction is sent for the TR
+ *                                     All other types: when ICNT2 is
+ *                                     decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT3_DEC:     Type 0-2,10-11: when the last data
+ *                                     transaction is sent for the TR
+ *                                     All other types: when ICNT3 is
+ *                                     decremented
+ */
+enum cppi5_tr_event_size {
+       CPPI5_TR_EVENT_SIZE_COMPLETION,
+       CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
+       CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
+       CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
+       CPPI5_TR_EVENT_SIZE_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger - TR Flags TRIGGERx field specifies the type of trigger
+ *                        used to enable the TR to transfer data as specified
+ *                        by TRIGGERx_TYPE field.
+ * @CPPI5_TR_TRIGGER_NONE:             No trigger
+ * @CPPI5_TR_TRIGGER_GLOBAL0:          Global trigger 0
+ * @CPPI5_TR_TRIGGER_GLOBAL1:          Global trigger 1
+ * @CPPI5_TR_TRIGGER_LOCAL_EVENT:      Local Event
+ */
+enum cppi5_tr_trigger {
+       CPPI5_TR_TRIGGER_NONE,
+       CPPI5_TR_TRIGGER_GLOBAL0,
+       CPPI5_TR_TRIGGER_GLOBAL1,
+       CPPI5_TR_TRIGGER_LOCAL_EVENT,
+       CPPI5_TR_TRIGGER_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger_type - TR Flags TRIGGERx_TYPE field specifies the type
+ *                             of data transfer that will be enabled by
+ *                             receiving a trigger as specified by TRIGGERx.
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC:   The second inner most loop (ICNT1) will
+ *                                     be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC:   The third inner most loop (ICNT2) will
+ *                                     be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC:   The outer most loop (ICNT3) will be
+ *                                     decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ALL:         The entire TR will be allowed to
+ *                                     complete
+ */
+enum cppi5_tr_trigger_type {
+       CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC,
+       CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+       CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
+       CPPI5_TR_TRIGGER_TYPE_ALL,
+       CPPI5_TR_TRIGGER_TYPE_MAX
+};
+
+typedef u32 cppi5_tr_flags_t;
+
+/**
+ * struct cppi5_tr_type0_t - Type 0 (One dimensional data move) TR (16 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost)
+ * @_reserved:         Not used
+ * @addr:              Starting address for the source data or destination data
+ */
+struct cppi5_tr_type0_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 _reserved;
+       u64 addr;
+} __aligned(16) __packed;
+
+/**
+ * struct cppi5_tr_type1_t - Type 1 (Two dimensional data move) TR (32 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost)
+ * @icnt1:             Total loop iteration count for level 1
+ * @addr:              Starting address for the source data or destination data
+ * @dim1:              Signed dimension for loop level 1
+ */
+struct cppi5_tr_type1_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 icnt1;
+       u64 addr;
+       s32 dim1;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type2_t - Type 2 (Three dimensional data move) TR (32 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost)
+ * @icnt1:             Total loop iteration count for level 1
+ * @addr:              Starting address for the source data or destination data
+ * @dim1:              Signed dimension for loop level 1
+ * @icnt2:             Total loop iteration count for level 2
+ * @_reserved:         Not used
+ * @dim2:              Signed dimension for loop level 2
+ */
+struct cppi5_tr_type2_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 icnt1;
+       u64 addr;
+       s32 dim1;
+       u16 icnt2;
+       u16 _reserved;
+       s32 dim2;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type3_t - Type 3 (Four dimensional data move) TR (32 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost)
+ * @icnt1:             Total loop iteration count for level 1
+ * @addr:              Starting address for the source data or destination data
+ * @dim1:              Signed dimension for loop level 1
+ * @icnt2:             Total loop iteration count for level 2
+ * @icnt3:             Total loop iteration count for level 3 (outermost)
+ * @dim2:              Signed dimension for loop level 2
+ * @dim3:              Signed dimension for loop level 3
+ */
+struct cppi5_tr_type3_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 icnt1;
+       u64 addr;
+       s32 dim1;
+       u16 icnt2;
+       u16 icnt3;
+       s32 dim2;
+       s32 dim3;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type15_t - Type 15 (Four Dimensional Block Copy with
+ *                           Repacking and Indirection Support) TR (64 byte)
+ * @flags:             TR flags (type, triggers, event, configuration)
+ * @icnt0:             Total loop iteration count for level 0 (innermost) for
+ *                     source
+ * @icnt1:             Total loop iteration count for level 1 for source
+ * @addr:              Starting address for the source data
+ * @dim1:              Signed dimension for loop level 1 for source
+ * @icnt2:             Total loop iteration count for level 2 for source
+ * @icnt3:             Total loop iteration count for level 3 (outermost) for
+ *                     source
+ * @dim2:              Signed dimension for loop level 2 for source
+ * @dim3:              Signed dimension for loop level 3 for source
+ * @_reserved:         Not used
+ * @ddim1:             Signed dimension for loop level 1 for destination
+ * @daddr:             Starting address for the destination data
+ * @ddim2:             Signed dimension for loop level 2 for destination
+ * @ddim3:             Signed dimension for loop level 3 for destination
+ * @dicnt0:            Total loop iteration count for level 0 (innermost) for
+ *                     destination
+ * @dicnt1:            Total loop iteration count for level 1 for destination
+ * @dicnt2:            Total loop iteration count for level 2 for destination
+ * @dicnt3:            Total loop iteration count for level 3 (outermost) for
+ *                     destination
+ */
+struct cppi5_tr_type15_t {
+       cppi5_tr_flags_t flags;
+       u16 icnt0;
+       u16 icnt1;
+       u64 addr;
+       s32 dim1;
+       u16 icnt2;
+       u16 icnt3;
+       s32 dim2;
+       s32 dim3;
+       u32 _reserved;
+       s32 ddim1;
+       u64 daddr;
+       s32 ddim2;
+       s32 ddim3;
+       u16 dicnt0;
+       u16 dicnt1;
+       u16 dicnt2;
+       u16 dicnt3;
+} __aligned(64) __packed;
+
+/**
+ * struct cppi5_tr_resp_t - TR response record
+ * @status:            Status type and info
+ * @_reserved:         Not used
+ * @cmd_id:            Command ID for the TR for TR identification
+ * @flags:             Configuration Specific Flags
+ */
+struct cppi5_tr_resp_t {
+       u8 status;
+       u8 _reserved;
+       u8 cmd_id;
+       u8 flags;
+} __packed;
+
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT    (0U)
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK     GENMASK(3, 0)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT    (4U)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK     GENMASK(7, 4)
+#define CPPI5_TR_RESPONSE_CMDID_SHIFT          (16U)
+#define CPPI5_TR_RESPONSE_CMDID_MASK           GENMASK(23, 16)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT   (24U)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK    GENMASK(31, 24)
+
+/**
+ * enum cppi5_tr_resp_status_type - TR Response Status Type field is used to
+ *                                 determine what type of status is being
+ *                                 returned.
+ * @CPPI5_TR_RESPONSE_STATUS_NONE:             No error, completion: completed
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR:     Transfer Error, completion: none
+ *                                             or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR:      Aborted Error, completion: none
+ *                                             or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR:   Submission Error, completion:
+ *                                             none
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR:  Unsupported Error, completion:
+ *                                             none
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION: Transfer Exception, completion:
+ *                                             partially completed
+ * @CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH:  Teardown Flush, completion: none
+ */
+enum cppi5_tr_resp_status_type {
+       CPPI5_TR_RESPONSE_STATUS_NONE,
+       CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR,
+       CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR,
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR,
+       CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION,
+       CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH,
+       CPPI5_TR_RESPONSE_STATUS_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_submission - TR Response Status field values which
+ *                                       correspond to a Submission Error
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0: ICNT0 was 0
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL: Channel FIFO was full when TR
+ *                                             received
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN:   Channel is not owned by the
+ *                                             submitter
+ */
+enum cppi5_tr_resp_status_submission {
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
+       CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_unsupported - TR Response Status field values which
+ *                                        correspond to an Unsupported Error
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE:      TR Type not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC:       STATIC not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL:          EOL not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC: CONFIGURATION SPECIFIC
+ *                                                     not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE:                AMODE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE:       ELTYPE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT:         DFMT not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR:                SECTR not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC: AMODE SPECIFIC field
+ *                                                     not supported
+ */
+enum cppi5_tr_resp_status_unsupported {
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
+       CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
+};
+
+/**
+ * cppi5_trdesc_calc_size - Calculate TR Descriptor size
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ *
+ * Returns required TR Descriptor size
+ */
+static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
+{
+       /*
+        * The Size of a TR descriptor is:
+        * 1 x tr_size : the first 16 bytes is used by the packet info block +
+        * tr_count x tr_size : Transfer Request Records +
+        * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records
+        */
+       return tr_size * (tr_count + 1) +
+               sizeof(struct cppi5_tr_resp_t) * tr_count;
+}
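
For example, with two 32-byte TR records:

    /* 32 * (2 + 1)                      = 96 bytes (info block + 2 TRs)
     * 2 * sizeof(struct cppi5_tr_resp_t) =  8 bytes (response records)
     * total                              = 104 bytes
     */
    size_t size = cppi5_trdesc_calc_size(2, 32); /* -> 104 */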
+
+/**
+ * cppi5_trdesc_init - Init TR Descriptor
+ * @desc_hdr: TR Descriptor header
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ * @reload_idx: Absolute index to jump to on the 2nd and following passes
+ *             through the TR packet.
+ * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff
+ *               indicates infinite looping.
+ *
+ * Init TR Descriptor
+ */
+static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr,
+                                    u32 tr_count, u32 tr_size, u32 reload_idx,
+                                    u32 reload_count)
+{
+       desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR <<
+                             CPPI5_INFO0_HDESC_TYPE_SHIFT;
+       desc_hdr->pkt_info0 |=
+                       (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) &
+                       CPPI5_INFO0_TRDESC_RLDCNT_MASK;
+       desc_hdr->pkt_info0 |=
+                       (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) &
+                       CPPI5_INFO0_TRDESC_RLDIDX_MASK;
+       desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK;
+
+       desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) <<
+                               CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) &
+                               CPPI5_INFO1_TRDESC_RECSIZE_MASK;
+}
+
+/**
+ * cppi5_tr_init - Init TR record
+ * @flags: Pointer to the TR's flags
+ * @type: TR type
+ * @static_tr: TR is static
+ * @wait: Wait for TR completion before allowing the next TR to start
+ * @event_size: output event generation cfg
+ * @cmd_id: TR identifier (application specifics)
+ *
+ * Init TR record
+ */
+static inline void cppi5_tr_init(cppi5_tr_flags_t *flags,
+                                enum cppi5_tr_types type, bool static_tr,
+                                bool wait, enum cppi5_tr_event_size event_size,
+                                u32 cmd_id)
+{
+       *flags = type;
+       *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) &
+                 CPPI5_TR_EVENT_SIZE_MASK;
+
+       *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) &
+                 CPPI5_TR_CMD_ID_MASK;
+
+       if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9))
+               *flags |= CPPI5_TR_STATIC;
+
+       if (wait)
+               *flags |= CPPI5_TR_WAIT;
+}
+
+/**
+ * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type
+ * @flags: Pointer to the TR's flags
+ * @trigger0: trigger0 selection
+ * @trigger0_type: type of data transfer that will be enabled by trigger0
+ * @trigger1: trigger1 selection
+ * @trigger1_type: type of data transfer that will be enabled by trigger1
+ *
+ * Configure the triggers for the TR
+ */
+static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
+               enum cppi5_tr_trigger trigger0,
+               enum cppi5_tr_trigger_type trigger0_type,
+               enum cppi5_tr_trigger trigger1,
+               enum cppi5_tr_trigger_type trigger1_type)
+{
+       *flags &= ~(CPPI5_TR_TRIGGER0_MASK | CPPI5_TR_TRIGGER0_TYPE_MASK |
+                   CPPI5_TR_TRIGGER1_MASK | CPPI5_TR_TRIGGER1_TYPE_MASK);
+       *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) &
+                 CPPI5_TR_TRIGGER0_MASK;
+       *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) &
+                 CPPI5_TR_TRIGGER0_TYPE_MASK;
+
+       *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) &
+                 CPPI5_TR_TRIGGER1_MASK;
+       *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) &
+                 CPPI5_TR_TRIGGER1_TYPE_MASK;
+}
+
+/**
+ * cppi5_tr_csf_set - Update the Configuration specific flags
+ * @flags: Pointer to the TR's flags
+ * @csf: Configuration specific flags
+ *
+ * Set a bit in Configuration Specific Flags section of the TR flags.
+ */
+static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf)
+{
+       *flags &= ~CPPI5_TR_CSF_FLAGS_MASK;
+       *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) &
+                 CPPI5_TR_CSF_FLAGS_MASK;
+}
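
Combining the TR flag helpers, a hedged sketch for a Type 1 TR whose per-TR completion event is suppressed; tr is a placeholder pointing at a struct cppi5_tr_type1_t record:

    cppi5_tr_flags_t flags;

    cppi5_tr_init(&flags, CPPI5_TR_TYPE1, false, false,
                  CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
    cppi5_tr_csf_set(&flags, CPPI5_TR_CSF_SUPR_EVT); /* no event for this TR */
    tr->flags = flags;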
+
+#endif /* __TI_CPPI5_H__ */
index 8fcdee1c0cf9559fc99671bdfb64782acd08619f..64461fc64e1bd2dddddf77f80388f7fb513cf094 100644 (file)
@@ -219,6 +219,62 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
  * @bytes_transferred: byte counter
  */
 
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ *  client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ *  helper) to the descriptor.
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *     construct the metadata in the client's buffer
+ *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ *     descriptor
+ *   3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ *     descriptor
+ *   3. submit the transfer
+ *   4. when the transfer is completed, the metadata should be available in the
+ *     attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ *  driver. The client driver can ask for the pointer, maximum size and the
+ *  currently used size of the metadata and can directly update or read it.
+ *  dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
+ *  provided as helper functions.
+ *
+ *  Note: the metadata area for the descriptor is no longer valid after the
+ *  transfer has been completed (valid up to the point when the completion
+ *  callback returns if used).
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ *     metadata area
+ *   3. update the metadata at the pointer
+ *   4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ *     of data the client has placed into the metadata buffer
+ *   5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ *   1. prepare the descriptor (dmaengine_prep_*)
+ *   2. submit the transfer
+ *   3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ *     pointer to the engine's metadata area
+ *   4. Read out the metadata from the pointer
+ *
+ * Note: the two modes are not compatible and clients must use only one mode
+ * per descriptor.
+ */
+enum dma_desc_metadata_mode {
+       DESC_METADATA_NONE = 0,
+       DESC_METADATA_CLIENT = BIT(0),
+       DESC_METADATA_ENGINE = BIT(1),
+};
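A minimal sketch of the DESC_METADATA_CLIENT flow documented above, for the DMA_MEM_TO_DEV case; chan, buf, len, md_buf and md_len are placeholders, and the prep/submit calls are the usual dmaengine client API:

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* step 1: prepare the descriptor; the metadata has already been
	 * constructed in the client's md_buf */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* step 2: attach the client's buffer to the descriptor */
	if (dmaengine_desc_attach_metadata(desc, md_buf, md_len))
		return -EINVAL;

	/* step 3: submit the transfer */
	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);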
+
 struct dma_chan_percpu {
        /* stats */
        unsigned long memcpy_count;
@@ -238,10 +294,12 @@ struct dma_router {
 /**
  * struct dma_chan - devices supply DMA channels, clients use them
  * @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
  * @cookie: last cookie value returned to client
  * @completed_cookie: last completed cookie for this channel
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
+ * @name: backlink name for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client_count: how many clients are using this channel
@@ -252,12 +310,14 @@ struct dma_router {
  */
 struct dma_chan {
        struct dma_device *device;
+       struct device *slave;
        dma_cookie_t cookie;
        dma_cookie_t completed_cookie;
 
        /* sysfs */
        int chan_id;
        struct dma_chan_dev *dev;
+       const char *name;
 
        struct list_head device_node;
        struct dma_chan_percpu __percpu *local;
@@ -475,19 +535,36 @@ struct dmaengine_unmap_data {
        dma_addr_t addr[0];
 };
 
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+       int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+                     size_t len);
+
+       void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+                        size_t *payload_len, size_t *max_len);
+       int (*set_len)(struct dma_async_tx_descriptor *desc,
+                      size_t payload_len);
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
  * @cookie: tracking cookie for this transaction, set to -EBUSY if
  *     this tx is sitting on a dependency list
  * @flags: flags to augment operation preparation, control completion, and
- *     communicate status
+ *     communicate status
  * @phys: physical address of the descriptor
  * @chan: target channel for this operation
  * @tx_submit: accept the descriptor, assign ordered cookie and mark the
  * descriptor pending. To be pushed on .issue_pending() call
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core managed metadata mode to protect against mixed
+ *     use of DESC_METADATA_CLIENT and DESC_METADATA_ENGINE; otherwise
+ *     DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, which need to be set
+ *     by the DMA driver if metadata mode is supported with the descriptor
  * ---async_tx api specific fields---
  * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
@@ -504,6 +581,8 @@ struct dma_async_tx_descriptor {
        dma_async_tx_callback_result callback_result;
        void *callback_param;
        struct dmaengine_unmap_data *unmap;
+       enum dma_desc_metadata_mode desc_metadata_mode;
+       struct dma_descriptor_metadata_ops *metadata_ops;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        struct dma_async_tx_descriptor *next;
        struct dma_async_tx_descriptor *parent;
@@ -611,11 +690,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
  * @residue: the remaining number of bytes left to transmit
  *     on the selected transfer for states DMA_IN_PROGRESS and
  *     DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA engine.
  */
 struct dma_tx_state {
        dma_cookie_t last;
        dma_cookie_t used;
        u32 residue;
+       u32 in_flight_bytes;
 };
 
 /**
@@ -666,6 +747,7 @@ struct dma_filter {
  * @global_node: list_head for global dma_device_list
  * @filter: information for device/slave to filter function/param mapping
  * @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
  * @copy_align: alignment shift for memcpy operations
@@ -674,6 +756,7 @@ struct dma_filter {
  * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
  * @src_addr_widths: bit mask of src addr widths the device supports
  *     Width is specified in bytes, e.g. for a device supporting
  *     a width of 4 the mask should have BIT(4) set.
@@ -718,15 +801,21 @@ struct dma_filter {
  *     will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
  * @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called some time after dma_async_device_unregister() is
+ *     called and there are no further references to this structure. This
+ *     must be implemented to free resources; however, many existing drivers
+ *     do not, and such drivers are therefore not safe to unbind while in
+ *     use.
+ *
  */
 struct dma_device {
-
+       struct kref ref;
        unsigned int chancnt;
        unsigned int privatecnt;
        struct list_head channels;
        struct list_head global_node;
        struct dma_filter filter;
        dma_cap_mask_t  cap_mask;
+       enum dma_desc_metadata_mode desc_metadata_modes;
        unsigned short max_xor;
        unsigned short max_pq;
        enum dmaengine_alignment copy_align;
@@ -737,6 +826,7 @@ struct dma_device {
 
        int dev_id;
        struct device *dev;
+       struct module *owner;
 
        u32 src_addr_widths;
        u32 dst_addr_widths;
@@ -800,6 +890,7 @@ struct dma_device {
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate);
        void (*device_issue_pending)(struct dma_chan *chan);
+       void (*device_release)(struct dma_device *dev);
 };
 
 static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -902,6 +993,41 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
                                                    len, flags);
 }
 
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+               enum dma_desc_metadata_mode mode)
+{
+       if (!chan)
+               return false;
+
+       return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+                                  void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+                                     size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+                                   size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+               struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+       return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+               struct dma_async_tx_descriptor *desc, size_t *payload_len,
+               size_t *max_len)
+{
+       return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+               struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
+
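A matching sketch of the DESC_METADATA_ENGINE flow for DMA_MEM_TO_DEV using the helpers declared above; desc, chan, my_md and my_md_len are placeholders, and the ERR_PTR() handling assumes the implementation reports failures that way:

	size_t payload_len, max_len;
	void *mdptr;

	if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_ENGINE))
		return -ENOTSUPP;

	/* step 2: get the pointer to the engine's metadata area */
	mdptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR_OR_NULL(mdptr))
		return mdptr ? PTR_ERR(mdptr) : -EINVAL;

	/* step 3: update the metadata at the pointer */
	payload_len = min_t(size_t, my_md_len, max_len);
	memcpy(mdptr, my_md, payload_len);

	/* step 4: tell the DMA engine how much metadata was written */
	if (dmaengine_desc_set_metadata_len(desc, payload_len))
		return -EINVAL;

	/* step 5: submit the transfer as usual */
	dmaengine_submit(desc);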
 /**
  * dmaengine_terminate_all() - Terminate all active DMA transfers
  * @chan: The channel for which to terminate the transfers
@@ -1364,8 +1490,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
        struct dma_slave_caps caps;
+       int ret;
 
-       dma_get_slave_caps(tx->chan, &caps);
+       ret = dma_get_slave_caps(tx->chan, &caps);
+       if (ret)
+               return ret;
 
        if (caps.descriptor_reuse) {
                tx->flags |= DMA_CTRL_REUSE;
@@ -1399,16 +1528,16 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
 int dma_async_device_register(struct dma_device *device);
 int dmaenginem_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+                                     struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+                                        struct dma_chan *chan);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
 #define dma_request_channel(mask, x, y) \
        __dma_request_channel(&(mask), x, y, NULL)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
-       __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
 
 static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
                                  dma_filter_fn fn, void *fn_param,
                                  struct device *dev, const char *name)
 {
@@ -1421,6 +1550,25 @@ static inline struct dma_chan
        if (!fn || !fn_param)
                return NULL;
 
-       return __dma_request_channel(mask, fn, fn_param, NULL);
+       return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
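With the __dma_request_slave_channel_compat() wrapper macro folded into the inline above, the mask is now passed by value. A hedged caller sketch (dev, my_filter and my_param are placeholders):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_slave_channel_compat(mask, my_filter, my_param,
						dev, "rx");
	if (!chan)
		return -ENODEV;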
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+       switch (dir) {
+       case DMA_DEV_TO_MEM:
+               return "DEV_TO_MEM";
+       case DMA_MEM_TO_DEV:
+               return "MEM_TO_DEV";
+       case DMA_MEM_TO_MEM:
+               return "MEM_TO_MEM";
+       case DMA_DEV_TO_DEV:
+               return "DEV_TO_DEV";
+       default:
+               break;
+       }
+
+       return "invalid";
 }
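For instance, the helper above can feed diagnostics (a sketch; dev and dir are placeholders):

	dev_dbg(dev, "%s transfer issued\n", dmaengine_get_direction_text(dir));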
 #endif /* DMAENGINE_H */
index 99dfea595c8c79880e19740ac088d12b4b306671..7efd7072cca5855b1604e18255977dad64378c61 100644 (file)
@@ -48,6 +48,27 @@ typedef u16 efi_char16_t;            /* UNICODE character */
 typedef u64 efi_physical_addr_t;
 typedef void *efi_handle_t;
 
+#if defined(CONFIG_X86_64)
+#define __efiapi __attribute__((ms_abi))
+#elif defined(CONFIG_X86_32)
+#define __efiapi __attribute__((regparm(0)))
+#else
+#define __efiapi
+#endif
+
+#define efi_get_handle_at(array, idx)                                  \
+       (efi_is_native() ? (array)[idx]                                 \
+               : (efi_handle_t)(unsigned long)((u32 *)(array))[idx])
+
+#define efi_get_handle_num(size)                                       \
+       ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
+
+#define for_each_efi_handle(handle, array, size, i)                    \
+       for (i = 0;                                                     \
+            i < efi_get_handle_num(size) &&                            \
+               ((handle = efi_get_handle_at((array), i)) || true);     \
+            i++)
+
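A hedged sketch of for_each_efi_handle(), which hides the narrower 32-bit handles of mixed mode behind efi_get_handle_at(); buf and size are placeholders that would come from a locate_handle()-style call:

	efi_handle_t handle;
	int i;

	for_each_efi_handle(handle, buf, size, i) {
		/* inspect each handle, e.g. look up a protocol on it */
	}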
 /*
  * The UEFI spec and EDK2 reference implementation both define EFI_GUID as
  * struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment
@@ -251,106 +272,71 @@ typedef struct {
        u32 create_event_ex;
 } __packed efi_boot_services_32_t;
 
-typedef struct {
-       efi_table_hdr_t hdr;
-       u64 raise_tpl;
-       u64 restore_tpl;
-       u64 allocate_pages;
-       u64 free_pages;
-       u64 get_memory_map;
-       u64 allocate_pool;
-       u64 free_pool;
-       u64 create_event;
-       u64 set_timer;
-       u64 wait_for_event;
-       u64 signal_event;
-       u64 close_event;
-       u64 check_event;
-       u64 install_protocol_interface;
-       u64 reinstall_protocol_interface;
-       u64 uninstall_protocol_interface;
-       u64 handle_protocol;
-       u64 __reserved;
-       u64 register_protocol_notify;
-       u64 locate_handle;
-       u64 locate_device_path;
-       u64 install_configuration_table;
-       u64 load_image;
-       u64 start_image;
-       u64 exit;
-       u64 unload_image;
-       u64 exit_boot_services;
-       u64 get_next_monotonic_count;
-       u64 stall;
-       u64 set_watchdog_timer;
-       u64 connect_controller;
-       u64 disconnect_controller;
-       u64 open_protocol;
-       u64 close_protocol;
-       u64 open_protocol_information;
-       u64 protocols_per_handle;
-       u64 locate_handle_buffer;
-       u64 locate_protocol;
-       u64 install_multiple_protocol_interfaces;
-       u64 uninstall_multiple_protocol_interfaces;
-       u64 calculate_crc32;
-       u64 copy_mem;
-       u64 set_mem;
-       u64 create_event_ex;
-} __packed efi_boot_services_64_t;
-
 /*
  * EFI Boot Services table
  */
-typedef struct {
-       efi_table_hdr_t hdr;
-       void *raise_tpl;
-       void *restore_tpl;
-       efi_status_t (*allocate_pages)(int, int, unsigned long,
-                                      efi_physical_addr_t *);
-       efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
-       efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
-                                      unsigned long *, u32 *);
-       efi_status_t (*allocate_pool)(int, unsigned long, void **);
-       efi_status_t (*free_pool)(void *);
-       void *create_event;
-       void *set_timer;
-       void *wait_for_event;
-       void *signal_event;
-       void *close_event;
-       void *check_event;
-       void *install_protocol_interface;
-       void *reinstall_protocol_interface;
-       void *uninstall_protocol_interface;
-       efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
-       void *__reserved;
-       void *register_protocol_notify;
-       efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
-                                     unsigned long *, efi_handle_t *);
-       void *locate_device_path;
-       efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
-       void *load_image;
-       void *start_image;
-       void *exit;
-       void *unload_image;
-       efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
-       void *get_next_monotonic_count;
-       void *stall;
-       void *set_watchdog_timer;
-       void *connect_controller;
-       void *disconnect_controller;
-       void *open_protocol;
-       void *close_protocol;
-       void *open_protocol_information;
-       void *protocols_per_handle;
-       void *locate_handle_buffer;
-       efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
-       void *install_multiple_protocol_interfaces;
-       void *uninstall_multiple_protocol_interfaces;
-       void *calculate_crc32;
-       void *copy_mem;
-       void *set_mem;
-       void *create_event_ex;
+typedef union {
+       struct {
+               efi_table_hdr_t hdr;
+               void *raise_tpl;
+               void *restore_tpl;
+               efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long,
+                                                       efi_physical_addr_t *);
+               efi_status_t (__efiapi *free_pages)(efi_physical_addr_t,
+                                                   unsigned long);
+               efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *,
+                                                       unsigned long *,
+                                                       unsigned long *, u32 *);
+               efi_status_t (__efiapi *allocate_pool)(int, unsigned long,
+                                                      void **);
+               efi_status_t (__efiapi *free_pool)(void *);
+               void *create_event;
+               void *set_timer;
+               void *wait_for_event;
+               void *signal_event;
+               void *close_event;
+               void *check_event;
+               void *install_protocol_interface;
+               void *reinstall_protocol_interface;
+               void *uninstall_protocol_interface;
+               efi_status_t (__efiapi *handle_protocol)(efi_handle_t,
+                                                        efi_guid_t *, void **);
+               void *__reserved;
+               void *register_protocol_notify;
+               efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *,
+                                                      void *, unsigned long *,
+                                                      efi_handle_t *);
+               void *locate_device_path;
+               efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *,
+                                                                    void *);
+               void *load_image;
+               void *start_image;
+               void *exit;
+               void *unload_image;
+               efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
+                                                           unsigned long);
+               void *get_next_monotonic_count;
+               void *stall;
+               void *set_watchdog_timer;
+               void *connect_controller;
+               efi_status_t (__efiapi *disconnect_controller)(efi_handle_t,
+                                                              efi_handle_t,
+                                                              efi_handle_t);
+               void *open_protocol;
+               void *close_protocol;
+               void *open_protocol_information;
+               void *protocols_per_handle;
+               void *locate_handle_buffer;
+               efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
+                                                        void **);
+               void *install_multiple_protocol_interfaces;
+               void *uninstall_multiple_protocol_interfaces;
+               void *calculate_crc32;
+               void *copy_mem;
+               void *set_mem;
+               void *create_event_ex;
+       };
+       efi_boot_services_32_t mixed_mode;
 } efi_boot_services_t;
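The union overlays the native, __efiapi-typed view on the same memory as the packed 32-bit table, so mixed-mode support becomes a field selection instead of a separate type. A hedged sketch (bt, sz and buf are placeholders; efi32_thunk() stands in for a hypothetical arch helper that invokes a u32 entry point):

	efi_status_t status;

	if (efi_is_native())
		/* direct call through the __efiapi-annotated pointer */
		status = bt->allocate_pool(EFI_LOADER_DATA, sz, &buf);
	else
		/* same table memory, seen as 32-bit fields */
		status = efi32_thunk(bt->mixed_mode.allocate_pool,
				     EFI_LOADER_DATA, sz, &buf);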
 
 typedef enum {
@@ -383,10 +369,14 @@ typedef struct {
        u32 write;
 } efi_pci_io_protocol_access_32_t;
 
-typedef struct {
-       u64 read;
-       u64 write;
-} efi_pci_io_protocol_access_64_t;
+typedef union efi_pci_io_protocol efi_pci_io_protocol_t;
+
+typedef
+efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *,
+                                                  EFI_PCI_IO_PROTOCOL_WIDTH,
+                                                  u32 offset,
+                                                  unsigned long count,
+                                                  void *buffer);
 
 typedef struct {
        void *read;
@@ -394,64 +384,54 @@ typedef struct {
 } efi_pci_io_protocol_access_t;
 
 typedef struct {
-       u32 poll_mem;
-       u32 poll_io;
-       efi_pci_io_protocol_access_32_t mem;
-       efi_pci_io_protocol_access_32_t io;
-       efi_pci_io_protocol_access_32_t pci;
-       u32 copy_mem;
-       u32 map;
-       u32 unmap;
-       u32 allocate_buffer;
-       u32 free_buffer;
-       u32 flush;
-       u32 get_location;
-       u32 attributes;
-       u32 get_bar_attributes;
-       u32 set_bar_attributes;
-       u64 romsize;
-       u32 romimage;
-} efi_pci_io_protocol_32_t;
-
-typedef struct {
-       u64 poll_mem;
-       u64 poll_io;
-       efi_pci_io_protocol_access_64_t mem;
-       efi_pci_io_protocol_access_64_t io;
-       efi_pci_io_protocol_access_64_t pci;
-       u64 copy_mem;
-       u64 map;
-       u64 unmap;
-       u64 allocate_buffer;
-       u64 free_buffer;
-       u64 flush;
-       u64 get_location;
-       u64 attributes;
-       u64 get_bar_attributes;
-       u64 set_bar_attributes;
-       u64 romsize;
-       u64 romimage;
-} efi_pci_io_protocol_64_t;
+       efi_pci_io_protocol_cfg_t read;
+       efi_pci_io_protocol_cfg_t write;
+} efi_pci_io_protocol_config_access_t;
 
-typedef struct {
-       void *poll_mem;
-       void *poll_io;
-       efi_pci_io_protocol_access_t mem;
-       efi_pci_io_protocol_access_t io;
-       efi_pci_io_protocol_access_t pci;
-       void *copy_mem;
-       void *map;
-       void *unmap;
-       void *allocate_buffer;
-       void *free_buffer;
-       void *flush;
-       void *get_location;
-       void *attributes;
-       void *get_bar_attributes;
-       void *set_bar_attributes;
-       uint64_t romsize;
-       void *romimage;
-} efi_pci_io_protocol_t;
+union efi_pci_io_protocol {
+       struct {
+               void *poll_mem;
+               void *poll_io;
+               efi_pci_io_protocol_access_t mem;
+               efi_pci_io_protocol_access_t io;
+               efi_pci_io_protocol_config_access_t pci;
+               void *copy_mem;
+               void *map;
+               void *unmap;
+               void *allocate_buffer;
+               void *free_buffer;
+               void *flush;
+               efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *,
+                                                     unsigned long *segment_nr,
+                                                     unsigned long *bus_nr,
+                                                     unsigned long *device_nr,
+                                                     unsigned long *func_nr);
+               void *attributes;
+               void *get_bar_attributes;
+               void *set_bar_attributes;
+               uint64_t romsize;
+               void *romimage;
+       };
+       struct {
+               u32 poll_mem;
+               u32 poll_io;
+               efi_pci_io_protocol_access_32_t mem;
+               efi_pci_io_protocol_access_32_t io;
+               efi_pci_io_protocol_access_32_t pci;
+               u32 copy_mem;
+               u32 map;
+               u32 unmap;
+               u32 allocate_buffer;
+               u32 free_buffer;
+               u32 flush;
+               u32 get_location;
+               u32 attributes;
+               u32 get_bar_attributes;
+               u32 set_bar_attributes;
+               u64 romsize;
+               u32 romimage;
+       } mixed_mode;
+};
 
 #define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
 #define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
@@ -473,54 +453,62 @@ typedef struct {
 #define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
 #define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
 
-typedef struct {
-       u32 version;
-       u32 get;
-       u32 set;
-       u32 del;
-       u32 get_all;
-} apple_properties_protocol_32_t;
+struct efi_dev_path;
 
-typedef struct {
-       u64 version;
-       u64 get;
-       u64 set;
-       u64 del;
-       u64 get_all;
-} apple_properties_protocol_64_t;
-
-typedef struct {
-       u32 get_capability;
-       u32 get_event_log;
-       u32 hash_log_extend_event;
-       u32 submit_command;
-       u32 get_active_pcr_banks;
-       u32 set_active_pcr_banks;
-       u32 get_result_of_set_active_pcr_banks;
-} efi_tcg2_protocol_32_t;
+typedef union apple_properties_protocol apple_properties_protocol_t;
 
-typedef struct {
-       u64 get_capability;
-       u64 get_event_log;
-       u64 hash_log_extend_event;
-       u64 submit_command;
-       u64 get_active_pcr_banks;
-       u64 set_active_pcr_banks;
-       u64 get_result_of_set_active_pcr_banks;
-} efi_tcg2_protocol_64_t;
+union apple_properties_protocol {
+       struct {
+               unsigned long version;
+               efi_status_t (__efiapi *get)(apple_properties_protocol_t *,
+                                            struct efi_dev_path *,
+                                            efi_char16_t *, void *, u32 *);
+               efi_status_t (__efiapi *set)(apple_properties_protocol_t *,
+                                            struct efi_dev_path *,
+                                            efi_char16_t *, void *, u32);
+               efi_status_t (__efiapi *del)(apple_properties_protocol_t *,
+                                            struct efi_dev_path *,
+                                            efi_char16_t *);
+               efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *,
+                                                void *buffer, u32 *);
+       };
+       struct {
+               u32 version;
+               u32 get;
+               u32 set;
+               u32 del;
+               u32 get_all;
+       } mixed_mode;
+};
 
 typedef u32 efi_tcg2_event_log_format;
 
-typedef struct {
-       void *get_capability;
-       efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
-               efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *);
-       void *hash_log_extend_event;
-       void *submit_command;
-       void *get_active_pcr_banks;
-       void *set_active_pcr_banks;
-       void *get_result_of_set_active_pcr_banks;
-} efi_tcg2_protocol_t;
+typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
+
+union efi_tcg2_protocol {
+       struct {
+               void *get_capability;
+               efi_status_t (__efiapi *get_event_log)(efi_handle_t,
+                                                      efi_tcg2_event_log_format,
+                                                      efi_physical_addr_t *,
+                                                      efi_physical_addr_t *,
+                                                      efi_bool_t *);
+               void *hash_log_extend_event;
+               void *submit_command;
+               void *get_active_pcr_banks;
+               void *set_active_pcr_banks;
+               void *get_result_of_set_active_pcr_banks;
+       };
+       struct {
+               u32 get_capability;
+               u32 get_event_log;
+               u32 hash_log_extend_event;
+               u32 submit_command;
+               u32 get_active_pcr_banks;
+               u32 set_active_pcr_banks;
+               u32 get_result_of_set_active_pcr_banks;
+       } mixed_mode;
+};
 
 /*
  * Types and defines for EFI ResetSystem
@@ -553,24 +541,6 @@ typedef struct {
        u32 query_variable_info;
 } efi_runtime_services_32_t;
 
-typedef struct {
-       efi_table_hdr_t hdr;
-       u64 get_time;
-       u64 set_time;
-       u64 get_wakeup_time;
-       u64 set_wakeup_time;
-       u64 set_virtual_address_map;
-       u64 convert_pointer;
-       u64 get_variable;
-       u64 get_next_variable;
-       u64 set_variable;
-       u64 get_next_high_mono_count;
-       u64 reset_system;
-       u64 update_capsule;
-       u64 query_capsule_caps;
-       u64 query_variable_info;
-} efi_runtime_services_64_t;
-
 typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
 typedef efi_status_t efi_set_time_t (efi_time_t *tm);
 typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -605,22 +575,25 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
                                                unsigned long size,
                                                bool nonblocking);
 
-typedef struct {
-       efi_table_hdr_t                 hdr;
-       efi_get_time_t                  *get_time;
-       efi_set_time_t                  *set_time;
-       efi_get_wakeup_time_t           *get_wakeup_time;
-       efi_set_wakeup_time_t           *set_wakeup_time;
-       efi_set_virtual_address_map_t   *set_virtual_address_map;
-       void                            *convert_pointer;
-       efi_get_variable_t              *get_variable;
-       efi_get_next_variable_t         *get_next_variable;
-       efi_set_variable_t              *set_variable;
-       efi_get_next_high_mono_count_t  *get_next_high_mono_count;
-       efi_reset_system_t              *reset_system;
-       efi_update_capsule_t            *update_capsule;
-       efi_query_capsule_caps_t        *query_capsule_caps;
-       efi_query_variable_info_t       *query_variable_info;
+typedef union {
+       struct {
+               efi_table_hdr_t                         hdr;
+               efi_get_time_t __efiapi                 *get_time;
+               efi_set_time_t __efiapi                 *set_time;
+               efi_get_wakeup_time_t __efiapi          *get_wakeup_time;
+               efi_set_wakeup_time_t __efiapi          *set_wakeup_time;
+               efi_set_virtual_address_map_t __efiapi  *set_virtual_address_map;
+               void                                    *convert_pointer;
+               efi_get_variable_t __efiapi             *get_variable;
+               efi_get_next_variable_t __efiapi        *get_next_variable;
+               efi_set_variable_t __efiapi             *set_variable;
+               efi_get_next_high_mono_count_t __efiapi *get_next_high_mono_count;
+               efi_reset_system_t __efiapi             *reset_system;
+               efi_update_capsule_t __efiapi           *update_capsule;
+               efi_query_capsule_caps_t __efiapi       *query_capsule_caps;
+               efi_query_variable_info_t __efiapi      *query_variable_info;
+       };
+       efi_runtime_services_32_t mixed_mode;
 } efi_runtime_services_t;
 
 void efi_native_runtime_setup(void);
@@ -706,9 +679,12 @@ typedef struct {
        u32 table;
 } efi_config_table_32_t;
 
-typedef struct {
-       efi_guid_t guid;
-       unsigned long table;
+typedef union {
+       struct {
+               efi_guid_t guid;
+               void *table;
+       };
+       efi_config_table_32_t mixed_mode;
 } efi_config_table_t;
 
 typedef struct {
@@ -760,32 +736,38 @@ typedef struct {
        u32 tables;
 } efi_system_table_32_t;
 
-typedef struct {
-       efi_table_hdr_t hdr;
-       unsigned long fw_vendor;        /* physical addr of CHAR16 vendor string */
-       u32 fw_revision;
-       unsigned long con_in_handle;
-       unsigned long con_in;
-       unsigned long con_out_handle;
-       unsigned long con_out;
-       unsigned long stderr_handle;
-       unsigned long stderr;
-       efi_runtime_services_t *runtime;
-       efi_boot_services_t *boottime;
-       unsigned long nr_tables;
-       unsigned long tables;
+typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t;
+
+typedef union {
+       struct {
+               efi_table_hdr_t hdr;
+               unsigned long fw_vendor;        /* physical addr of CHAR16 vendor string */
+               u32 fw_revision;
+               unsigned long con_in_handle;
+               unsigned long con_in;
+               unsigned long con_out_handle;
+               efi_simple_text_output_protocol_t *con_out;
+               unsigned long stderr_handle;
+               unsigned long stderr;
+               efi_runtime_services_t *runtime;
+               efi_boot_services_t *boottime;
+               unsigned long nr_tables;
+               unsigned long tables;
+       };
+       efi_system_table_32_t mixed_mode;
 } efi_system_table_t;
 
 /*
  * Architecture independent structure for describing a memory map for the
- * benefit of efi_memmap_init_early(), saving us the need to pass four
- * parameters.
+ * benefit of efi_memmap_init_early(), and for passing context between
+ * efi_memmap_alloc() and efi_memmap_install().
  */
 struct efi_memory_map_data {
        phys_addr_t phys_map;
        unsigned long size;
        unsigned long desc_version;
        unsigned long desc_size;
+       unsigned long flags;
 };
 
 struct efi_memory_map {
@@ -795,7 +777,10 @@ struct efi_memory_map {
        int nr_map;
        unsigned long desc_version;
        unsigned long desc_size;
-       bool late;
+#define EFI_MEMMAP_LATE (1UL << 0)
+#define EFI_MEMMAP_MEMBLOCK (1UL << 1)
+#define EFI_MEMMAP_SLAB (1UL << 2)
+       unsigned long flags;
 };
 
 struct efi_mem_range {
@@ -813,41 +798,9 @@ struct efi_fdt_params {
 
 typedef struct {
        u32 revision;
-       u32 parent_handle;
-       u32 system_table;
-       u32 device_handle;
-       u32 file_path;
-       u32 reserved;
-       u32 load_options_size;
-       u32 load_options;
-       u32 image_base;
-       __aligned_u64 image_size;
-       unsigned int image_code_type;
-       unsigned int image_data_type;
-       unsigned long unload;
-} efi_loaded_image_32_t;
-
-typedef struct {
-       u32 revision;
-       u64 parent_handle;
-       u64 system_table;
-       u64 device_handle;
-       u64 file_path;
-       u64 reserved;
-       u32 load_options_size;
-       u64 load_options;
-       u64 image_base;
-       __aligned_u64 image_size;
-       unsigned int image_code_type;
-       unsigned int image_data_type;
-       unsigned long unload;
-} efi_loaded_image_64_t;
-
-typedef struct {
-       u32 revision;
-       void *parent_handle;
+       efi_handle_t parent_handle;
        efi_system_table_t *system_table;
-       void *device_handle;
+       efi_handle_t device_handle;
        void *file_path;
        void *reserved;
        u32 load_options_size;
@@ -856,10 +809,9 @@ typedef struct {
        __aligned_u64 image_size;
        unsigned int image_code_type;
        unsigned int image_data_type;
-       unsigned long unload;
+       efi_status_t (__efiapi *unload)(efi_handle_t image_handle);
 } efi_loaded_image_t;
 
-
 typedef struct {
        u64 size;
        u64 file_size;
@@ -871,67 +823,34 @@ typedef struct {
        efi_char16_t filename[1];
 } efi_file_info_t;
 
-typedef struct {
-       u64 revision;
-       u32 open;
-       u32 close;
-       u32 delete;
-       u32 read;
-       u32 write;
-       u32 get_position;
-       u32 set_position;
-       u32 get_info;
-       u32 set_info;
-       u32 flush;
-} efi_file_handle_32_t;
+typedef struct efi_file_handle efi_file_handle_t;
 
-typedef struct {
-       u64 revision;
-       u64 open;
-       u64 close;
-       u64 delete;
-       u64 read;
-       u64 write;
-       u64 get_position;
-       u64 set_position;
-       u64 get_info;
-       u64 set_info;
-       u64 flush;
-} efi_file_handle_64_t;
-
-typedef struct _efi_file_handle {
+struct efi_file_handle {
        u64 revision;
-       efi_status_t (*open)(struct _efi_file_handle *,
-                            struct _efi_file_handle **,
-                            efi_char16_t *, u64, u64);
-       efi_status_t (*close)(struct _efi_file_handle *);
+       efi_status_t (__efiapi *open)(efi_file_handle_t *,
+                                     efi_file_handle_t **,
+                                     efi_char16_t *, u64, u64);
+       efi_status_t (__efiapi *close)(efi_file_handle_t *);
        void *delete;
-       efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
-                            void *);
+       efi_status_t (__efiapi *read)(efi_file_handle_t *,
+                                     unsigned long *, void *);
        void *write;
        void *get_position;
        void *set_position;
-       efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
-                       unsigned long *, void *);
+       efi_status_t (__efiapi *get_info)(efi_file_handle_t *,
+                                         efi_guid_t *, unsigned long *,
+                                         void *);
        void *set_info;
        void *flush;
-} efi_file_handle_t;
+};
 
-typedef struct {
-       u64 revision;
-       u32 open_volume;
-} efi_file_io_interface_32_t;
+typedef struct efi_file_io_interface efi_file_io_interface_t;
 
-typedef struct {
+struct efi_file_io_interface {
        u64 revision;
-       u64 open_volume;
-} efi_file_io_interface_64_t;
-
-typedef struct _efi_file_io_interface {
-       u64 revision;
-       int (*open_volume)(struct _efi_file_io_interface *,
-                          efi_file_handle_t **);
-} efi_file_io_interface_t;
+       int (__efiapi *open_volume)(efi_file_io_interface_t *,
+                                   efi_file_handle_t **);
+};
 
 #define EFI_FILE_MODE_READ     0x0000000000000001
 #define EFI_FILE_MODE_WRITE    0x0000000000000002
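A hedged sketch of the restructured, __efiapi-annotated file protocols in use; fio, fname (an efi_char16_t string), buf and bufsize are placeholders:

	efi_file_handle_t *volume, *fh;
	efi_status_t status;
	unsigned long sz = bufsize;

	if (fio->open_volume(fio, &volume))
		return EFI_LOAD_ERROR;

	status = volume->open(volume, &fh, fname, EFI_FILE_MODE_READ, 0);
	if (status == EFI_SUCCESS)
		status = fh->read(fh, &sz, buf);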
@@ -1015,7 +934,6 @@ extern struct efi {
        efi_query_capsule_caps_t *query_capsule_caps;
        efi_get_next_high_mono_count_t *get_next_high_mono_count;
        efi_reset_system_t *reset_system;
-       efi_set_virtual_address_map_t *set_virtual_address_map;
        struct efi_memory_map memmap;
        unsigned long flags;
 } efi;
@@ -1056,11 +974,14 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 
-extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
+extern int __init efi_memmap_alloc(unsigned int num_entries,
+                                  struct efi_memory_map_data *data);
+extern void __efi_memmap_free(u64 phys, unsigned long size,
+                             unsigned long flags);
 extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
 extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
 extern void __init efi_memmap_unmap(void);
-extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
+extern int __init efi_memmap_install(struct efi_memory_map_data *data);
 extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
                                         struct range *range);
 extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
@@ -1391,22 +1312,18 @@ struct efivar_entry {
        bool deleting;
 };
 
-typedef struct {
-       u32 reset;
-       u32 output_string;
-       u32 test_string;
-} efi_simple_text_output_protocol_32_t;
-
-typedef struct {
-       u64 reset;
-       u64 output_string;
-       u64 test_string;
-} efi_simple_text_output_protocol_64_t;
-
-struct efi_simple_text_output_protocol {
-       void *reset;
-       efi_status_t (*output_string)(void *, void *);
-       void *test_string;
+union efi_simple_text_output_protocol {
+       struct {
+               void *reset;
+               efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *,
+                                                      efi_char16_t *);
+               void *test_string;
+       };
+       struct {
+               u32 reset;
+               u32 output_string;
+               u32 test_string;
+       } mixed_mode;
 };
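With con_out now properly typed in the system table above, a caller can print through a calling-convention-correct pointer. A sketch; systab and msg, an efi_char16_t string, are placeholders:

	efi_simple_text_output_protocol_t *out = systab->con_out;

	out->output_string(out, msg);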
 
 #define PIXEL_RGB_RESERVED_8BIT_PER_COLOR              0
@@ -1415,73 +1332,59 @@ struct efi_simple_text_output_protocol {
 #define PIXEL_BLT_ONLY                                 3
 #define PIXEL_FORMAT_MAX                               4
 
-struct efi_pixel_bitmask {
+typedef struct {
        u32 red_mask;
        u32 green_mask;
        u32 blue_mask;
        u32 reserved_mask;
-};
+} efi_pixel_bitmask_t;
 
-struct efi_graphics_output_mode_info {
+typedef struct {
        u32 version;
        u32 horizontal_resolution;
        u32 vertical_resolution;
        int pixel_format;
-       struct efi_pixel_bitmask pixel_information;
+       efi_pixel_bitmask_t pixel_information;
        u32 pixels_per_scan_line;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_32 {
-       u32 max_mode;
-       u32 mode;
-       u32 info;
-       u32 size_of_info;
-       u64 frame_buffer_base;
-       u32 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_64 {
-       u32 max_mode;
-       u32 mode;
-       u64 info;
-       u64 size_of_info;
-       u64 frame_buffer_base;
-       u64 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode {
-       u32 max_mode;
-       u32 mode;
-       unsigned long info;
-       unsigned long size_of_info;
-       u64 frame_buffer_base;
-       unsigned long frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_32 {
-       u32 query_mode;
-       u32 set_mode;
-       u32 blt;
-       u32 mode;
-};
+} efi_graphics_output_mode_info_t;
 
-struct efi_graphics_output_protocol_64 {
-       u64 query_mode;
-       u64 set_mode;
-       u64 blt;
-       u64 mode;
-};
+typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t;
 
-struct efi_graphics_output_protocol {
-       unsigned long query_mode;
-       unsigned long set_mode;
-       unsigned long blt;
-       struct efi_graphics_output_protocol_mode *mode;
+union efi_graphics_output_protocol_mode {
+       struct {
+               u32 max_mode;
+               u32 mode;
+               efi_graphics_output_mode_info_t *info;
+               unsigned long size_of_info;
+               efi_physical_addr_t frame_buffer_base;
+               unsigned long frame_buffer_size;
+       };
+       struct {
+               u32 max_mode;
+               u32 mode;
+               u32 info;
+               u32 size_of_info;
+               u64 frame_buffer_base;
+               u32 frame_buffer_size;
+       } mixed_mode;
 };
 
-typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
-       struct efi_graphics_output_protocol *, u32, unsigned long *,
-       struct efi_graphics_output_mode_info **);
+typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t;
+
+union efi_graphics_output_protocol {
+       struct {
+               void *query_mode;
+               void *set_mode;
+               void *blt;
+               efi_graphics_output_protocol_mode_t *mode;
+       };
+       struct {
+               u32 query_mode;
+               u32 set_mode;
+               u32 blt;
+               u32 mode;
+       } mixed_mode;
+};
 
 extern struct list_head efivar_sysfs_list;
 
@@ -1582,24 +1485,19 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
 
 /* prototypes shared between arch specific and generic stub code */
 
-void efi_printk(efi_system_table_t *sys_table_arg, char *str);
+void efi_printk(char *str);
 
-void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
-             unsigned long addr);
+void efi_free(unsigned long size, unsigned long addr);
 
-char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
-                         efi_loaded_image_t *image, int *cmd_line_len);
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
 
-efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
-                               struct efi_boot_memmap *map);
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
 
-efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
-                                unsigned long size, unsigned long align,
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
                                 unsigned long *addr, unsigned long min);
 
 static inline
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
-                          unsigned long size, unsigned long align,
+efi_status_t efi_low_alloc(unsigned long size, unsigned long align,
                           unsigned long *addr)
 {
        /*
@@ -1607,23 +1505,20 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
         * checks pointers against NULL. Skip the first 8
         * bytes so we start at a nice even number.
         */
-       return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
+       return efi_low_alloc_above(size, align, addr, 0x8);
 }
 
-efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
-                           unsigned long size, unsigned long align,
+efi_status_t efi_high_alloc(unsigned long size, unsigned long align,
                            unsigned long *addr, unsigned long max);
 
-efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
-                                unsigned long *image_addr,
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
                                 unsigned long image_size,
                                 unsigned long alloc_size,
                                 unsigned long preferred_addr,
                                 unsigned long alignment,
                                 unsigned long min_addr);
 
-efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
-                                 efi_loaded_image_t *image,
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
                                  char *cmd_line, char *option_string,
                                  unsigned long max_addr,
                                  unsigned long *load_addr,
@@ -1631,8 +1526,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 
 efi_status_t efi_parse_options(char const *cmdline);
 
-efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
-                          struct screen_info *si, efi_guid_t *proto,
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
                           unsigned long size);
 
 #ifdef CONFIG_EFI
@@ -1650,18 +1544,18 @@ enum efi_secureboot_mode {
        efi_secureboot_mode_disabled,
        efi_secureboot_mode_enabled,
 };
-enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
+enum efi_secureboot_mode efi_get_secureboot(void);
 
 #ifdef CONFIG_RESET_ATTACK_MITIGATION
-void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg);
+void efi_enable_reset_attack_mitigation(void);
 #else
 static inline void
-efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
+efi_enable_reset_attack_mitigation(void) { }
 #endif
 
-efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+efi_status_t efi_random_get_seed(void);
 
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
+void efi_retrieve_tpm2_eventlog(void);
 
 /*
  * Arch code can implement the following three template macros, avoiding
@@ -1713,12 +1607,10 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
 })
 
 typedef efi_status_t (*efi_exit_boot_map_processing)(
-       efi_system_table_t *sys_table_arg,
        struct efi_boot_memmap *map,
        void *priv);
 
-efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
-                                   void *handle,
+efi_status_t efi_exit_boot_services(void *handle,
                                    struct efi_boot_memmap *map,
                                    void *priv,
                                    efi_exit_boot_map_processing priv_func);
@@ -1809,4 +1701,6 @@ struct linux_efi_memreserve {
 #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
        / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
 
+void efi_pci_disable_bridge_busmaster(void);
+
 #endif /* _LINUX_EFI_H */
index a141cb07e76a7f1a4abaf4793d66459718c65462..345f3748e0fb7b090b78632a22a39ca8a5f72077 100644 (file)
@@ -420,7 +420,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 
 #define BPF_FIELD_SIZEOF(type, field)                          \
        ({                                                      \
-               const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+               const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
                BUILD_BUG_ON(__size < 0);                       \
                __size;                                         \
        })
@@ -497,7 +497,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 
 #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)                           \
        ({                                                                      \
-               BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));             \
+               BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE));             \
                *(PTR_SIZE) = (SIZE);                                           \
                offsetof(TYPE, MEMBER);                                         \
        })
@@ -608,7 +608,7 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
 {
        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
 
-       BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+       BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
        cb->data_meta = skb->data - skb_metadata_len(skb);
        cb->data_end  = skb->data + skb_headlen(skb);
 }
@@ -646,9 +646,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
         * attached to sockets, we need to clear the bpf_skb_cb() area
         * to not leak previous contents to user space.
         */
-       BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
-       BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
-                    FIELD_SIZEOF(struct qdisc_skb_cb, data));
+       BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
+       BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
+                    sizeof_field(struct qdisc_skb_cb, data));
 
        return qdisc_skb_cb(skb)->data;
 }
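The FIELD_SIZEOF() to sizeof_field() conversion here is mechanical; both report a member's size without needing an instance. A sketch of the semantics:

	/* sizeof_field(TYPE, MEMBER) == sizeof(((TYPE *)0)->MEMBER) */
	BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) >
		     sizeof_field(struct sk_buff, cb));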
index 98e0349adb52638708b2d2a89fcf5ca4947b6c8c..dddfcbb140a76820ab33fb09b08637b9bab4ff9e 100644 (file)
@@ -855,7 +855,7 @@ static inline loff_t i_size_read(const struct inode *inode)
                i_size = inode->i_size;
        } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
        return i_size;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
        loff_t i_size;
 
        preempt_disable();
@@ -880,7 +880,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
        inode->i_size = i_size;
        write_seqcount_end(&inode->i_size_seqcount);
        preempt_enable();
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
        preempt_disable();
        inode->i_size = i_size;
        preempt_enable();
index 7247d35c3d160af210f35e07393fd04da361326b..db95244a62d44db0b7d921e4a3f66ec5cf2acb39 100644 (file)
@@ -264,6 +264,7 @@ int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
                                struct dyn_ftrace *rec,
                                unsigned long old_addr,
                                unsigned long new_addr);
+unsigned long ftrace_find_rec_direct(unsigned long ip);
 #else
 # define ftrace_direct_func_count 0
 static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
@@ -290,6 +291,10 @@ static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
 {
        return -ENODEV;
 }
+static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
+{
+       return 0;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
 
 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
index 8bb63027e4d6345b5f7a3a0954bbf301b260d5ef..6fbe58538ad6303fed1aa86898d8b5d9dcc82f8d 100644 (file)
@@ -245,6 +245,18 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk)
                !(disk->flags & GENHD_FL_NO_PART_SCAN);
 }
 
+static inline bool disk_has_partitions(struct gendisk *disk)
+{
+       bool ret = false;
+
+       rcu_read_lock();
+       if (rcu_dereference(disk->part_tbl)->len > 1)
+               ret = true;
+       rcu_read_unlock();
+
+       return ret;
+}
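A sketch of the new helper in use; disk is a placeholder gendisk pointer:

	if (disk_has_partitions(disk))
		pr_debug("%s: partition table present\n", disk->disk_name);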
+
 static inline dev_t disk_devt(struct gendisk *disk)
 {
        return MKDEV(disk->major, disk->first_minor);
@@ -718,7 +730,7 @@ static inline void hd_free_part(struct hd_struct *part)
  * accessor function.
  *
  * Code written along the lines of i_size_read() and i_size_write().
- * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
+ * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
  * on.
  */
 static inline sector_t part_nr_sects_read(struct hd_struct *part)
@@ -731,7 +743,7 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
                nr_sects = part->nr_sects;
        } while (read_seqcount_retry(&part->nr_sects_seq, seq));
        return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
        sector_t nr_sects;
 
        preempt_disable();
@@ -754,7 +766,7 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
        write_seqcount_begin(&part->nr_sects_seq);
        part->nr_sects = size;
        write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
        preempt_disable();
        part->nr_sects = size;
        preempt_enable();
index 5215fdba6b9a6dea6ca572b22398d8f2accf8c6e..bf2d017dd7b71aa9b6c795a676f9169e7c0286dc 100644 (file)
@@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
 
 int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
 int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
+void gpiod_toggle_active_low(struct gpio_desc *desc);
 
 int gpiod_is_active_low(const struct gpio_desc *desc);
 int gpiod_cansleep(const struct gpio_desc *desc);
@@ -483,6 +484,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
        return -ENOSYS;
 }
 
+static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+       /* GPIO can never have been requested */
+       WARN_ON(desc);
+}
+
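A hedged sketch of gpiod_toggle_active_low(), e.g. for a polarity quirk discovered only after the line was requested; dev, the "reset" con_id and the property name are placeholders:

	struct gpio_desc *desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	if (!IS_ERR(desc) &&
	    device_property_read_bool(dev, "vendor,inverted-reset"))
		gpiod_toggle_active_low(desc);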
 static inline int gpiod_is_active_low(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
index 1f98b52118f0a31541f3ce6f2cb52520442b778b..15c8ac3136780fdc177fbc6f7c7342dfd79fcdcb 100644 (file)
@@ -508,8 +508,7 @@ static inline u64 hrtimer_forward_now(struct hrtimer *timer,
 /* Precise sleep: */
 
 extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
-extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
-                             const enum hrtimer_mode mode,
+extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
                              const clockid_t clockid);
 
 extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
index 31d4920994b9352be154e2cb980459feb38108c8..1e897e4168ac16042ad2a38112335a593e4b04f0 100644 (file)
@@ -432,7 +432,8 @@ struct hstate {
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 #ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
-       struct cftype cgroup_files[5];
+       struct cftype cgroup_files_dfl[5];
+       struct cftype cgroup_files_legacy[5];
 #endif
        char name[HSTATE_NAME_LEN];
 };
index 72579168189d47c0f3fee924c748ae7ad5831350..5e609f25878c0a6000d4207ce98740a5da99e562 100644 (file)
@@ -27,6 +27,7 @@ enum hwmon_sensor_types {
        hwmon_humidity,
        hwmon_fan,
        hwmon_pwm,
+       hwmon_intrusion,
        hwmon_max,
 };
 
@@ -59,7 +60,8 @@ enum hwmon_chip_attributes {
 #define HWMON_C_TEMP_SAMPLES           BIT(hwmon_chip_temp_samples)
 
 enum hwmon_temp_attributes {
-       hwmon_temp_input = 0,
+       hwmon_temp_enable,
+       hwmon_temp_input,
        hwmon_temp_type,
        hwmon_temp_lcrit,
        hwmon_temp_lcrit_hyst,
@@ -85,6 +87,7 @@ enum hwmon_temp_attributes {
        hwmon_temp_reset_history,
 };
 
+#define HWMON_T_ENABLE         BIT(hwmon_temp_enable)
 #define HWMON_T_INPUT          BIT(hwmon_temp_input)
 #define HWMON_T_TYPE           BIT(hwmon_temp_type)
 #define HWMON_T_LCRIT          BIT(hwmon_temp_lcrit)
@@ -111,6 +114,7 @@ enum hwmon_temp_attributes {
 #define HWMON_T_RESET_HISTORY  BIT(hwmon_temp_reset_history)
 
 enum hwmon_in_attributes {
+       hwmon_in_enable,
        hwmon_in_input,
        hwmon_in_min,
        hwmon_in_max,
@@ -126,9 +130,9 @@ enum hwmon_in_attributes {
        hwmon_in_max_alarm,
        hwmon_in_lcrit_alarm,
        hwmon_in_crit_alarm,
-       hwmon_in_enable,
 };
 
+#define HWMON_I_ENABLE         BIT(hwmon_in_enable)
 #define HWMON_I_INPUT          BIT(hwmon_in_input)
 #define HWMON_I_MIN            BIT(hwmon_in_min)
 #define HWMON_I_MAX            BIT(hwmon_in_max)
@@ -144,9 +148,9 @@ enum hwmon_in_attributes {
 #define HWMON_I_MAX_ALARM      BIT(hwmon_in_max_alarm)
 #define HWMON_I_LCRIT_ALARM    BIT(hwmon_in_lcrit_alarm)
 #define HWMON_I_CRIT_ALARM     BIT(hwmon_in_crit_alarm)
-#define HWMON_I_ENABLE         BIT(hwmon_in_enable)
 
 enum hwmon_curr_attributes {
+       hwmon_curr_enable,
        hwmon_curr_input,
        hwmon_curr_min,
        hwmon_curr_max,
@@ -164,6 +168,7 @@ enum hwmon_curr_attributes {
        hwmon_curr_crit_alarm,
 };
 
+#define HWMON_C_ENABLE         BIT(hwmon_curr_enable)
 #define HWMON_C_INPUT          BIT(hwmon_curr_input)
 #define HWMON_C_MIN            BIT(hwmon_curr_min)
 #define HWMON_C_MAX            BIT(hwmon_curr_max)
@@ -181,6 +186,7 @@ enum hwmon_curr_attributes {
 #define HWMON_C_CRIT_ALARM     BIT(hwmon_curr_crit_alarm)
 
 enum hwmon_power_attributes {
+       hwmon_power_enable,
        hwmon_power_average,
        hwmon_power_average_interval,
        hwmon_power_average_interval_max,
@@ -211,6 +217,7 @@ enum hwmon_power_attributes {
        hwmon_power_crit_alarm,
 };
 
+#define HWMON_P_ENABLE                 BIT(hwmon_power_enable)
 #define HWMON_P_AVERAGE                        BIT(hwmon_power_average)
 #define HWMON_P_AVERAGE_INTERVAL       BIT(hwmon_power_average_interval)
 #define HWMON_P_AVERAGE_INTERVAL_MAX   BIT(hwmon_power_average_interval_max)
@@ -241,14 +248,17 @@ enum hwmon_power_attributes {
 #define HWMON_P_CRIT_ALARM             BIT(hwmon_power_crit_alarm)
 
 enum hwmon_energy_attributes {
+       hwmon_energy_enable,
        hwmon_energy_input,
        hwmon_energy_label,
 };
 
+#define HWMON_E_ENABLE                 BIT(hwmon_energy_enable)
 #define HWMON_E_INPUT                  BIT(hwmon_energy_input)
 #define HWMON_E_LABEL                  BIT(hwmon_energy_label)
 
 enum hwmon_humidity_attributes {
+       hwmon_humidity_enable,
        hwmon_humidity_input,
        hwmon_humidity_label,
        hwmon_humidity_min,
@@ -259,6 +269,7 @@ enum hwmon_humidity_attributes {
        hwmon_humidity_fault,
 };
 
+#define HWMON_H_ENABLE                 BIT(hwmon_humidity_enable)
 #define HWMON_H_INPUT                  BIT(hwmon_humidity_input)
 #define HWMON_H_LABEL                  BIT(hwmon_humidity_label)
 #define HWMON_H_MIN                    BIT(hwmon_humidity_min)
@@ -269,6 +280,7 @@ enum hwmon_humidity_attributes {
 #define HWMON_H_FAULT                  BIT(hwmon_humidity_fault)
 
 enum hwmon_fan_attributes {
+       hwmon_fan_enable,
        hwmon_fan_input,
        hwmon_fan_label,
        hwmon_fan_min,
@@ -282,6 +294,7 @@ enum hwmon_fan_attributes {
        hwmon_fan_fault,
 };
 
+#define HWMON_F_ENABLE                 BIT(hwmon_fan_enable)
 #define HWMON_F_INPUT                  BIT(hwmon_fan_input)
 #define HWMON_F_LABEL                  BIT(hwmon_fan_label)
 #define HWMON_F_MIN                    BIT(hwmon_fan_min)
@@ -306,6 +319,13 @@ enum hwmon_pwm_attributes {
 #define HWMON_PWM_MODE                 BIT(hwmon_pwm_mode)
 #define HWMON_PWM_FREQ                 BIT(hwmon_pwm_freq)
 
+enum hwmon_intrusion_attributes {
+       hwmon_intrusion_alarm,
+       hwmon_intrusion_beep,
+};
+#define HWMON_INTRUSION_ALARM          BIT(hwmon_intrusion_alarm)
+#define HWMON_INTRUSION_BEEP           BIT(hwmon_intrusion_beep)
+
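/*
 * [Editor's illustrative sketch, not part of the patch] A driver exposes
 * the new intrusion channel through its channel info table; assuming the
 * usual HWMON_CHANNEL_INFO() helper and a placeholder table name:
 *
 *	static const struct hwmon_channel_info *drv_info[] = {
 *		HWMON_CHANNEL_INFO(intrusion, HWMON_INTRUSION_ALARM),
 *		NULL
 *	};
 */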
 /**
  * struct hwmon_ops - hwmon device operations
  * @is_visible: Callback to return attribute visibility. Mandatory.
index d2f786706657b1f075f9091761aff0191248e9cf..582ef05ec07ed8fbc69e38a69c714420c72f9066 100644 (file)
@@ -300,6 +300,7 @@ struct i2c_driver {
  *     generic enough to hide second-sourcing and compatible revisions.
  * @adapter: manages the bus segment hosting this I2C device
  * @dev: Driver model device node for the slave.
+ * @init_irq: IRQ that was set at initialization
  * @irq: indicates the IRQ generated by this device (if any)
  * @detected: member of an i2c_driver.clients list or i2c-core's
  *     userspace_devices list
@@ -466,12 +467,6 @@ i2c_new_probed_device(struct i2c_adapter *adap,
 /* Common custom probe functions */
 extern int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
 
-/* For devices that use several addresses, use i2c_new_dummy() to make
- * client handles for the extra addresses.
- */
-extern struct i2c_client *
-i2c_new_dummy(struct i2c_adapter *adap, u16 address);
-
 extern struct i2c_client *
 i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address);
 
@@ -856,6 +851,11 @@ extern void i2c_del_driver(struct i2c_driver *driver);
 #define i2c_add_driver(driver) \
        i2c_register_driver(THIS_MODULE, driver)
 
+static inline bool i2c_client_has_driver(struct i2c_client *client)
+{
+       return !IS_ERR_OR_NULL(client) && client->dev.driver;
+}
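/*
 * [Editor's illustrative sketch, not part of the patch] Callers can use
 * the new helper to guard paths that need a bound driver:
 *
 *	if (i2c_client_has_driver(client))
 *		dev_info(&client->dev, "driver %s bound\n",
 *			 client->dev.driver->name);
 */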
+
 /* call the i2c_client->command() of all attached clients with
  * the given arguments */
 extern void i2c_clients_command(struct i2c_adapter *adap,
index 76cf11e905e160b32625adefd1bcc7b641ff6527..8a9792a6427ad9cf58b50c79cbfe185615800dcb 100644 (file)
@@ -24,6 +24,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
        return (struct ethhdr *)skb_mac_header(skb);
 }
 
+/* Prefer this version in the TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+       return (struct ethhdr *)skb->data;
+}
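/*
 * [Editor's illustrative sketch, not part of the patch] On transmit,
 * once the Ethernet header has been pushed, skb->data already points at
 * it, so the mac_header offset need not be valid:
 *
 *	struct ethhdr *eth = skb_eth_hdr(skb);
 *
 *	ether_addr_copy(eth->h_source, dev->dev_addr);
 *
 * This avoids the skb_reset_mac_header() + eth_hdr() pair in hot paths.
 */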
+
 static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
 {
        return (struct ethhdr *)skb_inner_mac_header(skb);
index d77fe34fb00a0c23b36d6698ab385ea9553d282e..aa5914355728686ff76b31a014cbbab9d619683a 100644 (file)
@@ -28,3 +28,5 @@ extern unsigned int real_root_dev;
 
 extern char __initramfs_start[];
 extern unsigned long __initramfs_size;
+
+void console_on_rootfs(void);
index a59834bc0a114f99dae0ae91f18fd3b1d870979e..b1c44bb4b2d750ca95235a04df270aa288c027b5 100644 (file)
@@ -66,8 +66,6 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
                           resource_size_t size);
 void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
                                   resource_size_t size);
-void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
-                                  resource_size_t size);
 void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
                                   resource_size_t size);
 void devm_iounmap(struct device *dev, void __iomem *addr);
@@ -87,7 +85,7 @@ void *__devm_memremap_pages(struct device *dev, struct resource *res);
  * Posting") mandate non-posted configuration transactions. There is
  * no ioremap API in the kernel that can guarantee non-posted write
  * semantics across arches so provide a default implementation for
- * mapping PCI config space that defaults to ioremap_nocache(); arches
+ * mapping PCI config space that defaults to ioremap(); arches
  * should override it if they have memory mapping implementations that
 * guarantee non-posted write semantics to make the memory mapping
  * compliant with the PCI specification.
@@ -97,7 +95,7 @@ void *__devm_memremap_pages(struct device *dev, struct resource *res);
 static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
                                               size_t size)
 {
-       return ioremap_nocache(offset, size);
+       return ioremap(offset, size);
 }
 #endif
 #endif
index de991d6633a5f99e929ca191506c87855a165e13..f0b8ca766e7df643f229082687b847a3ff85dc6e 100644 (file)
@@ -13,6 +13,7 @@
 #define GICD_CTLR                      0x0000
 #define GICD_TYPER                     0x0004
 #define GICD_IIDR                      0x0008
+#define GICD_TYPER2                    0x000C
 #define GICD_STATUSR                   0x0010
 #define GICD_SETSPI_NSR                        0x0040
 #define GICD_CLRSPI_NSR                        0x0048
@@ -89,6 +90,9 @@
 #define GICD_TYPER_ESPIS(typer)                                                \
        (((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
 
+#define GICD_TYPER2_VIL                        (1U << 7)
+#define GICD_TYPER2_VID                        GENMASK(4, 0)
+
 #define GICD_IROUTER_SPI_MODE_ONE      (0U << 31)
 #define GICD_IROUTER_SPI_MODE_ANY      (1U << 31)
 
 
 #define GIC_V3_DIST_SIZE               0x10000
 
+#define GIC_PAGE_SIZE_4K               0ULL
+#define GIC_PAGE_SIZE_16K              1ULL
+#define GIC_PAGE_SIZE_64K              2ULL
+#define GIC_PAGE_SIZE_MASK             3ULL
+
 /*
  * Re-Distributor registers, offsets from RD_base
  */
 #define GICR_TYPER_VLPIS               (1U << 1)
 #define GICR_TYPER_DirectLPIS          (1U << 3)
 #define GICR_TYPER_LAST                        (1U << 4)
+#define GICR_TYPER_RVPEID              (1U << 7)
+#define GICR_TYPER_COMMON_LPI_AFF      GENMASK_ULL(25, 24)
+#define GICR_TYPER_AFFINITY            GENMASK_ULL(63, 32)
+
+#define GICR_INVLPIR_INTID             GENMASK_ULL(31, 0)
+#define GICR_INVLPIR_VPEID             GENMASK_ULL(47, 32)
+#define GICR_INVLPIR_V                 GENMASK_ULL(63, 63)
+
+#define GICR_INVALLR_VPEID             GICR_INVLPIR_VPEID
+#define GICR_INVALLR_V                 GICR_INVLPIR_V
 
 #define GIC_V3_REDIST_SIZE             0x20000
 
 #define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
 #define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
 
+/*
+ * GICv4.1 VPROPBASER reinvention. A subtle mix between the old
+ * VPROPBASER and ITS_BASER, yet not quite either of the two.
+ */
+#define GICR_VPROPBASER_4_1_VALID      (1ULL << 63)
+#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59)
+#define GICR_VPROPBASER_4_1_INDIRECT   (1ULL << 55)
+#define GICR_VPROPBASER_4_1_PAGE_SIZE  GENMASK_ULL(54, 53)
+#define GICR_VPROPBASER_4_1_Z          (1ULL << 52)
+#define GICR_VPROPBASER_4_1_ADDR       GENMASK_ULL(51, 12)
+#define GICR_VPROPBASER_4_1_SIZE       GENMASK_ULL(6, 0)
+
 #define GICR_VPENDBASER                        0x0078
 
 #define GICR_VPENDBASER_SHAREABILITY_SHIFT             (10)
 #define GICR_VPENDBASER_IDAI           (1ULL << 62)
 #define GICR_VPENDBASER_Valid          (1ULL << 63)
 
+/*
+ * GICv4.1 VPENDBASER, used for VPE residency. In addition to these
+ * fields, the Valid, PendingLast and Dirty bits above also apply.
+ */
+#define GICR_VPENDBASER_4_1_DB         (1ULL << 62)
+#define GICR_VPENDBASER_4_1_VGRP0EN    (1ULL << 59)
+#define GICR_VPENDBASER_4_1_VGRP1EN    (1ULL << 58)
+#define GICR_VPENDBASER_4_1_VPEID      GENMASK_ULL(15, 0)
+
 /*
  * ITS registers, offsets from ITS_base
  */
 #define GITS_CTLR                      0x0000
 #define GITS_IIDR                      0x0004
 #define GITS_TYPER                     0x0008
+#define GITS_MPIDR                     0x0018
 #define GITS_CBASER                    0x0080
 #define GITS_CWRITER                   0x0088
 #define GITS_CREADR                    0x0090
 #define GITS_TYPER_HCC_SHIFT           24
 #define GITS_TYPER_HCC(r)              (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff)
 #define GITS_TYPER_VMOVP               (1ULL << 37)
+#define GITS_TYPER_VMAPP               (1ULL << 40)
+#define GITS_TYPER_SVPET               GENMASK_ULL(42, 41)
 
 #define GITS_IIDR_REV_SHIFT            12
 #define GITS_IIDR_REV_MASK             (0xf << GITS_IIDR_REV_SHIFT)
 #define GITS_BASER_InnerShareable                                      \
        GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
 #define GITS_BASER_PAGE_SIZE_SHIFT     (8)
-#define GITS_BASER_PAGE_SIZE_4K                (0ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_16K       (1ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_64K       (2ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_MASK      (3ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define __GITS_BASER_PSZ(sz)           (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K                __GITS_BASER_PSZ(4K)
+#define GITS_BASER_PAGE_SIZE_16K       __GITS_BASER_PSZ(16K)
+#define GITS_BASER_PAGE_SIZE_64K       __GITS_BASER_PSZ(64K)
+#define GITS_BASER_PAGE_SIZE_MASK      __GITS_BASER_PSZ(MASK)
 #define GITS_BASER_PAGES_MAX           256
 #define GITS_BASER_PAGES_SHIFT         (0)
 #define GITS_BASER_NR_PAGES(r)         (((r) & 0xff) + 1)
 #define GITS_CMD_VMAPTI                        GITS_CMD_GICv4(GITS_CMD_MAPTI)
 #define GITS_CMD_VMOVI                 GITS_CMD_GICv4(GITS_CMD_MOVI)
 #define GITS_CMD_VSYNC                 GITS_CMD_GICv4(GITS_CMD_SYNC)
-/* VMOVP is the odd one, as it doesn't have a physical counterpart */
+/* VMOVP and INVDB are the odd ones, as they don't have a physical counterpart */
 #define GITS_CMD_VMOVP                 GITS_CMD_GICv4(2)
+#define GITS_CMD_INVDB                 GITS_CMD_GICv4(0xe)
 
 /*
  * ITS error numbers
@@ -607,14 +652,18 @@ struct rdists {
        struct {
                void __iomem    *rd_base;
                struct page     *pend_page;
+               struct page     *vpe_l1_page;
                phys_addr_t     phys_base;
                bool            lpi_enabled;
+               cpumask_t       *vpe_table_mask;
        } __percpu              *rdist;
        phys_addr_t             prop_table_pa;
        void                    *prop_table_va;
        u64                     flags;
        u32                     gicd_typer;
+       u32                     gicd_typer2;
        bool                    has_vlpis;
+       bool                    has_rvpeid;
        bool                    has_direct_lpi;
 };
 
index 5dbcfc65f21ec0e977c28fd227b626ae1eadef3e..d9c34968467a56c1cdad0560d3156f152997040a 100644 (file)
@@ -39,8 +39,20 @@ struct its_vpe {
        irq_hw_number_t         vpe_db_lpi;
        /* VPE resident */
        bool                    resident;
-       /* VPE proxy mapping */
-       int                     vpe_proxy_event;
+       union {
+               /* GICv4.0 implementations */
+               struct {
+                       /* VPE proxy mapping */
+                       int     vpe_proxy_event;
+                       /* Implementation Defined Area Invalid */
+                       bool    idai;
+               };
+               /* GICv4.1 implementations */
+               struct {
+                       atomic_t vmapp_count;
+               };
+       };
+
        /*
         * This collection ID is used to indirect the target
         * redistributor for this VPE. The ID itself isn't involved in
@@ -49,8 +61,6 @@ struct its_vpe {
        u16                     col_idx;
        /* Unique (system-wide) VPE identifier */
        u16                     vpe_id;
-       /* Implementation Defined Area Invalid */
-       bool                    idai;
        /* Pending VLPIs on schedule out? */
        bool                    pending_last;
 };
@@ -90,6 +100,11 @@ struct its_cmd_info {
        union {
                struct its_vlpi_map     *map;
                u8                      config;
+               bool                    req_db;
+               struct {
+                       bool            g0en;
+                       bool            g1en;
+               };
        };
 };
 
index 3c340dbc5a1ffe814c819b95fc100b2020375f8d..698749f42cede798af409cddc2f3575210127f2c 100644 (file)
@@ -427,6 +427,11 @@ int irq_domain_translate_twocell(struct irq_domain *d,
                                 unsigned long *out_hwirq,
                                 unsigned int *out_type);
 
+int irq_domain_translate_onecell(struct irq_domain *d,
+                                struct irq_fwspec *fwspec,
+                                unsigned long *out_hwirq,
+                                unsigned int *out_type);
+
 /* IPI functions */
 int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
 int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
index 29dce6ff6bae88fd8c367a47b0fca1eb0f790481..ce44b687d02ba16d0b10334cf02b3400b2ee2b4a 100644 (file)
@@ -457,7 +457,7 @@ struct jbd2_revoke_table_s;
  * @h_journal: Which journal handle belongs to - used iff h_reserved set.
  * @h_rsv_handle: Handle reserved for finishing the logical operation.
  * @h_total_credits: Number of remaining buffers we are allowed to add to
-      journal. These are dirty buffers and revoke descriptor blocks.
+ *    journal. These are dirty buffers and revoke descriptor blocks.
  * @h_revoke_credits: Number of remaining revoke records available for handle
  * @h_ref: Reference count on this handle.
  * @h_err: Field for caller's use to track errors through large fs operations.
index 4f404c565db1baa7688e3a4527b9523da009c64d..e18fe54969e97ede27c426277d84601d0c6aec6b 100644 (file)
@@ -205,20 +205,23 @@ static inline void *kasan_reset_tag(const void *addr)
 #endif /* CONFIG_KASAN_SW_TAGS */
 
 #ifdef CONFIG_KASAN_VMALLOC
-int kasan_populate_vmalloc(unsigned long requested_size,
-                          struct vm_struct *area);
-void kasan_poison_vmalloc(void *start, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+void kasan_poison_vmalloc(const void *start, unsigned long size);
+void kasan_unpoison_vmalloc(const void *start, unsigned long size);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end);
 #else
-static inline int kasan_populate_vmalloc(unsigned long requested_size,
-                                        struct vm_struct *area)
+static inline int kasan_populate_vmalloc(unsigned long start,
+                                       unsigned long size)
 {
        return 0;
 }
 
-static inline void kasan_poison_vmalloc(void *start, unsigned long size) {}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
+{ }
 static inline void kasan_release_vmalloc(unsigned long start,
                                         unsigned long end,
                                         unsigned long free_region_start,
index 3adcb39fa6f5c7e3acb75c8bf3c9ecebc3b1aaa6..0d9db2a14f4444d428c096696dbff51ac7ce5d49 100644 (file)
  */
 #define round_down(x, y) ((x) & ~__round_mask(x, y))
 
-/**
- * FIELD_SIZEOF - get the size of a struct's field
- * @t: the target struct
- * @f: the target struct's field
- * Return: the size of @f in the struct definition without having a
- * declared instance of @t.
- */
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-
 #define typeof_member(T, m)    typeof(((T*)0)->m)
 
 #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
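/*
 * [Editor's note] FIELD_SIZEOF() users in this series are converted to
 * sizeof_field(), which has the same semantics, e.g. (illustrative):
 *
 *	sizeof_field(struct kvm_vcpu, requests)
 */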
index 7ed1e2f8641e2f2d2d7f17266480b35450338f22..538c25e778c07eec9d26fdd1585247fe81de4544 100644 (file)
@@ -149,7 +149,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQUEST_ARCH_BASE     8
 
 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
-       BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
+       BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
 })
 #define KVM_ARCH_REQ(nr)           KVM_ARCH_REQ_FLAGS(nr, 0)
index d3bbfddf616aac6de2134e2adf1e8f416b9a9abe..2dbde119721df4b63982b3dab853ba9ab51ea822 100644 (file)
@@ -1175,6 +1175,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
                                        struct ata_taskfile *tf, u16 *id);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
 extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
+extern u64 ata_qc_get_active(struct ata_port *ap);
 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
 extern int ata_std_bios_param(struct scsi_device *sdev,
                              struct block_device *bdev,
index 85c92555e31f85f019354e54d6efb8e79c2aee17..884216db324640e7d5d9b47271b6cdc26d256aca 100644 (file)
 #define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)
 
+/**
+ * INIT_LIST_HEAD - Initialize a list_head structure
+ * @list: list_head structure to be initialized.
+ *
+ * Initializes the list_head to point to itself.  If it is a list header,
+ * the result is an empty list.
+ */
 static inline void INIT_LIST_HEAD(struct list_head *list)
 {
        WRITE_ONCE(list->next, list);
@@ -120,12 +127,6 @@ static inline void __list_del_clearprev(struct list_head *entry)
        entry->prev = NULL;
 }
 
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
- */
 static inline void __list_del_entry(struct list_head *entry)
 {
        if (!__list_del_entry_valid(entry))
@@ -134,6 +135,12 @@ static inline void __list_del_entry(struct list_head *entry)
        __list_del(entry->prev, entry->next);
 }
 
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
 static inline void list_del(struct list_head *entry)
 {
        __list_del_entry(entry);
@@ -157,8 +164,15 @@ static inline void list_replace(struct list_head *old,
        new->prev->next = new;
 }
 
+/**
+ * list_replace_init - replace old entry by new one and initialize the old one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
 static inline void list_replace_init(struct list_head *old,
-                                       struct list_head *new)
+                                    struct list_head *new)
 {
        list_replace(old, new);
        INIT_LIST_HEAD(old);
@@ -538,6 +552,16 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)
 
+/**
+ * list_for_each_continue - continue iteration over a list
+ * @pos:       the &struct list_head to use as a loop cursor.
+ * @head:      the head for your list.
+ *
+ * Continue to iterate over a list, resuming after the current position.
+ */
+#define list_for_each_continue(pos, head) \
+       for (pos = pos->next; pos != (head); pos = pos->next)
+
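/*
 * [Editor's illustrative sketch, not part of the patch] Resuming a scan
 * from a previously found position (start and match() are placeholders):
 *
 *	struct list_head *pos = start;
 *
 *	list_for_each_continue(pos, head) {
 *		if (match(pos))
 *			break;
 *	}
 */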
 /**
  * list_for_each_prev  -       iterate over a list backwards
  * @pos:       the &struct list_head to use as a loop cursor.
@@ -744,11 +768,36 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h)
        h->pprev = NULL;
 }
 
+/**
+ * hlist_unhashed - Has node been removed from list and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed
+ * state.  For example, hlist_nulls_del_init_rcu() does leave the
+ * node in unhashed state, but hlist_nulls_del() does not.
+ */
 static inline int hlist_unhashed(const struct hlist_node *h)
 {
        return !h->pprev;
 }
 
+/**
+ * hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
+ * @h: Node to be checked
+ *
+ * This variant of hlist_unhashed() must be used in lockless contexts
+ * to avoid potential load-tearing.  The READ_ONCE() is paired with the
+ * various WRITE_ONCE() in hlist helpers that are defined below.
+ */
+static inline int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+       return !READ_ONCE(h->pprev);
+}
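/*
 * [Editor's illustrative sketch, not part of the patch] A lockless
 * reader, e.g. under RCU (obj and do_something() are placeholders);
 * writers must use the WRITE_ONCE()-based helpers below for this check
 * to be meaningful:
 *
 *	rcu_read_lock();
 *	if (!hlist_unhashed_lockless(&obj->node))
 *		do_something(obj);
 *	rcu_read_unlock();
 */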
+
+/**
+ * hlist_empty - Is the specified hlist_head structure an empty hlist?
+ * @h: Structure to check.
+ */
 static inline int hlist_empty(const struct hlist_head *h)
 {
        return !READ_ONCE(h->first);
@@ -761,9 +810,16 @@ static inline void __hlist_del(struct hlist_node *n)
 
        WRITE_ONCE(*pprev, next);
        if (next)
-               next->pprev = pprev;
+               WRITE_ONCE(next->pprev, pprev);
 }
 
+/**
+ * hlist_del - Delete the specified hlist_node from its list
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in hashed state.  Use
+ * hlist_del_init() or similar instead to unhash @n.
+ */
 static inline void hlist_del(struct hlist_node *n)
 {
        __hlist_del(n);
@@ -771,6 +827,12 @@ static inline void hlist_del(struct hlist_node *n)
        n->pprev = LIST_POISON2;
 }
 
+/**
+ * hlist_del_init - Delete the specified hlist_node from its list and initialize
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in unhashed state.
+ */
 static inline void hlist_del_init(struct hlist_node *n)
 {
        if (!hlist_unhashed(n)) {
@@ -779,51 +841,83 @@ static inline void hlist_del_init(struct hlist_node *n)
        }
 }
 
+/**
+ * hlist_add_head - add a new entry at the beginning of the hlist
+ * @n: new entry to be added
+ * @h: hlist head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
 {
        struct hlist_node *first = h->first;
-       n->next = first;
+       WRITE_ONCE(n->next, first);
        if (first)
-               first->pprev = &n->next;
+               WRITE_ONCE(first->pprev, &n->next);
        WRITE_ONCE(h->first, n);
-       n->pprev = &h->first;
+       WRITE_ONCE(n->pprev, &h->first);
 }
 
-/* next must be != NULL */
+/**
+ * hlist_add_before - add a new entry before the one specified
+ * @n: new entry to be added
+ * @next: hlist node to add it before, which must be non-NULL
+ */
 static inline void hlist_add_before(struct hlist_node *n,
-                                       struct hlist_node *next)
+                                   struct hlist_node *next)
 {
-       n->pprev = next->pprev;
-       n->next = next;
-       next->pprev = &n->next;
+       WRITE_ONCE(n->pprev, next->pprev);
+       WRITE_ONCE(n->next, next);
+       WRITE_ONCE(next->pprev, &n->next);
        WRITE_ONCE(*(n->pprev), n);
 }
 
+/**
+ * hlist_add_behind - add a new entry after the one specified
+ * @n: new entry to be added
+ * @prev: hlist node to add it after, which must be non-NULL
+ */
 static inline void hlist_add_behind(struct hlist_node *n,
                                    struct hlist_node *prev)
 {
-       n->next = prev->next;
-       prev->next = n;
-       n->pprev = &prev->next;
+       WRITE_ONCE(n->next, prev->next);
+       WRITE_ONCE(prev->next, n);
+       WRITE_ONCE(n->pprev, &prev->next);
 
        if (n->next)
-               n->next->pprev  = &n->next;
+               WRITE_ONCE(n->next->pprev, &n->next);
 }
 
-/* after that we'll appear to be on some hlist and hlist_del will work */
+/**
+ * hlist_add_fake - create a fake hlist consisting of a single headless node
+ * @n: Node to make a fake list out of
+ *
+ * This makes @n appear to be its own predecessor on a headless hlist.
+ * The point of this is to allow things like hlist_del() to work correctly
+ * in cases where there is no list.
+ */
 static inline void hlist_add_fake(struct hlist_node *n)
 {
        n->pprev = &n->next;
 }
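/*
 * [Editor's illustrative sketch, not part of the patch] A node that may
 * or may not be on a real list can be made self-referential so that a
 * later hlist_del() is unconditionally safe:
 *
 *	hlist_add_fake(&obj->node);
 *	...
 *	hlist_del(&obj->node);
 *
 * The final hlist_del() works even though the node was never on a list.
 */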
 
+/**
+ * hlist_fake - Is this node a fake hlist?
+ * @h: Node to check for being a self-referential fake hlist.
+ */
 static inline bool hlist_fake(struct hlist_node *h)
 {
        return h->pprev == &h->next;
 }
 
-/*
+/**
+ * hlist_is_singular_node - is node the only element of the specified hlist?
+ * @n: Node to check for singularity.
+ * @h: Header for potentially singular list.
+ *
  * Check whether the node is the only node of the head without
- * accessing head:
+ * accessing head, thus avoiding unnecessary cache misses.
  */
 static inline bool
 hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
@@ -831,7 +925,11 @@ hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
        return !n->next && n->pprev == &h->first;
 }
 
-/*
+/**
+ * hlist_move_list - Move an hlist
+ * @old: hlist_head for old list.
+ * @new: hlist_head for new list.
+ *
  * Move a list from one list head to another. Fixup the pprev
  * reference of the first entry if it exists.
  */
index 3ef96743db8da3868744efc92d812c9b746bf9c7..fa6e8471bd2278f939b7084bd1b2655ef253df71 100644 (file)
@@ -56,11 +56,33 @@ static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
        return ((unsigned long)ptr) >> 1;
 }
 
+/**
+ * hlist_nulls_unhashed - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not.
+ */
 static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
 {
        return !h->pprev;
 }
 
+/**
+ * hlist_nulls_unhashed_lockless - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not.  Unlike hlist_nulls_unhashed(), this
+ * function may be used locklessly.
+ */
+static inline int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h)
+{
+       return !READ_ONCE(h->pprev);
+}
+
 static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
 {
        return is_a_nulls(READ_ONCE(h->first));
@@ -72,10 +94,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
        struct hlist_nulls_node *first = h->first;
 
        n->next = first;
-       n->pprev = &h->first;
+       WRITE_ONCE(n->pprev, &h->first);
        h->first = n;
        if (!is_a_nulls(first))
-               first->pprev = &n->next;
+               WRITE_ONCE(first->pprev, &n->next);
 }
 
 static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
@@ -85,13 +107,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
 
        WRITE_ONCE(*pprev, next);
        if (!is_a_nulls(next))
-               next->pprev = pprev;
+               WRITE_ONCE(next->pprev, pprev);
 }
 
 static inline void hlist_nulls_del(struct hlist_nulls_node *n)
 {
        __hlist_nulls_del(n);
-       n->pprev = LIST_POISON2;
+       WRITE_ONCE(n->pprev, LIST_POISON2);
 }
 
 /**
index 915330abf6e58fc2ecf494c498b7d266b0d56448..99d629fd994457589410ee08344f83b2c69cfdda 100644 (file)
@@ -74,6 +74,7 @@ struct common_audit_data {
 #define LSM_AUDIT_DATA_FILE    12
 #define LSM_AUDIT_DATA_IBPKEY  13
 #define LSM_AUDIT_DATA_IBENDPORT 14
+#define LSM_AUDIT_DATA_LOCKDOWN 15
        union   {
                struct path path;
                struct dentry *dentry;
@@ -93,6 +94,7 @@ struct common_audit_data {
                struct file *file;
                struct lsm_ibpkey_audit *ibpkey;
                struct lsm_ibendport_audit *ibendport;
+               int reason;
        } u;
        /* this union contains LSM specific data */
        union {
index 3a08ecdfca116d1cdbbbb383e10e282af7065749..ba0dca6aac6ee307d90b93e1f0dea40e4a5f82c7 100644 (file)
@@ -122,8 +122,8 @@ static inline bool movable_node_is_enabled(void)
 
 extern void arch_remove_memory(int nid, u64 start, u64 size,
                               struct vmem_altmap *altmap);
-extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
-                          unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+                          struct vmem_altmap *altmap);
 
 /* reasonably generic interface to expand the physical pages */
 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
@@ -342,6 +342,9 @@ extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+                                      unsigned long start_pfn,
+                                      unsigned long nr_pages);
 extern bool is_memblock_offlined(struct memory_block *mem);
 extern int sparse_add_section(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap);
index f84b9163c0ee60f47a4afa1a3f03930881e73859..7dfb63b8137355ef6cf83ebce12cc54ff7d91aa8 100644 (file)
 
 #define RTC_AL_SEC             0x0018
 
+#define RTC_AL_SEC_MASK        0x003f
+#define RTC_AL_MIN_MASK        0x003f
+#define RTC_AL_HOU_MASK        0x001f
+#define RTC_AL_DOM_MASK        0x001f
+#define RTC_AL_DOW_MASK        0x0007
+#define RTC_AL_MTH_MASK        0x000f
+#define RTC_AL_YEA_MASK        0x007f
+
 #define RTC_PDN2               0x002e
 #define RTC_PDN2_PWRON_ALARM   BIT(4)
 
index 739b7bf37eaa5aa615d44fee25de958f61cc8f0f..8ba042430d8e9314f1bd3f0934ddf51c263c2354 100644 (file)
@@ -79,9 +79,6 @@
 /* Some controllers have a CBSY bit */
 #define TMIO_MMC_HAVE_CBSY             BIT(11)
 
-/* Some controllers that support HS400 use 4 taps while others use 8. */
-#define TMIO_MMC_HAVE_4TAP_HS400       BIT(13)
-
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
 void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
index c97ea3b694e65b5b6b48d861d9b98ad388fae6f4..67f8451b9a12e456a7745d379d20bf5e89acdd14 100644 (file)
@@ -625,24 +625,19 @@ unsigned long vmalloc_to_pfn(const void *addr);
  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
  * is no special casing required.
  */
-static inline bool is_vmalloc_addr(const void *x)
-{
-#ifdef CONFIG_MMU
-       unsigned long addr = (unsigned long)x;
-
-       return addr >= VMALLOC_START && addr < VMALLOC_END;
-#else
-       return false;
-#endif
-}
 
 #ifndef is_ioremap_addr
 #define is_ioremap_addr(x) is_vmalloc_addr(x)
 #endif
 
 #ifdef CONFIG_MMU
+extern bool is_vmalloc_addr(const void *x);
 extern int is_vmalloc_or_module_addr(const void *x);
 #else
+static inline bool is_vmalloc_addr(const void *x)
+{
+       return false;
+}
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
        return 0;
@@ -2621,6 +2616,9 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
                               unsigned long size, pte_fn_t fn, void *data);
+extern int apply_to_existing_page_range(struct mm_struct *mm,
+                                  unsigned long address, unsigned long size,
+                                  pte_fn_t fn, void *data);
 
 #ifdef CONFIG_PAGE_POISONING
 extern bool page_poisoning_enabled(void);
@@ -2655,13 +2653,25 @@ static inline bool want_init_on_free(void)
               !page_poisoning_enabled();
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern void init_debug_pagealloc(void);
 #else
-DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+static inline void init_debug_pagealloc(void) {}
 #endif
+extern bool _debug_pagealloc_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
 
 static inline bool debug_pagealloc_enabled(void)
+{
+       return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+               _debug_pagealloc_enabled_early;
+}
+
+/*
+ * For use in fast paths after init_debug_pagealloc() has run, or where a
+ * false negative result is not harmful if called too early.
+ */
+static inline bool debug_pagealloc_enabled_static(void)
 {
        if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
                return false;
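/*
 * [Editor's illustrative sketch, not part of the patch] Fast paths that
 * run after early init use the static-key variant (slab_debug_checks()
 * is a placeholder for the caller's debug work):
 *
 *	if (debug_pagealloc_enabled_static())
 *		slab_debug_checks(page);
 */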
index 0de3d7c016cdae8cf2b6ec1c78e7619181b2409e..4ae2f2908f9938a65b7b5a612cd5390f4dcf6c53 100644 (file)
@@ -17,10 +17,9 @@ int mmc_gpio_get_ro(struct mmc_host *host);
 int mmc_gpio_get_cd(struct mmc_host *host);
 int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
                         unsigned int idx, bool override_active_level,
-                        unsigned int debounce, bool *gpio_invert);
+                        unsigned int debounce);
 int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
-                        unsigned int idx,
-                        unsigned int debounce, bool *gpio_invert);
+                        unsigned int idx, unsigned int debounce);
 void mmc_gpio_set_cd_isr(struct mmc_host *host,
                         irqreturn_t (*isr)(int irq, void *dev_id));
 int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
index 89d8ff06c9ce0f98315697a70554726cf20ae391..5334ad8fc7bd9613e2ace8a7b3f2de14fa829a4a 100644 (file)
@@ -215,9 +215,8 @@ enum node_stat_item {
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
-       NR_SLAB_RECLAIMABLE,    /* Please do not reorder this item */
-       NR_SLAB_UNRECLAIMABLE,  /* and this one without looking at
-                                * memcg_flush_percpu_vmstats() first. */
+       NR_SLAB_RECLAIMABLE,
+       NR_SLAB_UNRECLAIMABLE,
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        WORKINGSET_NODES,
index 5714fd35a83c43ca40c854ef5b1ddf8350da8aef..e3596db077dc598f3a44c2e47086d0b017d4e551 100644 (file)
@@ -587,9 +587,9 @@ struct platform_device_id {
 #define MDIO_NAME_SIZE         32
 #define MDIO_MODULE_PREFIX     "mdio:"
 
-#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
+#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
 #define MDIO_ID_ARGS(_id) \
-       (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1,   \
+       ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
        ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
        ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
        ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
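/*
 * [Editor's illustrative sketch, not part of the patch] The format/args
 * pair prints the 32-bit PHY ID as a bit pattern, now with every
 * position masked to one bit (phydev is a placeholder):
 *
 *	pr_info("phy id: " MDIO_ID_FMT "\n", MDIO_ID_ARGS(phydev->phy_id));
 */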
index bd165ba68617572d7496537ed7c04c94d3ad952e..0c7366c317bd81dcb3a676d73a5fd492a4042cba 100644 (file)
@@ -849,13 +849,9 @@ extern int module_sysfs_initialized;
 #define __MODULE_STRING(x) __stringify(x)
 
 #ifdef CONFIG_STRICT_MODULE_RWX
-extern void set_all_modules_text_rw(void);
-extern void set_all_modules_text_ro(void);
 extern void module_enable_ro(const struct module *mod, bool after_init);
 extern void module_disable_ro(const struct module *mod);
 #else
-static inline void set_all_modules_text_rw(void) { }
-static inline void set_all_modules_text_ro(void) { }
 static inline void module_enable_ro(const struct module *mod, bool after_init) { }
 static inline void module_disable_ro(const struct module *mod) { }
 #endif
index ecc88a41792a3a959bc539f1eb341426accdd856..c04f690871ca18f5f15cd9c4e8cfee3b4ef77d0f 100644 (file)
@@ -40,7 +40,7 @@ typedef enum {
        FL_READING,
        FL_CACHEDPRG,
        /* These 4 come from onenand_state_t, which has been unified here */
-       FL_RESETING,
+       FL_RESETTING,
        FL_OTPING,
        FL_PREPARING_ERASE,
        FL_VERIFYING_ERASE,
index 7fe7b87a3ded914ab89fd021aed3bed9218ef5b2..07bfb08740330f90b121bcb81cc30436ef7c73aa 100644 (file)
@@ -34,7 +34,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 
 /* internal use only */
 #define LOOKUP_PARENT          0x0010
-#define LOOKUP_NO_REVAL                0x0080
 #define LOOKUP_JUMPED          0x1000
 #define LOOKUP_ROOT            0x2000
 #define LOOKUP_ROOT_GRABBED    0x0008
index 9ef20389622d7bbefbf5ab30a2897ba8b2290cb1..cac56fb59af85cf25c3e3b3e18faae58248c801d 100644 (file)
@@ -1775,7 +1775,7 @@ enum netdev_priv_flags {
  *                     for hardware timestamping
  *     @sfp_bus:       attached &struct sfp_bus structure.
  *     @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
-                              spinlock
+ *                            spinlock
  *     @qdisc_running_key:     lockdep class annotating Qdisc->running seqcount
  *     @qdisc_xmit_lock_key:   lockdep class annotating
  *                             netdev_queue->_xmit_lock spinlock
@@ -3698,6 +3698,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
 int dev_get_alias(const struct net_device *, char *, size_t);
 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 int __dev_set_mtu(struct net_device *, int);
+int dev_validate_mtu(struct net_device *dev, int mtu,
+                    struct netlink_ext_ack *extack);
 int dev_set_mtu_ext(struct net_device *dev, int mtu,
                    struct netlink_ext_ack *extack);
 int dev_set_mtu(struct net_device *, int);
index 4d8b1eaf7708ddbd274aa3fe6912fec34c420faa..908d38dbcb91f0b1e7040b230c2506fb3f67f14e 100644 (file)
@@ -426,13 +426,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
               sizeof(*addr));
 }
 
-/* Calculate the bytes required to store the inclusive range of a-b */
-static inline int
-bitmap_bytes(u32 a, u32 b)
-{
-       return 4 * ((((b - a + 8) / 8) + 3) / 4);
-}
-
 /* How often should the gc be run by default */
 #define IPSET_GC_TIME                  (3 * 60)
 
index cf09ab37b45b7d5990c388799b7f502d500cd182..851425c3178f178d3c325773f2a77fa9ec6e3c40 100644 (file)
@@ -31,7 +31,7 @@ struct nfnetlink_subsystem {
        const struct nfnl_callback *cb; /* callback for individual types */
        struct module *owner;
        int (*commit)(struct net *net, struct sk_buff *skb);
-       int (*abort)(struct net *net, struct sk_buff *skb);
+       int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
        void (*cleanup)(struct net *net);
        bool (*valid_genid)(struct net *net, u32 genid);
 };
index 2ae1b1a4d84d4429fdc653461590912e2af3906d..074f395b9ad25484871d2452325476253cb23932 100644 (file)
@@ -35,6 +35,8 @@ struct nsproxy {
        struct mnt_namespace *mnt_ns;
        struct pid_namespace *pid_ns_for_children;
        struct net           *net_ns;
+       struct time_namespace *time_ns;
+       struct time_namespace *time_ns_for_children;
        struct cgroup_namespace *cgroup_ns;
 };
 extern struct nsproxy init_nsproxy;
index 10f81629b9cecc71fbf3bb0d906f6389b2d4b963..6d0d70f3219c5b80fccb2314acf0d2bc8de7f129 100644 (file)
@@ -270,6 +270,8 @@ struct nvme_fc_remote_port {
  *
  * Host/Initiator Transport Entrypoints/Parameters:
  *
+ * @module:  The LLDD module using the interface
+ *
  * @localport_delete:  The LLDD initiates deletion of a localport via
  *       nvme_fc_deregister_localport(). However, the teardown is
  *       asynchronous. This routine is called upon the completion of the
@@ -383,6 +385,8 @@ struct nvme_fc_remote_port {
  *       Value is Mandatory. Allowed to be zero.
  */
 struct nvme_fc_port_template {
+       struct module   *module;
+
        /* initiator-based functions */
        void    (*localport_delete)(struct nvme_fc_local_port *);
        void    (*remoteport_delete)(struct nvme_fc_remote_port *);
index 99cefe6f5edb9d9e717f93f6e5c00dc0081b688b..491a2b7e77c1e90645180c13cdb34694f94d1196 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/of.h>
 
 #if IS_ENABLED(CONFIG_OF_MDIO)
+extern bool of_mdiobus_child_is_phy(struct device_node *child);
 extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
 extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
 extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -54,6 +55,11 @@ static inline int of_mdio_parse_addr(struct device *dev,
 }
 
 #else /* CONFIG_OF_MDIO */
+static inline bool of_mdiobus_child_is_phy(struct device_node *child)
+{
+       return false;
+}
+
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
        /*
index 2302d133af6fa49ed449082a6d8c6e7a726624b7..352c0d708720a02134ac79b41eb0e6037c9491f6 100644 (file)
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_19H_DF_F3    0x1653
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index 5032d453ac66a2a6d3643d6dc59b7dfaeb042346..dd4a91f1feaa8b0a44601e9a7ea2f95063a840c3 100644 (file)
@@ -1000,7 +1000,7 @@ int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
 int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
                     u16 mask, u16 set);
 
-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
                                     bool is_c45,
                                     struct phy_c45_device_ids *c45_ids);
 #if IS_ENABLED(CONFIG_PHYLIB)
index 3d507a8a698929f6a2cbab4ebace6bfa9f21847d..5c4d7a7551011da675fd47f996ab73201a988a7b 100644 (file)
@@ -14,7 +14,7 @@ struct phy_device;
 #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE      11
 
 #define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
-                                      FIELD_SIZEOF(struct mdio_device, addr)+\
+                                      sizeof_field(struct mdio_device, addr)+\
                                       PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
 
 struct phy_led_trigger {
index 7f8c7d9583d36e6d2755aa8d6d4f29d5a99e5b7e..019fecd75d0cfb2ec3652cfa856504318ca0daee 100644 (file)
@@ -40,6 +40,7 @@ extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
 
 extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
 extern void devm_pinctrl_put(struct pinctrl *p);
+extern int pinctrl_select_default_state(struct device *dev);
 
 #ifdef CONFIG_PM
 extern int pinctrl_pm_select_default_state(struct device *dev);
@@ -122,6 +123,11 @@ static inline void devm_pinctrl_put(struct pinctrl *p)
 {
 }
 
+static inline int pinctrl_select_default_state(struct device *dev)
+{
+       return 0;
+}
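/*
 * [Editor's illustrative sketch, not part of the patch] A probe path can
 * now select the default pin state directly (pdev is a placeholder for
 * the probing device):
 *
 *	ret = pinctrl_select_default_state(&pdev->dev);
 *	if (ret)
 *		return ret;
 */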
+
 static inline int pinctrl_pm_select_default_state(struct device *dev)
 {
        return 0;
index 6d54fe3bcac9c934e679d53d8101e3da81a88d4e..b8da8aef24464fdcea202f9cb20f5393e32a8a11 100644 (file)
@@ -101,6 +101,7 @@ struct mlxreg_core_data {
  * @aggr_mask: group aggregation mask;
  * @reg: group interrupt status register;
  * @mask: group interrupt mask;
+ * @capability: group capability register;
 * @cache: last status value for elements from the same group;
  * @count: number of available elements in the group;
  * @ind: element's index inside the group;
@@ -112,6 +113,7 @@ struct mlxreg_core_item {
        u32 aggr_mask;
        u32 reg;
        u32 mask;
+       u32 capability;
        u32 cache;
        u8 count;
        u8 ind;
index 0b938047514445e3b017f93589ee1e55d74b4555..8cfe570fdece617e997da7ce3950544e6e4d9c7a 100644 (file)
@@ -49,6 +49,7 @@ struct sysc_regbits {
        s8 emufree_shift;
 };
 
+#define SYSC_QUIRK_FORCE_MSTANDBY      BIT(20)
 #define SYSC_MODULE_QUIRK_AESS         BIT(19)
 #define SYSC_MODULE_QUIRK_SGX          BIT(18)
 #define SYSC_MODULE_QUIRK_HDQ1W                BIT(17)
index 60249e22e8441994d7b8f89c6d84ebc5e71994c7..d39fc658c3205c7456f116c7a692645bb897a342 100644 (file)
@@ -58,6 +58,7 @@
 #define ASUS_WMI_DEVID_LIGHT_SENSOR    0x00050022 /* ?? */
 #define ASUS_WMI_DEVID_LIGHTBAR                0x00050025
 #define ASUS_WMI_DEVID_FAN_BOOST_MODE  0x00110018
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
 
 /* Misc */
 #define ASUS_WMI_DEVID_CAMERA          0x00060013
index 08468fca5ea2e18619346e07dc0b3e76e6779d86..1ea5bae708a154c739d40c64c716128513adb84f 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _PMBUS_H_
 #define _PMBUS_H_
 
+#include <linux/bits.h>
+
 /* flags */
 
 /*
  * communication errors for no explicable reason. For such chips, checking
  * the status register must be disabled.
  */
-#define PMBUS_SKIP_STATUS_CHECK        (1 << 0)
+#define PMBUS_SKIP_STATUS_CHECK        BIT(0)
+
+/*
+ * PMBUS_WRITE_PROTECTED
+ * Set if the chip is write protected and write protection is not determined
+ * by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_WRITE_PROTECTED  BIT(1)
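/*
 * [Editor's illustrative sketch, not part of the patch] A board whose
 * chip is hard-wired write protected could pass (drv_pdata is a
 * placeholder name):
 *
 *	static struct pmbus_platform_data drv_pdata = {
 *		.flags = PMBUS_WRITE_PROTECTED,
 *	};
 */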
 
 struct pmbus_platform_data {
        u32 flags;              /* Device specific flags */
index fe6cfdcfbc260280bf941ea666109e275e1d7464..468328b1e1dd583fedfdd67e4f85bfd94d43c31d 100644 (file)
@@ -69,29 +69,32 @@ struct posix_clock_operations {
  *
  * @ops:     Functional interface to the clock
  * @cdev:    Character device instance for this clock
- * @kref:    Reference count.
+ * @dev:     Pointer to the clock's device.
  * @rwsem:   Protects the 'zombie' field from concurrent access.
  * @zombie:  If 'zombie' is true, then the hardware has disappeared.
- * @release: A function to free the structure when the reference count reaches
- *           zero. May be NULL if structure is statically allocated.
  *
  * Drivers should embed their struct posix_clock within a private
  * structure, obtaining a reference to it during callbacks using
  * container_of().
+ *
+ * Drivers should supply an initialized but not exposed struct device
+ * to posix_clock_register(). It is used to manage the lifetime of the
+ * driver's private structure. Its 'release' field should be set to
+ * a release function for this private structure.
  */
 struct posix_clock {
        struct posix_clock_operations ops;
        struct cdev cdev;
-       struct kref kref;
+       struct device *dev;
        struct rw_semaphore rwsem;
        bool zombie;
-       void (*release)(struct posix_clock *clk);
 };
 
 /**
  * posix_clock_register() - register a new clock
- * @clk:   Pointer to the clock. Caller must provide 'ops' and 'release'
- * @devid: Allocated device id
+ * @clk:   Pointer to the clock. Caller must provide the 'ops' field
+ * @dev:   Pointer to the initialized device. Caller must provide
+ *         the 'release' field
  *
  * A clock driver calls this function to register itself with the
  * clock device subsystem. If 'clk' points to dynamically allocated
@@ -100,7 +103,7 @@ struct posix_clock {
  *
  * Returns zero on success, non-zero otherwise.
  */
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
+int posix_clock_register(struct posix_clock *clk, struct device *dev);
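/*
 * [Editor's illustrative sketch, not part of the patch] A driver embeds
 * both the clock and the device in its private structure and frees it
 * from the device release callback (struct drv_clock is a placeholder):
 *
 *	static void drv_clock_release(struct device *dev)
 *	{
 *		kfree(container_of(dev, struct drv_clock, dev));
 *	}
 *
 *	clk->dev.release = drv_clock_release;
 *	device_initialize(&clk->dev);
 *	err = posix_clock_register(&clk->clock, &clk->dev);
 */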
 
 /**
  * posix_clock_unregister() - unregister a clock
index c09d67edda3a108eb507e02a0b1f7b0c1d68410f..1e6108b8d15fc22a2dd93a69d02028606996d831 100644 (file)
@@ -302,9 +302,8 @@ extern int kptr_restrict;
        printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning(fmt, ...) \
+#define pr_warn(fmt, ...) \
        printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn pr_warning
 #define pr_notice(fmt, ...) \
        printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
index d31cb6215905c2f719f2393185b2e3ea3d05d493..d312e6281e69e8095605d12f46329679a6d356ba 100644 (file)
@@ -32,6 +32,8 @@ extern const struct proc_ns_operations pidns_for_children_operations;
 extern const struct proc_ns_operations userns_operations;
 extern const struct proc_ns_operations mntns_operations;
 extern const struct proc_ns_operations cgroupns_operations;
+extern const struct proc_ns_operations timens_operations;
+extern const struct proc_ns_operations timens_for_children_operations;
 
 /*
  * We always define these enumerators
@@ -43,6 +45,7 @@ enum {
        PROC_USER_INIT_INO      = 0xEFFFFFFDU,
        PROC_PID_INIT_INO       = 0xEFFFFFFCU,
        PROC_CGROUP_INIT_INO    = 0xEFFFFFFBU,
+       PROC_TIME_INIT_INO      = 0xEFFFFFFAU,
 };
 
 #ifdef CONFIG_PROC_FS
index 48335288c2a96db23f6de5888b31282f315d8ec5..d86de017c689c1278044a454d46e077e7aadccc6 100644 (file)
@@ -22,6 +22,7 @@ enum dev_prop_type {
        DEV_PROP_U32,
        DEV_PROP_U64,
        DEV_PROP_STRING,
+       DEV_PROP_REF,
 };
 
 enum dev_dma_attr {
@@ -223,28 +224,42 @@ static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode,
        return fwnode_property_read_u64_array(fwnode, propname, NULL, 0);
 }
 
+struct software_node;
+
+/**
+ * struct software_node_ref_args - Reference property with additional arguments
+ * @node: Reference to a software node
+ * @nargs: Number of elements in @args array
+ * @args: Integer arguments
+ */
+struct software_node_ref_args {
+       const struct software_node *node;
+       unsigned int nargs;
+       u64 args[NR_FWNODE_REFERENCE_ARGS];
+};
+
 /**
  * struct property_entry - "Built-in" device property representation.
  * @name: Name of the property.
  * @length: Length of data making up the value.
- * @is_array: True when the property is an array.
+ * @is_inline: True when the property value is stored inline.
  * @type: Type of the data in unions.
- * @pointer: Pointer to the property (an array of items of the given type).
- * @value: Value of the property (when it is a single item of the given type).
+ * @pointer: Pointer to the property when it is not stored inline.
+ * @value: Value of the property when it is stored inline.
  */
 struct property_entry {
        const char *name;
        size_t length;
-       bool is_array;
+       bool is_inline;
        enum dev_prop_type type;
        union {
                const void *pointer;
                union {
-                       u8 u8_data;
-                       u16 u16_data;
-                       u32 u32_data;
-                       u64 u64_data;
-                       const char *str;
+                       u8 u8_data[sizeof(u64) / sizeof(u8)];
+                       u16 u16_data[sizeof(u64) / sizeof(u16)];
+                       u32 u32_data[sizeof(u64) / sizeof(u32)];
+                       u64 u64_data[sizeof(u64) / sizeof(u64)];
+                       const char *str[sizeof(u64) / sizeof(char *)];
                } value;
        };
 };
@@ -256,17 +271,22 @@ struct property_entry {
  */
 
 #define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_)                          \
-       sizeof(((struct property_entry *)NULL)->value._elem_)
+       sizeof(((struct property_entry *)NULL)->value._elem_[0])
 
-#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
+#define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_,    \
+                                         _val_, _len_)                 \
 (struct property_entry) {                                              \
        .name = _name_,                                                 \
-       .length = (_len_) * __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_),      \
-       .is_array = true,                                               \
+       .length = (_len_) * (_elsize_),                                 \
        .type = DEV_PROP_##_Type_,                                      \
        { .pointer = _val_ },                                           \
 }
 
+#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
+       __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_,                       \
+                               __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_),  \
+                               _Type_, _val_, _len_)
+
 #define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_)              \
        __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_)
 #define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_)             \
@@ -277,6 +297,10 @@ struct property_entry {
        __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_)
 #define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_)          \
        __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_)
+#define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_)             \
+       __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_,                       \
+                               sizeof(struct software_node_ref_args),  \
+                               REF, _val_, _len_)
 
 #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_)                         \
        PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
@@ -288,13 +312,16 @@ struct property_entry {
        PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
 #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_)                     \
        PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_)                        \
+       PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
 
 #define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_)                \
 (struct property_entry) {                                              \
        .name = _name_,                                                 \
        .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_),                \
+       .is_inline = true,                                              \
        .type = DEV_PROP_##_Type_,                                      \
-       { .value = { ._elem_ = _val_ } },                               \
+       { .value = { ._elem_[0] = _val_ } },                            \
 }
 
 #define PROPERTY_ENTRY_U8(_name_, _val_)                               \
@@ -311,6 +338,19 @@ struct property_entry {
 #define PROPERTY_ENTRY_BOOL(_name_)            \
 (struct property_entry) {                      \
        .name = _name_,                         \
+       .is_inline = true,                      \
+}
+
+#define PROPERTY_ENTRY_REF(_name_, _ref_, ...)                         \
+(struct property_entry) {                                              \
+       .name = _name_,                                                 \
+       .length = sizeof(struct software_node_ref_args),                \
+       .type = DEV_PROP_REF,                                           \
+       { .pointer = &(const struct software_node_ref_args) {           \
+               .node = _ref_,                                          \
+               .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+               .args = { __VA_ARGS__ },                                \
+       } },                                                            \
 }
 
 struct property_entry *
@@ -376,44 +416,16 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 /* -------------------------------------------------------------------------- */
 /* Software fwnode support - when HW description is incomplete or missing */
 
-struct software_node;
-
-/**
- * struct software_node_ref_args - Reference with additional arguments
- * @node: Reference to a software node
- * @nargs: Number of elements in @args array
- * @args: Integer arguments
- */
-struct software_node_ref_args {
-       const struct software_node *node;
-       unsigned int nargs;
-       u64 args[NR_FWNODE_REFERENCE_ARGS];
-};
-
-/**
- * struct software_node_reference - Named software node reference property
- * @name: Name of the property
- * @nrefs: Number of elements in @refs array
- * @refs: Array of references with optional arguments
- */
-struct software_node_reference {
-       const char *name;
-       unsigned int nrefs;
-       const struct software_node_ref_args *refs;
-};
-
 /**
  * struct software_node - Software node description
  * @name: Name of the software node
  * @parent: Parent of the software node
  * @properties: Array of device properties
- * @references: Array of software node reference properties
  */
 struct software_node {
        const char *name;
        const struct software_node *parent;
        const struct property_entry *properties;
-       const struct software_node_reference *references;
 };
 
 bool is_software_node(const struct fwnode_handle *fwnode);
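To make the reworked macros concrete, here is a sketch of a property table using the new reference entries; the node and property names are hypothetical, not taken from the patch. The nargs count in PROPERTY_ENTRY_REF() is derived from the number of variadic arguments via the (u64[]){ 0, ##__VA_ARGS__ } compound-literal trick.

/* Sketch: "demo-gpio", "demo-device" and the property names are hypothetical. */
static const struct software_node demo_gpio_node = {
	.name = "demo-gpio",
};

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	/* Reference with two args: the macro computes nargs as 2. */
	PROPERTY_ENTRY_REF("enable-gpios", &demo_gpio_node, 0, 1),
	{ }
};

static const struct software_node demo_node = {
	.name		= "demo-device",
	.properties	= demo_props,
};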
index 0832c9b66852ebd604417683c29c3d0a207cf813..154e954b711dbf16fe122c27ab79ddc89148e6ea 100644 (file)
@@ -27,8 +27,8 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 
 #include <errno.h>
 #include <inttypes.h>
-#include <limits.h>
 #include <stddef.h>
+#include <string.h>
 #include <sys/mman.h>
 #include <sys/time.h>
 #include <sys/types.h>
@@ -44,6 +44,9 @@ typedef uint64_t u64;
 #ifndef PAGE_SIZE
 # define PAGE_SIZE 4096
 #endif
+#ifndef PAGE_SHIFT
+# define PAGE_SHIFT 12
+#endif
 extern const char raid6_empty_zero_page[PAGE_SIZE];
 
 #define __init
@@ -59,7 +62,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
 #define enable_kernel_altivec()
 #define disable_kernel_altivec()
 
+#undef EXPORT_SYMBOL
 #define EXPORT_SYMBOL(sym)
+#undef EXPORT_SYMBOL_GPL
 #define EXPORT_SYMBOL_GPL(sym)
 #define MODULE_LICENSE(licence)
 #define MODULE_DESCRIPTION(desc)
index 646759042333cfb045ef488555c595b8e85ddd07..b36afe7b22c9a6b4f08ac0d3d04d6d0a30b57da8 100644 (file)
@@ -22,7 +22,6 @@ struct rcu_cblist {
        struct rcu_head *head;
        struct rcu_head **tail;
        long len;
-       long len_lazy;
 };
 
 #define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
@@ -73,7 +72,6 @@ struct rcu_segcblist {
 #else
        long len;
 #endif
-       long len_lazy;
        u8 enabled;
        u8 offloaded;
 };
index 4158b72129369fb54e7a617362108039f68986a0..9f313e4999fea1b7571c64045a20b3dc2d0236d0 100644 (file)
@@ -40,6 +40,16 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
  */
 #define list_next_rcu(list)    (*((struct list_head __rcu **)(&(list)->next)))
 
+/**
+ * list_tail_rcu - returns the prev pointer of the head of the list
+ * @head: the head of the list
+ *
+ * Note: This should only be used with the list header, and even then
+ * only if list_del() and similar primitives are not also used on the
+ * list header.
+ */
+#define list_tail_rcu(head)    (*((struct list_head __rcu **)(&(head)->prev)))
+
 /*
  * Check during list traversal that we are within an RCU reader
  */
@@ -173,7 +183,7 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
 {
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
-               n->pprev = NULL;
+               WRITE_ONCE(n->pprev, NULL);
        }
 }
 
@@ -361,7 +371,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @pos:       the type * to use as a loop cursor.
  * @head:      the head for your list.
  * @member:    the name of the list_head within the struct.
- * @cond:      optional lockdep expression if called from non-RCU protection.
+ * @cond...:   optional lockdep expression if called from non-RCU protection.
  *
  * This list-traversal primitive may safely run concurrently with
  * the _rcu list-mutation primitives such as list_add_rcu()
@@ -473,7 +483,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 static inline void hlist_del_rcu(struct hlist_node *n)
 {
        __hlist_del(n);
-       n->pprev = LIST_POISON2;
+       WRITE_ONCE(n->pprev, LIST_POISON2);
 }
 
 /**
@@ -489,11 +499,11 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
        struct hlist_node *next = old->next;
 
        new->next = next;
-       new->pprev = old->pprev;
+       WRITE_ONCE(new->pprev, old->pprev);
        rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
        if (next)
-               new->next->pprev = &new->next;
-       old->pprev = LIST_POISON2;
+               WRITE_ONCE(new->next->pprev, &new->next);
+       WRITE_ONCE(old->pprev, LIST_POISON2);
 }
 
 /*
@@ -528,10 +538,10 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
        struct hlist_node *first = h->first;
 
        n->next = first;
-       n->pprev = &h->first;
+       WRITE_ONCE(n->pprev, &h->first);
        rcu_assign_pointer(hlist_first_rcu(h), n);
        if (first)
-               first->pprev = &n->next;
+               WRITE_ONCE(first->pprev, &n->next);
 }
 
 /**
@@ -564,7 +574,7 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
 
        if (last) {
                n->next = last->next;
-               n->pprev = &last->next;
+               WRITE_ONCE(n->pprev, &last->next);
                rcu_assign_pointer(hlist_next_rcu(last), n);
        } else {
                hlist_add_head_rcu(n, h);
@@ -592,10 +602,10 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
 static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
 {
-       n->pprev = next->pprev;
+       WRITE_ONCE(n->pprev, next->pprev);
        n->next = next;
        rcu_assign_pointer(hlist_pprev_rcu(n), n);
-       next->pprev = &n->next;
+       WRITE_ONCE(next->pprev, &n->next);
 }
 
 /**
@@ -620,10 +630,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
                                        struct hlist_node *prev)
 {
        n->next = prev->next;
-       n->pprev = &prev->next;
+       WRITE_ONCE(n->pprev, &prev->next);
        rcu_assign_pointer(hlist_next_rcu(prev), n);
        if (n->next)
-               n->next->pprev = &n->next;
+               WRITE_ONCE(n->next->pprev, &n->next);
 }
 
 #define __hlist_for_each_rcu(pos, head)                                \
@@ -636,7 +646,7 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @pos:       the type * to use as a loop cursor.
  * @head:      the head for your list.
  * @member:    the name of the hlist_node within the struct.
- * @cond:      optional lockdep expression if called from non-RCU protection.
+ * @cond...:   optional lockdep expression if called from non-RCU protection.
  *
  * This list-traversal primitive may safely run concurrently with
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
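As a usage sketch for the optional @cond argument documented above (the lock, list, and helpers are hypothetical), update-side code can pass a lockdep expression instead of holding rcu_read_lock():

static void demo_walk(void)
{
	struct demo_entry *pos;

	/* Legal either under rcu_read_lock() or, as here, while holding
	 * demo_lock; lockdep checks whichever condition applies.
	 */
	list_for_each_entry_rcu(pos, &demo_list, node,
				lockdep_is_held(&demo_lock))
		demo_process(pos);
}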
index bc8206a8f30e6b0c2172e3bc3da3a85cab536cdd..e5b752027a031b119ce09a982b93f935c736c93d 100644 (file)
@@ -34,13 +34,21 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
 {
        if (!hlist_nulls_unhashed(n)) {
                __hlist_nulls_del(n);
-               n->pprev = NULL;
+               WRITE_ONCE(n->pprev, NULL);
        }
 }
 
+/**
+ * hlist_nulls_first_rcu - returns the first element of the hash list.
+ * @head: the head of the list.
+ */
 #define hlist_nulls_first_rcu(head) \
        (*((struct hlist_nulls_node __rcu __force **)&(head)->first))
 
+/**
+ * hlist_nulls_next_rcu - returns the element of the list after @node.
+ * @node: element of the list.
+ */
 #define hlist_nulls_next_rcu(node) \
        (*((struct hlist_nulls_node __rcu __force **)&(node)->next))
 
@@ -66,7 +74,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
 static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
 {
        __hlist_nulls_del(n);
-       n->pprev = LIST_POISON2;
+       WRITE_ONCE(n->pprev, LIST_POISON2);
 }
 
 /**
@@ -94,17 +102,54 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
        struct hlist_nulls_node *first = h->first;
 
        n->next = first;
-       n->pprev = &h->first;
+       WRITE_ONCE(n->pprev, &h->first);
        rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
        if (!is_a_nulls(first))
-               first->pprev = &n->next;
+               WRITE_ONCE(first->pprev, &n->next);
+}
+
+/**
+ * hlist_nulls_add_tail_rcu - add an element to the end of the hash list
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+                                           struct hlist_nulls_head *h)
+{
+       struct hlist_nulls_node *i, *last = NULL;
+
+       /* Note: write side code, so rcu accessors are not needed. */
+       for (i = h->first; !is_a_nulls(i); i = i->next)
+               last = i;
+
+       if (last) {
+               n->next = last->next;
+               n->pprev = &last->next;
+               rcu_assign_pointer(hlist_next_rcu(last), n);
+       } else {
+               hlist_nulls_add_head_rcu(n, h);
+       }
 }
 
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:      the type * to use as a loop cursor.
  * @pos:       the &struct hlist_nulls_node to use as a loop cursor.
- * @head:      the head for your list.
+ * @head:      the head of the list.
  * @member:    the name of the hlist_nulls_node within the struct.
  *
  * The barrier() is needed to make sure compiler doesn't cache first element [1],
@@ -124,7 +169,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  *   iterate over list of given type safe against removal of list entry
  * @tpos:      the type * to use as a loop cursor.
  * @pos:       the &struct hlist_nulls_node to use as a loop cursor.
- * @head:      the head for your list.
+ * @head:      the head of the list.
  * @member:    the name of the hlist_nulls_node within the struct.
  */
 #define hlist_nulls_for_each_entry_safe(tpos, pos, head, member)               \
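A minimal write-side sketch of the new tail helper (struct demo_bucket and struct demo_item are hypothetical); readers may traverse concurrently under rcu_read_lock():

static void demo_enqueue(struct demo_bucket *bucket, struct demo_item *item)
{
	/* Writers serialize among themselves with the bucket lock; the
	 * helper orders publication for concurrent RCU readers.
	 */
	spin_lock(&bucket->lock);
	hlist_nulls_add_tail_rcu(&item->nulls_node, &bucket->nulls_head);
	spin_unlock(&bucket->lock);
}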
index 0b7506330c87dc1df106c78ddb09b4dca9172864..2678a37c31696844a9f79615eb2e4906b61e68f9 100644 (file)
@@ -154,7 +154,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  *
  * This macro resembles cond_resched(), except that it is defined to
  * report potential quiescent states to RCU-tasks even if the cond_resched()
- * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ * machinery were to be shut off, as some advocate for PREEMPTION kernels.
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
@@ -167,7 +167,7 @@ do { \
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
 #include <linux/rcutree.h>
 #elif defined(CONFIG_TINY_RCU)
 #include <linux/rcutiny.h>
@@ -400,22 +400,6 @@ do {                                                                             \
        __tmp;                                                          \
 })
 
-/**
- * rcu_swap_protected() - swap an RCU and a regular pointer
- * @rcu_ptr: RCU pointer
- * @ptr: regular pointer
- * @c: the conditions under which the dereference will take place
- *
- * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and
- * @c is the argument that is passed to the rcu_dereference_protected() call
- * used to read that pointer.
- */
-#define rcu_swap_protected(rcu_ptr, ptr, c) do {                       \
-       typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));  \
-       rcu_assign_pointer((rcu_ptr), (ptr));                           \
-       (ptr) = __tmp;                                                  \
-} while (0)
-
 /**
  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
  * @p: The pointer to read
@@ -598,10 +582,10 @@ do {                                                                            \
  *
  * You can avoid reading and understanding the next paragraph by
  * following this rule: don't put anything in an rcu_read_lock() RCU
- * read-side critical section that would block in a !PREEMPT kernel.
+ * read-side critical section that would block in a !PREEMPTION kernel.
  * But if you want the full story, read on!
  *
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
  * it is illegal to block while in an RCU read-side critical section.
  * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
  * kernel builds, RCU read-side critical sections may be preempted,
@@ -912,4 +896,8 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
        return false;
 }
 
+/* kernel/ksysfs.c definitions */
+extern int rcu_expedited;
+extern int rcu_normal;
+
 #endif /* __LINUX_RCUPDATE_H */
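For orientation (a sketch, not part of the patch): callers of the removed rcu_swap_protected() are expected to use rcu_replace_pointer(), whose closing lines appear as context above; it returns the old pointer instead of swapping in place. Here demo_gp, demo_lock and struct demo_obj (assumed to embed a struct rcu_head named "rcu") are hypothetical.

static void demo_publish(struct demo_obj *newp)
{
	struct demo_obj *old;

	old = rcu_replace_pointer(demo_gp, newp,
				  lockdep_is_held(&demo_lock));
	kfree_rcu(old, rcu);	/* reclaim the old object after a grace period */
}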
index 37b6f0c2b79d95422acc33f255ea09c635c3862b..b2b2dc990da99b7cb0191a6c380dffccc0b58c3e 100644 (file)
@@ -85,6 +85,7 @@ static inline void rcu_scheduler_starting(void) { }
 static inline void rcu_end_inkernel_boot(void) { }
 static inline bool rcu_is_watching(void) { return true; }
 static inline void rcu_momentary_dyntick_idle(void) { }
+static inline void kfree_rcu_scheduler_running(void) { }
 
 /* Avoid RCU read-side critical sections leaking across. */
 static inline void rcu_all_qs(void) { barrier(); }
index c5147de885ec47abb94e9756cea867711b9297f7..2f787b9029d1ed0770da54db6d1a6b0ae89d7849 100644 (file)
@@ -38,6 +38,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 void rcu_barrier(void);
 bool rcu_eqs_special_set(int cpu);
 void rcu_momentary_dyntick_idle(void);
+void kfree_rcu_scheduler_running(void);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
 
index dfe493ac692d2bd890d281ba48e61a0a94ef5e61..f0a092a1a96db6629831fc1a188743daf4a668aa 100644 (file)
@@ -144,6 +144,51 @@ struct reg_sequence {
        __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
 })
 
+/**
+ * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops).
+ *            Should be less than ~10us since udelay is used
+ *            (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success, -ETIMEDOUT upon a timeout, or the regmap_read
+ * error return value in case of a read error. In the first two cases,
+ * the last value read at @addr is stored in @val.
+ *
+ * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
+ *
+ * Note: In general, regmap cannot be used in atomic context. If you want to
+ * use this macro, first set up your regmap for atomic use (flat or no cache,
+ * and MMIO).
+ */
+#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
+({ \
+       u64 __timeout_us = (timeout_us); \
+       unsigned long __delay_us = (delay_us); \
+       ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+       int __ret; \
+       for (;;) { \
+               __ret = regmap_read((map), (addr), &(val)); \
+               if (__ret) \
+                       break; \
+               if (cond) \
+                       break; \
+               if ((__timeout_us) && \
+                   ktime_compare(ktime_get(), __timeout) > 0) { \
+                       __ret = regmap_read((map), (addr), &(val)); \
+                       break; \
+               } \
+               if (__delay_us) \
+                       udelay(__delay_us); \
+       } \
+       __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
+
 /**
  * regmap_field_read_poll_timeout - Poll until a condition is met or timeout
  *
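A usage sketch of regmap_read_poll_timeout_atomic() under stated assumptions: DEMO_STATUS and DEMO_READY are hypothetical, and the regmap is set up for atomic use as the note above requires.

static int demo_wait_ready(struct regmap *map)
{
	unsigned int val;

	/* Poll DEMO_STATUS every 5 us until DEMO_READY is set, for at
	 * most 1000 us; safe in atomic context given an MMIO, flat or
	 * no-cache map.
	 */
	return regmap_read_poll_timeout_atomic(map, DEMO_STATUS, val,
					       val & DEMO_READY, 5, 1000);
}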
index 337a463915278cc000cbdb0d47b40ea4e5645a7d..6a92fd3105a31ef9162b5575cd91283bfccac57f 100644 (file)
@@ -287,6 +287,8 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
                                     const char *const *supply_names,
                                     unsigned int num_supplies);
 
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2);
+
 #else
 
 /*
@@ -593,6 +595,11 @@ regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
 {
 }
 
+static inline bool
+regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+       return false;
+}
 #endif
 
 static inline int regulator_set_voltage_triplet(struct regulator *regulator,
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
new file mode 100644 (file)
index 0000000..daf5cf6
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _RESCTRL_H
+#define _RESCTRL_H
+
+#ifdef CONFIG_PROC_CPU_RESCTRL
+
+int proc_resctrl_show(struct seq_file *m,
+                     struct pid_namespace *ns,
+                     struct pid *pid,
+                     struct task_struct *tsk);
+
+#endif
+
+#endif /* _RESCTRL_H */
index 467d2604641692c8a9f2c950f7b2083e374757bb..716ad1d8d95e1457dd1b42f51bebbf692e836830 100644 (file)
@@ -1929,11 +1929,11 @@ static inline void rseq_migrate(struct task_struct *t)
 
 /*
  * If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread.
+ * child inherits. Unregister rseq for a clone with CLONE_VM set.
  */
 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 {
-       if (clone_flags & CLONE_THREAD) {
+       if (clone_flags & CLONE_VM) {
                t->rseq = NULL;
                t->rseq_sig = 0;
                t->rseq_event_mask = 0;
index afa940cd50dc5355bed0815c0d7a0c20a548fb98..3ed5aa18593f2aa4d7274e6def1c970117568376 100644 (file)
@@ -9,9 +9,10 @@
  */
 
 #define SCHED_CPUFREQ_IOWAIT   (1U << 0)
-#define SCHED_CPUFREQ_MIGRATION        (1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy;
+
 struct update_util_data {
        void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
 };
@@ -20,6 +21,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
                        void (*func)(struct update_util_data *data, u64 time,
                                    unsigned int flags));
 void cpufreq_remove_update_util_hook(int cpu);
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
 
 static inline unsigned long map_util_freq(unsigned long util,
                                        unsigned long freq, unsigned long cap)
index 6c8512d3be88e4d1d34cbfa861fdbc3094ee925b..0fbcbacd1b2900bae38fd90f7aea73c04f06572d 100644 (file)
@@ -13,6 +13,7 @@ enum hk_flags {
        HK_FLAG_TICK            = (1 << 4),
        HK_FLAG_DOMAIN          = (1 << 5),
        HK_FLAG_WQ              = (1 << 6),
+       HK_FLAG_MANAGED_IRQ     = (1 << 7),
 };
 
 #ifdef CONFIG_CPU_ISOLATION
index 3e8d4bacd59de1d338e4b5f33ee47d502ddd7409..64b19f050343eef7b74b23c0626fde7a141cb8f6 100644 (file)
@@ -128,6 +128,8 @@ enum lockdown_reason {
        LOCKDOWN_CONFIDENTIALITY_MAX,
 };
 
+extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
+
 /* These functions are in security/commoncap.c */
 extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
                       int cap, unsigned int opts);
index ef7031f8a304f524c3651d4c83e8053a2f0f9e8e..14d61bba0b79bd0e8cc48c24fc84970546537a4f 100644 (file)
@@ -358,17 +358,22 @@ static inline void sk_psock_update_proto(struct sock *sk,
 static inline void sk_psock_restore_proto(struct sock *sk,
                                          struct sk_psock *psock)
 {
-       sk->sk_write_space = psock->saved_write_space;
+       sk->sk_prot->unhash = psock->saved_unhash;
 
        if (psock->sk_proto) {
                struct inet_connection_sock *icsk = inet_csk(sk);
                bool has_ulp = !!icsk->icsk_ulp_data;
 
-               if (has_ulp)
-                       tcp_update_ulp(sk, psock->sk_proto);
-               else
+               if (has_ulp) {
+                       tcp_update_ulp(sk, psock->sk_proto,
+                                      psock->saved_write_space);
+               } else {
                        sk->sk_prot = psock->sk_proto;
+                       sk->sk_write_space = psock->saved_write_space;
+               }
                psock->sk_proto = NULL;
+       } else {
+               sk->sk_write_space = psock->saved_write_space;
        }
 }
 
index 6fc856c9eda581fb4db0001cf2072db47bf31b1d..cbc9162689d0f56c3f0fa7b8776df10cf10c4482 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/llist.h>
 
 typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
 struct __call_single_data {
        struct llist_node llist;
        smp_call_func_t func;
@@ -49,13 +50,11 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * cond_func returns a positive value. This may include the local
  * processor.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-               smp_call_func_t func, void *info, bool wait,
-               gfp_t gfp_flags);
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+                     void *info, bool wait);
 
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-               smp_call_func_t func, void *info, bool wait,
-               gfp_t gfp_flags, const struct cpumask *mask);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+                          void *info, bool wait, const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
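Illustrating the trimmed signature (the per-cpu flag and callbacks are hypothetical): the gfp_t argument is gone because, as of this series, the implementation no longer allocates a temporary cpumask.

static DEFINE_PER_CPU(bool, demo_pending);

static bool demo_cond(int cpu, void *info)
{
	return per_cpu(demo_pending, cpu);	/* run only where pending */
}

static void demo_func(void *info)
{
	this_cpu_write(demo_pending, false);
}

static void demo_kick_pending_cpus(void)
{
	/* Call demo_func() on every CPU where demo_cond() returns true,
	 * waiting for completion; note there is no gfp_t parameter any more.
	 */
	on_each_cpu_cond(demo_cond, demo_func, NULL, true);
}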
 
diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h
new file mode 100644 (file)
index 0000000..26f73df
--- /dev/null
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * K3 Ring Accelerator (RA) subsystem interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __SOC_TI_K3_RINGACC_API_H_
+#define __SOC_TI_K3_RINGACC_API_H_
+
+#include <linux/types.h>
+
+struct device_node;
+
+/**
+ * enum k3_ring_mode - &struct k3_ring_cfg mode
+ *
+ * RA ring operational modes
+ *
+ * @K3_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access
+ * @K3_RINGACC_RING_MODE_MESSAGE: Messaging mode. Messaging mode requires
+ *     that all accesses to the queue go through this IP, so that all
+ *     accesses to the memory are controlled and ordered. This IP then
+ *     controls the entire state of the queue; SW has no direct control
+ *     (such as through doorbells) and cannot access the storage memory directly.
+ *     This is particularly useful when more than one SW or HW entity can be
+ *     the producer and/or consumer at the same time
+ * @K3_RINGACC_RING_MODE_CREDENTIALS: Credentials mode is message mode plus
+ *     stores credentials with each message, requiring the element size to be
+ *     doubled to fit the credentials. Any exposed memory should be protected
+ *     by a firewall from unwanted access
+ */
+enum k3_ring_mode {
+       K3_RINGACC_RING_MODE_RING = 0,
+       K3_RINGACC_RING_MODE_MESSAGE,
+       K3_RINGACC_RING_MODE_CREDENTIALS,
+       K3_RINGACC_RING_MODE_INVALID
+};
+
+/**
+ * enum k3_ring_size - &struct k3_ring_cfg elm_size
+ *
+ * RA ring element's sizes in bytes.
+ */
+enum k3_ring_size {
+       K3_RINGACC_RING_ELSIZE_4 = 0,
+       K3_RINGACC_RING_ELSIZE_8,
+       K3_RINGACC_RING_ELSIZE_16,
+       K3_RINGACC_RING_ELSIZE_32,
+       K3_RINGACC_RING_ELSIZE_64,
+       K3_RINGACC_RING_ELSIZE_128,
+       K3_RINGACC_RING_ELSIZE_256,
+       K3_RINGACC_RING_ELSIZE_INVALID
+};
+
+struct k3_ringacc;
+struct k3_ring;
+
+/**
+ * struct k3_ring_cfg - RA ring configuration structure
+ *
+ * @size: Ring size, number of elements
+ * @elm_size: Ring element size
+ * @mode: Ring operational mode
+ * @flags: Ring configuration flags. Possible values:
+ *      @K3_RINGACC_RING_SHARED: when set, allows the same ring to be
+ *      requested several times. Useful when the same ring is used as a
+ *      Free Host PD ring for different flows, for example.
+ *      Note: Locking should be done by consumer if required
+ *      Note: Locking should be done by consumer if required
+ */
+struct k3_ring_cfg {
+       u32 size;
+       enum k3_ring_size elm_size;
+       enum k3_ring_mode mode;
+#define K3_RINGACC_RING_SHARED BIT(1)
+       u32 flags;
+};
+
+#define K3_RINGACC_RING_ID_ANY (-1)
+
+/**
+ * of_k3_ringacc_get_by_phandle - find a RA by phandle property
+ * @np: device node
+ * @property: property name containing the phandle on the RA node
+ *
+ * Returns a pointer to the RA (struct k3_ringacc),
+ * or -ENODEV if not found,
+ * or -EPROBE_DEFER if not yet registered
+ */
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+                                               const char *property);
+
+#define K3_RINGACC_RING_USE_PROXY BIT(1)
+
+/**
+ * k3_ringacc_request_ring - request ring from ringacc
+ * @ringacc: pointer to the ringacc
+ * @id: ring id, or K3_RINGACC_RING_ID_ANY for any general purpose ring
+ * @flags:
+ *     @K3_RINGACC_RING_USE_PROXY: if set, a proxy will be allocated and
+ *             used to access the ring memory. Supported only for rings in
+ *             Message/Credentials/Queue mode.
+ *
+ * Returns a pointer to the ring (struct k3_ring),
+ * or NULL in case of failure.
+ */
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+                                       int id, u32 flags);
+
+/**
+ * k3_ringacc_ring_reset - ring reset
+ * @ring: pointer to the ring
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx).
+ */
+void k3_ringacc_ring_reset(struct k3_ring *ring);
+/**
+ * k3_ringacc_ring_reset_dma - ring reset for DMA rings
+ * @ring: pointer to the ring
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings
+ * which are read by K3 UDMA, like TX or Free Host PD rings.
+ */
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ);
+
+/**
+ * k3_ringacc_ring_free - ring free
+ * @ring: pointer to the ring
+ *
+ * Resets the ring and frees all allocated resources.
+ */
+int k3_ringacc_ring_free(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_get_ring_id - Get the Ring ID
+ * @ring: pointer to the ring
+ *
+ * Returns the Ring ID
+ */
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_get_ring_irq_num - Get the irq number for the ring
+ * @ring: pointer to the ring
+ *
+ * Returns the interrupt number which can be used to request the interrupt
+ */
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_cfg - ring configure
+ * @ring: pointer to the ring
+ * @cfg: Ring configuration parameters (see &struct k3_ring_cfg)
+ *
+ * Configures ring, including ring memory allocation.
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg);
+
+/**
+ * k3_ringacc_ring_get_size - get ring size
+ * @ring: pointer to the ring
+ *
+ * Returns ring size in number of elements.
+ */
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_get_free - get free elements
+ * @ring: pointer to the ring
+ *
+ * Returns number of free elements in the ring.
+ */
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_get_occ - get ring occupancy
+ * @ring: pointer to the ring
+ *
+ * Returns total number of valid entries on the ring
+ */
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_is_full - checks if ring is full
+ * @ring: pointer to the ring
+ *
+ * Returns true if the ring is full
+ */
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_push - push element to the ring tail
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Push one ring element to the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_pop - pop element from the ring head
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Pop one ring element from the ring head. Size of the ring element is
+ * determined by the ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_push_head - push element to the ring head
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Push one ring element to the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING
+ */
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_pop_tail - pop element from the ring tail
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Pop one ring element from the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING
+ */
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring);
+
+#endif /* __SOC_TI_K3_RINGACC_API_H_ */
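A minimal consumer sketch of the new API (error handling trimmed; the device node, the "ti,ringacc" phandle property name and the element size are assumptions, not taken from the patch):

static int demo_ring_roundtrip(struct device_node *np)
{
	struct k3_ringacc *ra;
	struct k3_ring *ring;
	struct k3_ring_cfg cfg = {
		.size		= 128,
		.elm_size	= K3_RINGACC_RING_ELSIZE_8,
		.mode		= K3_RINGACC_RING_MODE_RING,
	};
	u64 elem = 0xdeadbeefULL;

	ra = of_k3_ringacc_get_by_phandle(np, "ti,ringacc"); /* may be ERR_PTR */
	ring = k3_ringacc_request_ring(ra, K3_RINGACC_RING_ID_ANY, 0);
	k3_ringacc_ring_cfg(ring, &cfg);	/* allocates the ring memory */
	k3_ringacc_ring_push(ring, &elem);	/* enqueue at tail */
	k3_ringacc_ring_pop(ring, &elem);	/* dequeue from head */
	return k3_ringacc_ring_free(ring);
}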
index 98fe8663033af4534f9405655f177baab30d6251..6d16ba01ff5a2e2069607c4bf80f25f37fe661f6 100644 (file)
@@ -423,6 +423,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *     GPIO descriptors rather than using global GPIO numbers grabbed by the
  *     driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
  *     and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
+ *     fill in this field with the first unused native CS, to be used by SPI
+ *     controller drivers that need to drive a native CS when using GPIO CS.
+ * @max_native_cs: When cs_gpiods is used, and this field is filled in,
+ *     spi_register_controller() will validate all native CS (including the
+ *     unused native CS) against this value.
  * @statistics: statistics for the spi_controller
  * @dma_tx: DMA transmit channel
  * @dma_rx: DMA receive channel
@@ -624,6 +630,8 @@ struct spi_controller {
        int                     *cs_gpios;
        struct gpio_desc        **cs_gpiods;
        bool                    use_gpio_descriptors;
+       u8                      unused_native_cs;
+       u8                      max_native_cs;
 
        /* statistics */
        struct spi_statistics   statistics;
@@ -689,10 +697,10 @@ extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
 /* Helper calls for driver to timestamp transfer */
 void spi_take_timestamp_pre(struct spi_controller *ctlr,
                            struct spi_transfer *xfer,
-                           const void *tx, bool irqs_off);
+                           size_t progress, bool irqs_off);
 void spi_take_timestamp_post(struct spi_controller *ctlr,
                             struct spi_transfer *xfer,
-                            const void *tx, bool irqs_off);
+                            size_t progress, bool irqs_off);
 
 /* the spi driver core manages memory for the spi_controller classdev */
 extern struct spi_controller *__spi_alloc_controller(struct device *host,
index a3ecf2feadf20402e00f88b1634dbfd34497ad4f..284872ac130c0b806d658e21175dfae61ed5c0ca 100644 (file)
@@ -6,16 +6,12 @@
  * struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI
  * @freq:      input clock freq to the core.
  * @baudwidth: baud rate divider width of the core.
- * @gpio_cs_count:     number of gpio pins used for chipselect.
- * @gpio_cs:   array of gpio pins used for chipselect.
  *
  * freq and baudwidth are used only if the divider is programmable.
  */
 struct tiny_spi_platform_data {
        unsigned int freq;
        unsigned int baudwidth;
-       unsigned int gpio_cs_count;
-       int *gpio_cs;
 };
 
 #endif /* _LINUX_SPI_SPI_OC_TINY_H */
index f9a0c6189852e8aaba55501f5ecda0818819ecd9..76d8b09384a7aecc1a8a688bcdd332adee0b5fa5 100644 (file)
@@ -32,8 +32,6 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
 int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                         struct cpu_stop_work *work_buf);
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
 void stop_machine_park(int cpu);
 void stop_machine_unpark(int cpu);
 void stop_machine_yield(const struct cpumask *cpumask);
@@ -82,20 +80,6 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu,
        return false;
 }
 
-static inline int stop_cpus(const struct cpumask *cpumask,
-                           cpu_stop_fn_t fn, void *arg)
-{
-       if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
-               return stop_one_cpu(raw_smp_processor_id(), fn, arg);
-       return -ENOENT;
-}
-
-static inline int try_stop_cpus(const struct cpumask *cpumask,
-                               cpu_stop_fn_t fn, void *arg)
-{
-       return stop_cpus(cpumask, fn, arg);
-}
-
 #endif /* CONFIG_SMP */
 
 /*
index 6fc8843f1c9e93624302b5f28ab112df1a7ae0e7..4a230c2f1c317ab87c725afee60cb2a3e275a308 100644 (file)
@@ -329,6 +329,7 @@ extern void arch_suspend_disable_irqs(void);
 extern void arch_suspend_enable_irqs(void);
 
 extern int pm_suspend(suspend_state_t state);
+extern bool sync_on_suspend_enabled;
 #else /* !CONFIG_SUSPEND */
 #define suspend_valid_only_mem NULL
 
@@ -342,6 +343,7 @@ static inline bool pm_suspend_default_s2idle(void) { return false; }
 
 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline bool sync_on_suspend_enabled(void) { return true; }
 static inline bool idle_should_enter_s2idle(void) { return false; }
 static inline void __init pm_states_init(void) {}
 static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
index 85ec745767bd91b0728ef0acd6b813cbcf37a871..966146f7267a518b301da61396fdbd03fa473180 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * 10G controller driver for Samsung EXYNOS SoCs
+ * 10G controller driver for Samsung Exynos SoCs
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
index d0391cc2dae90611d9ec91187342202300efc01c..5262b7a76d392b60769a93dde860a30e4b23481f 100644 (file)
@@ -1231,8 +1231,6 @@ asmlinkage long sys_ni_syscall(void);
  * the ksys_xyzyyz() functions prototyped below.
  */
 
-int ksys_mount(const char __user *dev_name, const char __user *dir_name,
-              const char __user *type, unsigned long flags, void __user *data);
 int ksys_umount(char __user *name, int flags);
 int ksys_dup(unsigned int fildes);
 int ksys_chroot(const char __user *filename);
index 7896f792d3b0b78151b2e06b3003bbd4f2d7dbfe..7340613c7eff7ad791634c76ae2ecd547f711c45 100644 (file)
@@ -109,8 +109,10 @@ enum tick_dep_bits {
        TICK_DEP_BIT_PERF_EVENTS        = 1,
        TICK_DEP_BIT_SCHED              = 2,
        TICK_DEP_BIT_CLOCK_UNSTABLE     = 3,
-       TICK_DEP_BIT_RCU                = 4
+       TICK_DEP_BIT_RCU                = 4,
+       TICK_DEP_BIT_RCU_EXP            = 5
 };
+#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP
 
 #define TICK_DEP_MASK_NONE             0
 #define TICK_DEP_MASK_POSIX_TIMER      (1 << TICK_DEP_BIT_POSIX_TIMER)
@@ -118,6 +120,7 @@ enum tick_dep_bits {
 #define TICK_DEP_MASK_SCHED            (1 << TICK_DEP_BIT_SCHED)
 #define TICK_DEP_MASK_CLOCK_UNSTABLE   (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
 #define TICK_DEP_MASK_RCU              (1 << TICK_DEP_BIT_RCU)
+#define TICK_DEP_MASK_RCU_EXP          (1 << TICK_DEP_BIT_RCU_EXP)
 
 #ifdef CONFIG_NO_HZ_COMMON
 extern bool tick_nohz_enabled;
index 8e10b9dbd8c2082dbbacdf0ef5549f3c7e31591f..8ef5e5cc9f572e64f873bd16d0c29ddd573d9f44 100644 (file)
@@ -110,4 +110,10 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
  * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
  */
 #define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
+
+struct timens_offset {
+       s64     sec;
+       u64     nsec;
+};
+
 #endif
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
new file mode 100644 (file)
index 0000000..824d54e
--- /dev/null
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TIMENS_H
+#define _LINUX_TIMENS_H
+
+
+#include <linux/sched.h>
+#include <linux/kref.h>
+#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
+#include <linux/err.h>
+
+struct user_namespace;
+extern struct user_namespace init_user_ns;
+
+struct timens_offsets {
+       struct timespec64 monotonic;
+       struct timespec64 boottime;
+};
+
+struct time_namespace {
+       struct kref             kref;
+       struct user_namespace   *user_ns;
+       struct ucounts          *ucounts;
+       struct ns_common        ns;
+       struct timens_offsets   offsets;
+       struct page             *vvar_page;
+       /* If set prevents changing offsets after any task joined namespace. */
+       bool                    frozen_offsets;
+} __randomize_layout;
+
+extern struct time_namespace init_time_ns;
+
+#ifdef CONFIG_TIME_NS
+extern int vdso_join_timens(struct task_struct *task,
+                           struct time_namespace *ns);
+
+static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
+{
+       kref_get(&ns->kref);
+       return ns;
+}
+
+struct time_namespace *copy_time_ns(unsigned long flags,
+                                   struct user_namespace *user_ns,
+                                   struct time_namespace *old_ns);
+void free_time_ns(struct kref *kref);
+int timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
+struct vdso_data *arch_get_vdso_data(void *vvar_page);
+
+static inline void put_time_ns(struct time_namespace *ns)
+{
+       kref_put(&ns->kref, free_time_ns);
+}
+
+void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m);
+
+struct proc_timens_offset {
+       int                     clockid;
+       struct timespec64       val;
+};
+
+int proc_timens_set_offset(struct file *file, struct task_struct *p,
+                          struct proc_timens_offset *offsets, int n);
+
+static inline void timens_add_monotonic(struct timespec64 *ts)
+{
+       struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+       *ts = timespec64_add(*ts, ns_offsets->monotonic);
+}
+
+static inline void timens_add_boottime(struct timespec64 *ts)
+{
+       struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+       *ts = timespec64_add(*ts, ns_offsets->boottime);
+}
+
+ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t tim,
+                               struct timens_offsets *offsets);
+
+static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
+{
+       struct time_namespace *ns = current->nsproxy->time_ns;
+
+       if (likely(ns == &init_time_ns))
+               return tim;
+
+       return do_timens_ktime_to_host(clockid, tim, &ns->offsets);
+}
+
+#else
+static inline int vdso_join_timens(struct task_struct *task,
+                                  struct time_namespace *ns)
+{
+       return 0;
+}
+
+static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
+{
+       return NULL;
+}
+
+static inline void put_time_ns(struct time_namespace *ns)
+{
+}
+
+static inline
+struct time_namespace *copy_time_ns(unsigned long flags,
+                                   struct user_namespace *user_ns,
+                                   struct time_namespace *old_ns)
+{
+       if (flags & CLONE_NEWTIME)
+               return ERR_PTR(-EINVAL);
+
+       return old_ns;
+}
+
+static inline int timens_on_fork(struct nsproxy *nsproxy,
+                                struct task_struct *tsk)
+{
+       return 0;
+}
+
+static inline void timens_add_monotonic(struct timespec64 *ts) { }
+static inline void timens_add_boottime(struct timespec64 *ts) { }
+static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
+{
+       return tim;
+}
+#endif
+
+#endif /* _LINUX_TIMENS_H */
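To illustrate the intended call pattern (a sketch, not code from the patch): a monotonic-clock read path applies the caller's namespace offset after sampling the host clock, and collapses to a no-op when CONFIG_TIME_NS is disabled or the task is in init_time_ns.

/* Hypothetical helper: return CLOCK_MONOTONIC as seen from the
 * current task's time namespace.
 */
static void demo_get_monotonic(struct timespec64 *ts)
{
	ktime_get_ts64(ts);		/* host CLOCK_MONOTONIC */
	timens_add_monotonic(ts);	/* add the per-namespace offset */
}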
index c17af77f3fae7f98814b521b127b2e10945d1e5e..ea627d1ab7e39bebb4db5bb4d436530d57f63567 100644 (file)
@@ -30,7 +30,7 @@ struct tnum tnum_lshift(struct tnum a, u8 shift);
 /* Shift (rsh) a tnum right (by a fixed shift) */
 struct tnum tnum_rshift(struct tnum a, u8 shift);
 /* Shift (arsh) a tnum right (by a fixed min_shift) */
-struct tnum tnum_arshift(struct tnum a, u8 min_shift);
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
 /* Add two tnums, return @a + @b */
 struct tnum tnum_add(struct tnum a, struct tnum b);
 /* Subtract two tnums, return @a - @b */
index 0d6e949ba315d28920ae43f16a77c4f5bb3dadd2..03e9b184411bee44d8be375bbcaefff5f72509a2 100644 (file)
@@ -403,6 +403,7 @@ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
 extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
 extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
 extern struct tpm_chip *tpm_default_chip(void);
+void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
 #else
 static inline int tpm_is_tpm2(struct tpm_chip *chip)
 {
index 4c6e15605766137d65936f7d96352e76f541fb92..13ea7f7d54acc034d48111da846a0f2d184f5764 100644 (file)
@@ -192,6 +192,22 @@ enum trace_reg {
 
 struct trace_event_call;
 
+#define TRACE_FUNCTION_TYPE ((const char *)~0UL)
+
+struct trace_event_fields {
+       const char *type;
+       union {
+               struct {
+                       const char *name;
+                       const int  size;
+                       const int  align;
+                       const int  is_signed;
+                       const int  filter_type;
+               };
+               int (*define_fields)(struct trace_event_call *);
+       };
+};
+
 struct trace_event_class {
        const char              *system;
        void                    *probe;
@@ -200,7 +216,7 @@ struct trace_event_class {
 #endif
        int                     (*reg)(struct trace_event_call *event,
                                       enum trace_reg type, void *data);
-       int                     (*define_fields)(struct trace_event_call *);
+       struct trace_event_fields *fields_array;
        struct list_head        *(*get_fields)(struct trace_event_call *);
        struct list_head        fields;
        int                     (*raw_init)(struct trace_event_call *);
index fb9f4f799554e13a375a9ad52ed7facb69c7d143..6ef1c7109fc4d42bf2f408a01cdef9cafadaf471 100644 (file)
@@ -45,6 +45,7 @@ enum ucount_type {
        UCOUNT_NET_NAMESPACES,
        UCOUNT_MNT_NAMESPACES,
        UCOUNT_CGROUP_NAMESPACES,
+       UCOUNT_TIME_NAMESPACES,
 #ifdef CONFIG_INOTIFY_USER
        UCOUNT_INOTIFY_INSTANCES,
        UCOUNT_INOTIFY_WATCHES,
index a4b24110277199a2043a797c783fc4dd894ff47a..ec38132366992efdcddafd4e8bab9e2edcb3aa1c 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/rbtree.h>
 #include <linux/overflow.h>
 
+#include <asm/vmalloc.h>
+
 struct vm_area_struct;         /* vma defining user mapping in mm_types.h */
 struct notifier_block;         /* in notifier.h */
 
index 86eecbd98e84ab78c13fbdda228b173f9bb404f5..f73e1775ded0171456c7071f471e3e8c28055355 100644 (file)
@@ -416,6 +416,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
        return xa->xa_flags & XA_FLAGS_MARK(mark);
 }
 
+/**
+ * xa_for_each_range() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ * @last: Last index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  You may modify @index during the iteration if you
+ * want to skip or reprocess indices.  It is safe to modify the array
+ * during the iteration.  At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to @last.
+ *
+ * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n).  You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_range() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_range().
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ */
+#define xa_for_each_range(xa, index, entry, start, last)               \
+       for (index = start,                                             \
+            entry = xa_find(xa, &index, last, XA_PRESENT);             \
+            entry;                                                     \
+            entry = xa_find_after(xa, &index, last, XA_PRESENT))
+
 /**
  * xa_for_each_start() - Iterate over a portion of an XArray.
  * @xa: XArray.
@@ -439,11 +469,8 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  *
  * Context: Any context.  Takes and releases the RCU lock.
  */
-#define xa_for_each_start(xa, index, entry, start)                     \
-       for (index = start,                                             \
-            entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);        \
-            entry;                                                     \
-            entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+#define xa_for_each_start(xa, index, entry, start) \
+       xa_for_each_range(xa, index, entry, start, ULONG_MAX)
 
 /**
  * xa_for_each() - Iterate over present entries in an XArray.
@@ -508,6 +535,14 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
                                spin_lock_irqsave(&(xa)->xa_lock, flags)
 #define xa_unlock_irqrestore(xa, flags) \
                                spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+#define xa_lock_nested(xa, subclass) \
+                               spin_lock_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_bh_nested(xa, subclass) \
+                               spin_lock_bh_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irq_nested(xa, subclass) \
+                               spin_lock_irq_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irqsave_nested(xa, flags, subclass) \
+               spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
 
 /*
  * Versions of the normal API which require the caller to hold the
index 059524b87c4c90f310cbf1f08f72721bcf5481c0..f22bd6c838a379108f74c493a127805857552431 100644 (file)
@@ -3548,6 +3548,9 @@ struct cfg80211_update_owe_info {
  *
  * @start_radar_detection: Start radar detection in the driver.
  *
+ * @end_cac: End running CAC, probably because a related CAC
+ *     was finished on another phy.
+ *
  * @update_ft_ies: Provide updated Fast BSS Transition information to the
  *     driver. If the SME is in the driver/firmware, this information can be
  *     used in building Authentication and Reassociation Request frames.
@@ -3874,6 +3877,8 @@ struct cfg80211_ops {
                                         struct net_device *dev,
                                         struct cfg80211_chan_def *chandef,
                                         u32 cac_time_ms);
+       void    (*end_cac)(struct wiphy *wiphy,
+                               struct net_device *dev);
        int     (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
                                 struct cfg80211_update_ft_ies_params *ftie);
        int     (*crit_proto_start)(struct wiphy *wiphy,
index 47f87b2fcf633b1181263772936882910782e4b2..38b4acb93f74725e561252a81679cf08293748c3 100644 (file)
@@ -938,7 +938,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink,
                                             u32 region_max_snapshots,
                                             u64 region_size);
 void devlink_region_destroy(struct devlink_region *region);
-u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+u32 devlink_region_snapshot_id_get(struct devlink *devlink);
 int devlink_region_snapshot_create(struct devlink_region *region,
                                   u8 *data, u32 snapshot_id,
                                   devlink_snapshot_data_dest_t *data_destructor);
index fe62fe2eb781c12289346b9f1d3ddba037c46736..3448cf865edee88fe97c7651895e47b940d8ae8a 100644 (file)
@@ -82,7 +82,7 @@ struct dst_entry {
 struct dst_metrics {
        u32             metrics[RTAX_MAX];
        refcount_t      refcnt;
-};
+} __aligned(4);                /* Low pointer bits contain DST_METRICS_FLAGS */
 extern const struct dst_metrics dst_default_metrics;
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
@@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
        struct dst_entry *dst = skb_dst(skb);
 
        if (dst && dst->ops->update_pmtu)
-               dst->ops->update_pmtu(dst, NULL, skb, mtu);
+               dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
+}
+
+/* update dst pmtu but not do neighbor confirm */
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       if (dst && dst->ops->update_pmtu)
+               dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
 }
 
 static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
@@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
        u32 encap_mtu = dst_mtu(encap_dst);
 
        if (skb->len > encap_mtu - headroom)
-               skb_dst_update_pmtu(skb, encap_mtu - headroom);
+               skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
 }
 
 #endif /* _NET_DST_H */
index 5ec645f27ee3841f9a4074c94e5dca37bb86fc2e..443863c7b8da362476c15fd290ac2a32a8aa86e3 100644 (file)
@@ -27,7 +27,8 @@ struct dst_ops {
        struct dst_entry *      (*negative_advice)(struct dst_entry *);
        void                    (*link_failure)(struct sk_buff *);
        void                    (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
-                                              struct sk_buff *skb, u32 mtu);
+                                              struct sk_buff *skb, u32 mtu,
+                                              bool confirm_neigh);
        void                    (*redirect)(struct dst_entry *dst, struct sock *sk,
                                            struct sk_buff *skb);
        int                     (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
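A provider-side sketch of the widened hook (the callback body is hypothetical): tunnel paths now call skb_dst_update_pmtu_no_confirm(), so implementations should only touch the neighbour entry when confirm_neigh is true.

static void demo_update_pmtu(struct dst_entry *dst, struct sock *sk,
			     struct sk_buff *skb, u32 mtu,
			     bool confirm_neigh)
{
	if (confirm_neigh)
		dst_confirm_neigh(dst, &ip_hdr(skb)->daddr);
	/* ... then record the new MTU on the route as before ... */
}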
index c41833bd4590c10f556a94cf93b1a6f1338ccd96..4d9a0c6a2e5f35caf95727449b898c0f832bb0eb 100644 (file)
@@ -37,7 +37,7 @@ struct garp_skb_cb {
 static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb)
 {
        BUILD_BUG_ON(sizeof(struct garp_skb_cb) >
-                    FIELD_SIZEOF(struct sk_buff, cb));
+                    sizeof_field(struct sk_buff, cb));
        return (struct garp_skb_cb *)skb->cb;
 }
 
index af2b4c065a042e36135fe6fdcee9833b6b353364..d0019d3395cf1c86e8b42953ddfb96d77ffe5cd4 100644 (file)
@@ -103,13 +103,19 @@ struct inet_bind_hashbucket {
        struct hlist_head       chain;
 };
 
-/*
- * Sockets can be hashed in established or listening table
+/* Sockets can be hashed in established or listening table.
+ * We must use different 'nulls' end-of-chain values for all hash buckets:
+ * a socket might transition from ESTABLISHED to LISTEN state without
+ * an RCU grace period. A lookup in the ehash table needs to handle this case.
  */
+#define LISTENING_NULLS_BASE (1U << 29)
 struct inet_listen_hashbucket {
        spinlock_t              lock;
        unsigned int            count;
-       struct hlist_head       head;
+       union {
+               struct hlist_head       head;
+               struct hlist_nulls_head nulls_head;
+       };
 };
 
 /* This is for listening sockets, thus all sockets which possess wildcards. */
index af645604f3289957c420ef2d9d0fdbaebc84c0f2..236503a50759a7229b9e9ee31ac99a0f2a7a012e 100644 (file)
@@ -33,8 +33,8 @@
 /* Used to memset ipv4 address padding. */
 #define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
 #define IP_TUNNEL_KEY_IPV4_PAD_LEN                             \
-       (FIELD_SIZEOF(struct ip_tunnel_key, u) -                \
-        FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
+       (sizeof_field(struct ip_tunnel_key, u) -                \
+        sizeof_field(struct ip_tunnel_key, u.ipv4))
 
 struct ip_tunnel_key {
        __be64                  tun_id;
@@ -63,7 +63,7 @@ struct ip_tunnel_key {
 
 /* Maximum tunnel options length. */
 #define IP_TUNNEL_OPTS_MAX                                     \
-       GENMASK((FIELD_SIZEOF(struct ip_tunnel_info,            \
+       GENMASK((sizeof_field(struct ip_tunnel_info,            \
                              options_len) * BITS_PER_BYTE) - 1, 0)
 
 struct ip_tunnel_info {
index ef58b4a071900777170685221cb6d3f261cf709c..1c308c034e1a6835cd9e4cf9b2f4069a58060a04 100644 (file)
@@ -39,7 +39,7 @@ struct mrp_skb_cb {
 static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb)
 {
        BUILD_BUG_ON(sizeof(struct mrp_skb_cb) >
-                    FIELD_SIZEOF(struct sk_buff, cb));
+                    sizeof_field(struct sk_buff, cb));
        return (struct mrp_skb_cb *)skb->cb;
 }
 
index 6ad9ad47a9c54bfbd1772f404f4ae81bf9cc6dd3..8ec77bfdc1a413d0a45edc978075aedab817e62e 100644 (file)
@@ -72,7 +72,6 @@ struct neigh_parms {
        struct net_device *dev;
        struct list_head list;
        int     (*neigh_setup)(struct neighbour *);
-       void    (*neigh_cleanup)(struct neighbour *);
        struct neigh_table *tbl;
 
        void    *sysctl_table;
index 44b5a00a9c64c8150c8eea2616a0a5b0e69223f3..37f0fbefb060f3dd9773978991fe131e80dadf97 100644 (file)
@@ -81,7 +81,7 @@ struct nf_conn_help {
 };
 
 #define NF_CT_HELPER_BUILD_BUG_ON(structsize) \
-       BUILD_BUG_ON((structsize) > FIELD_SIZEOF(struct nf_conn_help, data))
+       BUILD_BUG_ON((structsize) > sizeof_field(struct nf_conn_help, data))
 
 struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
                                                       u16 l3num, u8 protonum);
index f0897b3c97fb8c3c9afdb4d3ba4835aab45c856a..415b8f49d1509f1fa48ee45d4e9917d70a20054e 100644 (file)
@@ -106,6 +106,12 @@ struct flow_offload {
 };
 
 #define NF_FLOW_TIMEOUT (30 * HZ)
+#define nf_flowtable_time_stamp        (u32)jiffies
+
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+       return (__s32)(timeout - nf_flowtable_time_stamp);
+}
 
 struct nf_flow_route {
        struct {
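The helper added above is the classic wraparound-safe deadline check: subtract in 32-bit unsigned arithmetic, then reinterpret the difference as signed, so a deadline stays "in the future" (positive delta) even after jiffies wraps. A standalone illustration in plain C:

    #include <assert.h>
    #include <stdint.h>

    static int32_t timeout_delta(uint32_t timeout, uint32_t now)
    {
            return (int32_t)(timeout - now); /* same trick as nf_flow_timeout_delta() */
    }

    int main(void)
    {
            uint32_t now = 0xfffffff0u;      /* "jiffies" just before the wrap */
            uint32_t timeout = now + 60;     /* wraps around to a small value */

            /* Numerically timeout < now, yet the flow has not expired. */
            assert(timeout_delta(timeout, now) == 60);
            return 0;
    }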
index 7281895fa6d999ba1fe2b67cd0ff676cb7fbcc22..2656155b40697846db4b8f8f6fda49e5f242d7c3 100644 (file)
@@ -41,7 +41,7 @@ struct nft_immediate_expr {
  */
 static inline u32 nft_cmp_fast_mask(unsigned int len)
 {
-       return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr,
+       return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
                                                data) * BITS_PER_BYTE - len));
 }
 
index 286fd960896fdd8fa79b9598cdc8081fc424867d..a1a8d45adb42a91857f6c0db94f914afd502f977 100644 (file)
@@ -7,6 +7,7 @@
 struct netns_nftables {
        struct list_head        tables;
        struct list_head        commit_list;
+       struct list_head        module_list;
        struct mutex            commit_mutex;
        unsigned int            base_seq;
        u8                      gencursor;
index 144f264ea394d18d92d1824926abf2caa3245e36..fceddf89592af4483431d012873bc611a80d7d50 100644 (file)
@@ -308,6 +308,7 @@ struct tcf_proto_ops {
        int                     (*delete)(struct tcf_proto *tp, void *arg,
                                          bool *last, bool rtnl_held,
                                          struct netlink_ext_ack *);
+       bool                    (*delete_empty)(struct tcf_proto *tp);
        void                    (*walk)(struct tcf_proto *tp,
                                        struct tcf_walker *arg, bool rtnl_held);
        int                     (*reoffload)(struct tcf_proto *tp, bool add,
@@ -336,6 +337,10 @@ struct tcf_proto_ops {
        int                     flags;
 };
 
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
 enum tcf_proto_ops_flags {
        TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
 };
index 87d54ef57f0040fd7bbd4344db0d3af7e6f6d992..8dff68b4c3168a75c762e31e8fe91abb519554e2 100644 (file)
@@ -722,6 +722,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
        hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+       hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+}
+
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
        sock_hold(sk);
@@ -2305,7 +2310,7 @@ struct sock_skb_cb {
  * using skb->cb[] would keep using it directly and utilize its
  * alignment guarantee.
  */
-#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
+#define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
                            sizeof(struct sock_skb_cb)))
 
 #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
@@ -2583,9 +2588,9 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
  */
 static inline void sk_pacing_shift_update(struct sock *sk, int val)
 {
-       if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+       if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
                return;
-       sk->sk_pacing_shift = val;
+       WRITE_ONCE(sk->sk_pacing_shift, val);
 }
 
 /* if a socket is bound to a device, check that the given device
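sk_pacing_shift is read locklessly by TCP and by drivers while this setter may run concurrently, so both sides of the race are now annotated; the annotations prevent load/store tearing and document the intentional data race. A hedged reader-side sketch (illustrative function only; real readers include net/ipv4/tcp_output.c):

    static u64 pacing_bytes_sketch(const struct sock *sk)
    {
            /* A single READ_ONCE() keeps the value stable within this
             * computation even if sk_pacing_shift_update() races with us. */
            int shift = READ_ONCE(sk->sk_pacing_shift);

            return READ_ONCE(sk->sk_pacing_rate) >> shift;
    }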
index 86b9a8766648824c0f122f6c01f55d59bd0d7d72..e6f48384dc7119cae21ef46c74532e0f2577d8a2 100644 (file)
@@ -1766,9 +1766,18 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
        return skb_queue_is_last(&sk->sk_write_queue, skb);
 }
 
+/**
+ * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
+ * @sk: socket
+ *
+ * Since the write queue can have a temporary empty skb in it,
+ * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
+ */
 static inline bool tcp_write_queue_empty(const struct sock *sk)
 {
-       return skb_queue_empty(&sk->sk_write_queue);
+       const struct tcp_sock *tp = tcp_sk(sk);
+
+       return tp->write_seq == tp->snd_nxt;
 }
 
 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
@@ -2138,7 +2147,8 @@ struct tcp_ulp_ops {
        /* initialize ulp */
        int (*init)(struct sock *sk);
        /* update ulp */
-       void (*update)(struct sock *sk, struct proto *p);
+       void (*update)(struct sock *sk, struct proto *p,
+                      void (*write_space)(struct sock *sk));
        /* cleanup ulp */
        void (*release)(struct sock *sk);
        /* diagnostic */
@@ -2153,7 +2163,8 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type);
 int tcp_set_ulp(struct sock *sk, const char *name);
 void tcp_get_available_ulp(char *buf, size_t len);
 void tcp_cleanup_ulp(struct sock *sk);
-void tcp_update_ulp(struct sock *sk, struct proto *p);
+void tcp_update_ulp(struct sock *sk, struct proto *p,
+                   void (*write_space)(struct sock *sk));
 
 #define MODULE_ALIAS_TCP_ULP(name)                             \
        __MODULE_INFO(alias, alias_userspace, name);            \
index ed1acc3044ace48ecd0db0dfe1b7ac3fce23c3fa..d7d6c2b4ffa7153b0caef7dd249ba93f5f39e414 100644 (file)
@@ -62,7 +62,8 @@ enum {
        X25_STATE_1,            /* Awaiting Call Accepted */
        X25_STATE_2,            /* Awaiting Clear Confirmation */
        X25_STATE_3,            /* Data Transfer */
-       X25_STATE_4             /* Awaiting Reset Confirmation */
+       X25_STATE_4,            /* Awaiting Reset Confirmation */
+       X25_STATE_5             /* Call Accepted / Call Connected pending */
 };
 
 enum {
index cacb48faf670690f67fe5c45c79fa242c4c7fc38..5608e14e3aadf893570e4189c81cb5472a27e184 100644 (file)
@@ -2832,6 +2832,11 @@ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
                                struct rdma_user_mmap_entry *entry,
                                size_t length);
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+                                     struct rdma_user_mmap_entry *entry,
+                                     size_t length, u32 min_pgoff,
+                                     u32 max_pgoff);
+
 struct rdma_user_mmap_entry *
 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
                               unsigned long pgoff);
similarity index 72%
rename from arch/riscv/include/asm/sifive_l2_cache.h
rename to include/soc/sifive/sifive_l2_cache.h
index 04f6748fc50b18874ccd0a25ae6eaa0d7ab92796..92ade10ed67e94bac8439512cbe3a628958e7b6b 100644 (file)
@@ -4,8 +4,8 @@
  *
  */
 
-#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
-#define _ASM_RISCV_SIFIVE_L2_CACHE_H
+#ifndef __SOC_SIFIVE_L2_CACHE_H
+#define __SOC_SIFIVE_L2_CACHE_H
 
 extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
 extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
@@ -13,4 +13,4 @@ extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
 #define SIFIVE_L2_ERR_TYPE_CE 0
 #define SIFIVE_L2_ERR_TYPE_UE 1
 
-#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
+#endif /* __SOC_SIFIVE_L2_CACHE_H */
index c28a1ed5e8df9f4e82b783d0ac787606763c4258..26289679982647ca6feb176624c1db65948dfa76 100644 (file)
@@ -1150,6 +1150,7 @@ struct snd_soc_pcm_runtime {
        unsigned int num_codecs;
 
        struct delayed_work delayed_work;
+       void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd);
 #ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_dpcm_root;
 #endif
index d5ec4fac82aea89dfe33d0ecc92c4f395d2b1dd9..564ba1b5cf57a1eeef66c8c5531f5d1809f13bbf 100644 (file)
@@ -915,9 +915,9 @@ TRACE_EVENT(afs_call_state,
 
 TRACE_EVENT(afs_lookup,
            TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name,
-                    struct afs_vnode *vnode),
+                    struct afs_fid *fid),
 
-           TP_ARGS(dvnode, name, vnode),
+           TP_ARGS(dvnode, name, fid),
 
            TP_STRUCT__entry(
                    __field_struct(struct afs_fid,      dfid            )
@@ -928,13 +928,7 @@ TRACE_EVENT(afs_lookup,
            TP_fast_assign(
                    int __len = min_t(int, name->len, 23);
                    __entry->dfid = dvnode->fid;
-                   if (vnode) {
-                           __entry->fid = vnode->fid;
-                   } else {
-                           __entry->fid.vid = 0;
-                           __entry->fid.vnode = 0;
-                           __entry->fid.unique = 0;
-                   }
+                   __entry->fid = *fid;
                    memcpy(__entry->name, name->name, __len);
                    __entry->name[__len] = 0;
                           ),
index e4526f85c19d3689bb61066e865b9e9f209c8478..0bddea663b3b7eb1fa275ebd1fca4b2f53e78607 100644 (file)
@@ -275,7 +275,8 @@ TRACE_EVENT(bcache_btree_write,
                __entry->keys   = b->keys.set[b->keys.nsets].data->keys;
        ),
 
-       TP_printk("bucket %zu", __entry->bucket)
+       TP_printk("bucket %zu written block %u + %u",
+               __entry->bucket, __entry->block, __entry->keys)
 );
 
 DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
index ee05db7ee8d22597899253f45c269ef4ce840de9..796053e162d20787d327332ae194484f12284fd1 100644 (file)
@@ -85,7 +85,7 @@ TRACE_EVENT(file_check_and_advance_wb_err,
                TP_ARGS(file, old),
 
                TP_STRUCT__entry(
-                       __field(struct file *, file);
+                       __field(struct file *, file)
                        __field(unsigned long, i_ino)
                        __field(dev_t, s_dev)
                        __field(errseq_t, old)
index dd4db334bd63717d571851441ecd47786fd260da..d82a0f4e824dd55e74a6791e81ee76a04c4c2d1a 100644 (file)
@@ -31,7 +31,8 @@
        EM( SCAN_ALLOC_HUGE_PAGE_FAIL,  "alloc_huge_page_failed")       \
        EM( SCAN_CGROUP_CHARGE_FAIL,    "ccgroup_charge_failed")        \
        EM( SCAN_EXCEED_SWAP_PTE,       "exceed_swap_pte")              \
-       EMe(SCAN_TRUNCATED,             "truncated")                    \
+       EM( SCAN_TRUNCATED,             "truncated")                    \
+       EMe(SCAN_PAGE_HAS_PRIVATE,      "page_has_private")             \
 
 #undef EM
 #undef EMe
index 95fba0471e5bdc2cc43916b4c02a1b128f8ae1ef..3f249e150c0c3ec7133b93a07011dba2b736f43a 100644 (file)
@@ -18,13 +18,13 @@ DECLARE_EVENT_CLASS(preemptirq_template,
        TP_ARGS(ip, parent_ip),
 
        TP_STRUCT__entry(
-               __field(u32, caller_offs)
-               __field(u32, parent_offs)
+               __field(s32, caller_offs)
+               __field(s32, parent_offs)
        ),
 
        TP_fast_assign(
-               __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
-               __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+               __entry->caller_offs = (s32)(ip - (unsigned long)_stext);
+               __entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext);
        ),
 
        TP_printk("caller=%pS parent=%pS",
index 66122602bd0859781bde38302ea6e208e17c3201..5e49b06e810445f2d07e1144f7d176a7cf0697c2 100644 (file)
@@ -41,7 +41,7 @@ TRACE_EVENT(rcu_utilization,
        TP_printk("%s", __entry->s)
 );
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
 
 /*
  * Tracepoint for grace-period events.  Takes a string identifying the
@@ -432,7 +432,7 @@ TRACE_EVENT_RCU(rcu_fqs,
                  __entry->cpu, __entry->qsevent)
 );
 
-#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) */
+#endif /* #if defined(CONFIG_TREE_RCU) */
 
 /*
  * Tracepoint for dyntick-idle entry/exit events.  These take a string
@@ -449,7 +449,7 @@ TRACE_EVENT_RCU(rcu_fqs,
  */
 TRACE_EVENT_RCU(rcu_dyntick,
 
-       TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
+       TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
 
        TP_ARGS(polarity, oldnesting, newnesting, dynticks),
 
@@ -464,7 +464,7 @@ TRACE_EVENT_RCU(rcu_dyntick,
                __entry->polarity = polarity;
                __entry->oldnesting = oldnesting;
                __entry->newnesting = newnesting;
-               __entry->dynticks = atomic_read(&dynticks);
+               __entry->dynticks = dynticks;
        ),
 
        TP_printk("%s %lx %lx %#3x", __entry->polarity,
@@ -481,16 +481,14 @@ TRACE_EVENT_RCU(rcu_dyntick,
  */
 TRACE_EVENT_RCU(rcu_callback,
 
-       TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
-                long qlen),
+       TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),
 
-       TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
+       TP_ARGS(rcuname, rhp, qlen),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(void *, rhp)
                __field(void *, func)
-               __field(long, qlen_lazy)
                __field(long, qlen)
        ),
 
@@ -498,13 +496,12 @@ TRACE_EVENT_RCU(rcu_callback,
                __entry->rcuname = rcuname;
                __entry->rhp = rhp;
                __entry->func = rhp->func;
-               __entry->qlen_lazy = qlen_lazy;
                __entry->qlen = qlen;
        ),
 
-       TP_printk("%s rhp=%p func=%ps %ld/%ld",
+       TP_printk("%s rhp=%p func=%ps %ld",
                  __entry->rcuname, __entry->rhp, __entry->func,
-                 __entry->qlen_lazy, __entry->qlen)
+                 __entry->qlen)
 );
 
 /*
@@ -518,15 +515,14 @@ TRACE_EVENT_RCU(rcu_callback,
 TRACE_EVENT_RCU(rcu_kfree_callback,
 
        TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
-                long qlen_lazy, long qlen),
+                long qlen),
 
-       TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
+       TP_ARGS(rcuname, rhp, offset, qlen),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(void *, rhp)
                __field(unsigned long, offset)
-               __field(long, qlen_lazy)
                __field(long, qlen)
        ),
 
@@ -534,13 +530,12 @@ TRACE_EVENT_RCU(rcu_kfree_callback,
                __entry->rcuname = rcuname;
                __entry->rhp = rhp;
                __entry->offset = offset;
-               __entry->qlen_lazy = qlen_lazy;
                __entry->qlen = qlen;
        ),
 
-       TP_printk("%s rhp=%p func=%ld %ld/%ld",
+       TP_printk("%s rhp=%p func=%ld %ld",
                  __entry->rcuname, __entry->rhp, __entry->offset,
-                 __entry->qlen_lazy, __entry->qlen)
+                 __entry->qlen)
 );
 
 /*
@@ -552,27 +547,24 @@ TRACE_EVENT_RCU(rcu_kfree_callback,
  */
 TRACE_EVENT_RCU(rcu_batch_start,
 
-       TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
+       TP_PROTO(const char *rcuname, long qlen, long blimit),
 
-       TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
+       TP_ARGS(rcuname, qlen, blimit),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(long, qlen_lazy)
                __field(long, qlen)
                __field(long, blimit)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->qlen_lazy = qlen_lazy;
                __entry->qlen = qlen;
                __entry->blimit = blimit;
        ),
 
-       TP_printk("%s CBs=%ld/%ld bl=%ld",
-                 __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
-                 __entry->blimit)
+       TP_printk("%s CBs=%ld bl=%ld",
+                 __entry->rcuname, __entry->qlen, __entry->blimit)
 );
 
 /*
index 26927a560eabca1a880d9d031b610588bbb6e3ea..3c716214dab1a36543cd8234798f21eda774d09b 100644 (file)
@@ -74,6 +74,12 @@ DEFINE_EVENT(rpm_internal, rpm_idle,
 
        TP_ARGS(dev, flags)
 );
+DEFINE_EVENT(rpm_internal, rpm_usage,
+
+       TP_PROTO(struct device *dev, int flags),
+
+       TP_ARGS(dev, flags)
+);
 
 TRACE_EVENT(rpm_return_int,
        TP_PROTO(struct device *dev, unsigned long ip, int ret),
index e172549283beaa1ed24d02871032b945ef8f800d..9b8ae961acc53aad73830b7c2bf200eecf7064ec 100644 (file)
@@ -8,23 +8,6 @@
 #include <linux/tracepoint.h>
 #include <linux/workqueue.h>
 
-DECLARE_EVENT_CLASS(workqueue_work,
-
-       TP_PROTO(struct work_struct *work),
-
-       TP_ARGS(work),
-
-       TP_STRUCT__entry(
-               __field( void *,        work    )
-       ),
-
-       TP_fast_assign(
-               __entry->work           = work;
-       ),
-
-       TP_printk("work struct %p", __entry->work)
-);
-
 struct pool_workqueue;
 
 /**
@@ -73,11 +56,21 @@ TRACE_EVENT(workqueue_queue_work,
  * which happens immediately after queueing unless @max_active limit
  * is reached.
  */
-DEFINE_EVENT(workqueue_work, workqueue_activate_work,
+TRACE_EVENT(workqueue_activate_work,
 
        TP_PROTO(struct work_struct *work),
 
-       TP_ARGS(work)
+       TP_ARGS(work),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+       ),
+
+       TP_printk("work struct %p", __entry->work)
 );
 
 /**
@@ -108,14 +101,27 @@ TRACE_EVENT(workqueue_execute_start,
 /**
  * workqueue_execute_end - called immediately after the workqueue callback
  * @work:      pointer to struct work_struct
+ * @function:   pointer to worker function
  *
  * Allows to track workqueue execution.
  */
-DEFINE_EVENT(workqueue_work, workqueue_execute_end,
+TRACE_EVENT(workqueue_execute_end,
 
-       TP_PROTO(struct work_struct *work),
+       TP_PROTO(struct work_struct *work, work_func_t function),
 
-       TP_ARGS(work)
+       TP_ARGS(work, function),
+
+       TP_STRUCT__entry(
+               __field( void *,        work    )
+               __field( void *,        function)
+       ),
+
+       TP_fast_assign(
+               __entry->work           = work;
+               __entry->function       = function;
+       ),
+
+       TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
 );
 
 #endif /*  _TRACE_WORKQUEUE_H */
index 9a0e8af21310c305d726bb5af6dbd015ec0202a1..a5ccfa67bc5cbcb4aae04d29012a08e215f8f7c3 100644 (file)
@@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
            TP_PROTO(xen_mc_callback_fn_t fn, void *data),
            TP_ARGS(fn, data),
            TP_STRUCT__entry(
-                   __field(xen_mc_callback_fn_t, fn)
+                   /*
+                    * Use field_struct to avoid is_signed_type()
+                    * comparison of a function pointer.
+                    */
+                   __field_struct(xen_mc_callback_fn_t, fn)
                    __field(void *, data)
                    ),
            TP_fast_assign(
index 472b33d23a10ea628b1d22ff2ea9f901460e269f..96d77e5e066437ca66fef85ffe21d1d5bd8dd438 100644 (file)
@@ -400,22 +400,16 @@ static struct trace_event_functions trace_event_type_funcs_##call = {     \
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 #undef __field_ext
-#define __field_ext(type, item, filter_type)                           \
-       ret = trace_define_field(event_call, #type, #item,              \
-                                offsetof(typeof(field), item),         \
-                                sizeof(field.item),                    \
-                                is_signed_type(type), filter_type);    \
-       if (ret)                                                        \
-               return ret;
+#define __field_ext(_type, _item, _filter_type) {                      \
+       .type = #_type, .name = #_item,                                 \
+       .size = sizeof(_type), .align = __alignof__(_type),             \
+       .is_signed = is_signed_type(_type), .filter_type = _filter_type },
 
 #undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)                    \
-       ret = trace_define_field(event_call, #type, #item,              \
-                                offsetof(typeof(field), item),         \
-                                sizeof(field.item),                    \
-                                0, filter_type);                       \
-       if (ret)                                                        \
-               return ret;
+#define __field_struct_ext(_type, _item, _filter_type) {               \
+       .type = #_type, .name = #_item,                                 \
+       .size = sizeof(_type), .align = __alignof__(_type),             \
+       0, .filter_type = _filter_type },
 
 #undef __field
 #define __field(type, item)    __field_ext(type, item, FILTER_OTHER)
@@ -424,25 +418,16 @@ static struct trace_event_functions trace_event_type_funcs_##call = {     \
 #define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
 
 #undef __array
-#define __array(type, item, len)                                       \
-       do {                                                            \
-               char *type_str = #type"["__stringify(len)"]";           \
-               BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                 \
-               BUILD_BUG_ON(len <= 0);                                 \
-               ret = trace_define_field(event_call, type_str, #item,   \
-                                offsetof(typeof(field), item),         \
-                                sizeof(field.item),                    \
-                                is_signed_type(type), FILTER_OTHER);   \
-               if (ret)                                                \
-                       return ret;                                     \
-       } while (0);
+#define __array(_type, _item, _len) {                                  \
+       .type = #_type"["__stringify(_len)"]", .name = #_item,          \
+       .size = sizeof(_type[_len]), .align = __alignof__(_type),       \
+       .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
 
 #undef __dynamic_array
-#define __dynamic_array(type, item, len)                                      \
-       ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
-                                offsetof(typeof(field), __data_loc_##item),   \
-                                sizeof(field.__data_loc_##item),              \
-                                is_signed_type(type), FILTER_OTHER);
+#define __dynamic_array(_type, _item, _len) {                          \
+       .type = "__data_loc " #_type "[]", .name = #_item,              \
+       .size = 4, .align = 4,                                          \
+       .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
@@ -452,16 +437,9 @@ static struct trace_event_functions trace_event_type_funcs_##call = {      \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)   \
-static int notrace __init                                              \
-trace_event_define_fields_##call(struct trace_event_call *event_call)  \
-{                                                                      \
-       struct trace_event_raw_##call field;                            \
-       int ret;                                                        \
-                                                                       \
-       tstruct;                                                        \
-                                                                       \
-       return ret;                                                     \
-}
+static struct trace_event_fields trace_event_fields_##call[] = {       \
+       tstruct                                                         \
+       {} };
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)
@@ -619,7 +597,7 @@ static inline notrace int trace_event_get_offsets_##call(           \
  *
  * static struct trace_event_class __used event_class_<template> = {
  *     .system                 = "<system>",
- *     .define_fields          = trace_event_define_fields_<call>,
+ *     .fields_array           = trace_event_fields_<call>,
  *     .fields                 = LIST_HEAD_INIT(event_class_##call.fields),
  *     .raw_init               = trace_event_raw_init,
  *     .probe                  = trace_event_raw_event_##call,
@@ -768,7 +746,7 @@ _TRACE_PERF_PROTO(call, PARAMS(proto));                                     \
 static char print_fmt_##call[] = print;                                        \
 static struct trace_event_class __used __refdata event_class_##call = { \
        .system                 = TRACE_SYSTEM_STRING,                  \
-       .define_fields          = trace_event_define_fields_##call,     \
+       .fields_array           = trace_event_fields_##call,            \
        .fields                 = LIST_HEAD_INIT(event_class_##call.fields),\
        .raw_init               = trace_event_raw_init,                 \
        .probe                  = trace_event_raw_event_##call,         \
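The net effect of this rework is that an event's field layout becomes a build-time array rather than a boot-time function full of trace_define_field() calls. A hedged sketch of what __field(int, my_field) now expands to inside such an array (names are illustrative):

    static struct trace_event_fields trace_event_fields_example[] = {
            { .type = "int", .name = "my_field",
              .size = sizeof(int), .align = __alignof__(int),
              .is_signed = is_signed_type(int), .filter_type = FILTER_OTHER },
            {}      /* terminating sentinel, as emitted by DECLARE_EVENT_CLASS */
    };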
index c160a5354eb62b3b17de564be439451c812470ae..f94f65d429bea3c26bdcdc3197376916399089e9 100644 (file)
@@ -11,6 +11,8 @@
 #define PROT_WRITE     0x2             /* page can be written */
 #define PROT_EXEC      0x4             /* page can be executed */
 #define PROT_SEM       0x8             /* page may be used for atomic ops */
+/*                     0x10               reserved for arch-specific use */
+/*                     0x20               reserved for arch-specific use */
 #define PROT_NONE      0x0             /* page can not be accessed */
 #define PROT_GROWSDOWN 0x01000000      /* mprotect flag: extend change to start of growsdown vma */
 #define PROT_GROWSUP   0x02000000      /* mprotect flag: extend change to end of growsup vma */
index 5d4f58e059fd5d8a352ce4506424f8081f2df2fd..9a1965c6c3d0d3b9bee6094c81505520fe9c635b 100644 (file)
@@ -148,6 +148,7 @@ static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
 #define BCACHE_SB_MAX_VERSION          4
 
 #define SB_SECTOR                      8
+#define SB_OFFSET                      (SB_SECTOR << SECTOR_SHIFT)
 #define SB_SIZE                                4096
 #define SB_LABEL_SIZE                  32
 #define SB_JOURNAL_BUCKETS             256U
@@ -156,6 +157,57 @@ static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
 
 #define BDEV_DATA_START_DEFAULT                16      /* sectors */
 
+struct cache_sb_disk {
+       __le64                  csum;
+       __le64                  offset; /* sector where this sb was written */
+       __le64                  version;
+
+       __u8                    magic[16];
+
+       __u8                    uuid[16];
+       union {
+               __u8            set_uuid[16];
+               __le64          set_magic;
+       };
+       __u8                    label[SB_LABEL_SIZE];
+
+       __le64                  flags;
+       __le64                  seq;
+       __le64                  pad[8];
+
+       union {
+       struct {
+               /* Cache devices */
+               __le64          nbuckets;       /* device size */
+
+               __le16          block_size;     /* sectors */
+               __le16          bucket_size;    /* sectors */
+
+               __le16          nr_in_set;
+               __le16          nr_this_dev;
+       };
+       struct {
+               /* Backing devices */
+               __le64          data_offset;
+
+               /*
+                * block_size from the cache device section is still used by
+                * backing devices, so don't add anything here until we fix
+                * things to not need it for backing devices anymore
+                */
+       };
+       };
+
+       __le32                  last_mount;     /* time overflow in y2106 */
+
+       __le16                  first_bucket;
+       union {
+               __le16          njournal_buckets;
+               __le16          keys;
+       };
+       __le64                  d[SB_JOURNAL_BUCKETS];  /* journal buckets */
+};
+
 struct cache_sb {
        __u64                   csum;
        __u64                   offset; /* sector where this sb was written */
index 98e2c493de8569ae3772ec946a9e92d3fc760b5d..4913539e5bcc272dbd54b82dfc3f3733eb6793cd 100644 (file)
@@ -39,6 +39,7 @@ struct hidraw_devinfo {
 /* The first byte of SFEATURE and GFEATURE is the report number */
 #define HIDIOCSFEATURE(len)    _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
 #define HIDIOCGFEATURE(len)    _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
+#define HIDIOCGRAWUNIQ(len)     _IOC(_IOC_READ, 'H', 0x08, len)
 
 #define HIDRAW_FIRST_MINOR 0
 #define HIDRAW_MAX_DEVICES 64
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
new file mode 100644 (file)
index 0000000..849ef15
--- /dev/null
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _USR_IDXD_H_
+#define _USR_IDXD_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/* Descriptor flags */
+#define IDXD_OP_FLAG_FENCE     0x0001
+#define IDXD_OP_FLAG_BOF       0x0002
+#define IDXD_OP_FLAG_CRAV      0x0004
+#define IDXD_OP_FLAG_RCR       0x0008
+#define IDXD_OP_FLAG_RCI       0x0010
+#define IDXD_OP_FLAG_CRSTS     0x0020
+#define IDXD_OP_FLAG_CR                0x0080
+#define IDXD_OP_FLAG_CC                0x0100
+#define IDXD_OP_FLAG_ADDR1_TCS 0x0200
+#define IDXD_OP_FLAG_ADDR2_TCS 0x0400
+#define IDXD_OP_FLAG_ADDR3_TCS 0x0800
+#define IDXD_OP_FLAG_CR_TCS    0x1000
+#define IDXD_OP_FLAG_STORD     0x2000
+#define IDXD_OP_FLAG_DRDBK     0x4000
+#define IDXD_OP_FLAG_DSTS      0x8000
+
+/* Opcode */
+enum dsa_opcode {
+       DSA_OPCODE_NOOP = 0,
+       DSA_OPCODE_BATCH,
+       DSA_OPCODE_DRAIN,
+       DSA_OPCODE_MEMMOVE,
+       DSA_OPCODE_MEMFILL,
+       DSA_OPCODE_COMPARE,
+       DSA_OPCODE_COMPVAL,
+       DSA_OPCODE_CR_DELTA,
+       DSA_OPCODE_AP_DELTA,
+       DSA_OPCODE_DUALCAST,
+       DSA_OPCODE_CRCGEN = 0x10,
+       DSA_OPCODE_COPY_CRC,
+       DSA_OPCODE_DIF_CHECK,
+       DSA_OPCODE_DIF_INS,
+       DSA_OPCODE_DIF_STRP,
+       DSA_OPCODE_DIF_UPDT,
+       DSA_OPCODE_CFLUSH = 0x20,
+};
+
+/* Completion record status */
+enum dsa_completion_status {
+       DSA_COMP_NONE = 0,
+       DSA_COMP_SUCCESS,
+       DSA_COMP_SUCCESS_PRED,
+       DSA_COMP_PAGE_FAULT_NOBOF,
+       DSA_COMP_PAGE_FAULT_IR,
+       DSA_COMP_BATCH_FAIL,
+       DSA_COMP_BATCH_PAGE_FAULT,
+       DSA_COMP_DR_OFFSET_NOINC,
+       DSA_COMP_DR_OFFSET_ERANGE,
+       DSA_COMP_DIF_ERR,
+       DSA_COMP_BAD_OPCODE = 0x10,
+       DSA_COMP_INVALID_FLAGS,
+       DSA_COMP_NOZERO_RESERVE,
+       DSA_COMP_XFER_ERANGE,
+       DSA_COMP_DESC_CNT_ERANGE,
+       DSA_COMP_DR_ERANGE,
+       DSA_COMP_OVERLAP_BUFFERS,
+       DSA_COMP_DCAST_ERR,
+       DSA_COMP_DESCLIST_ALIGN,
+       DSA_COMP_INT_HANDLE_INVAL,
+       DSA_COMP_CRA_XLAT,
+       DSA_COMP_CRA_ALIGN,
+       DSA_COMP_ADDR_ALIGN,
+       DSA_COMP_PRIV_BAD,
+       DSA_COMP_TRAFFIC_CLASS_CONF,
+       DSA_COMP_PFAULT_RDBA,
+       DSA_COMP_HW_ERR1,
+       DSA_COMP_HW_ERR_DRB,
+       DSA_COMP_TRANSLATION_FAIL,
+};
+
+#define DSA_COMP_STATUS_MASK           0x7f
+#define DSA_COMP_STATUS_WRITE          0x80
+
+struct dsa_batch_desc {
+       uint32_t        pasid:20;
+       uint32_t        rsvd:11;
+       uint32_t        priv:1;
+       uint32_t        flags:24;
+       uint32_t        opcode:8;
+       uint64_t        completion_addr;
+       uint64_t        desc_list_addr;
+       uint64_t        rsvd1;
+       uint32_t        desc_count;
+       uint16_t        interrupt_handle;
+       uint16_t        rsvd2;
+       uint8_t         rsvd3[24];
+} __attribute__((packed));
+
+struct dsa_hw_desc {
+       uint32_t        pasid:20;
+       uint32_t        rsvd:11;
+       uint32_t        priv:1;
+       uint32_t        flags:24;
+       uint32_t        opcode:8;
+       uint64_t        completion_addr;
+       union {
+               uint64_t        src_addr;
+               uint64_t        rdback_addr;
+               uint64_t        pattern;
+       };
+       union {
+               uint64_t        dst_addr;
+               uint64_t        rdback_addr2;
+               uint64_t        src2_addr;
+               uint64_t        comp_pattern;
+       };
+       uint32_t        xfer_size;
+       uint16_t        int_handle;
+       uint16_t        rsvd1;
+       union {
+               uint8_t         expected_res;
+               struct {
+                       uint64_t        delta_addr;
+                       uint32_t        max_delta_size;
+               };
+               uint32_t        delta_rec_size;
+               uint64_t        dest2;
+               /* CRC */
+               struct {
+                       uint32_t        crc_seed;
+                       uint32_t        crc_rsvd;
+                       uint64_t        seed_addr;
+               };
+               /* DIF check or strip */
+               struct {
+                       uint8_t         src_dif_flags;
+                       uint8_t         dif_chk_res;
+                       uint8_t         dif_chk_flags;
+                       uint8_t         dif_chk_res2[5];
+                       uint32_t        chk_ref_tag_seed;
+                       uint16_t        chk_app_tag_mask;
+                       uint16_t        chk_app_tag_seed;
+               };
+               /* DIF insert */
+               struct {
+                       uint8_t         dif_ins_res;
+                       uint8_t         dest_dif_flag;
+                       uint8_t         dif_ins_flags;
+                       uint8_t         dif_ins_res2[13];
+                       uint32_t        ins_ref_tag_seed;
+                       uint16_t        ins_app_tag_mask;
+                       uint16_t        ins_app_tag_seed;
+               };
+               /* DIF update */
+               struct {
+                       uint8_t         src_upd_flags;
+                       uint8_t         upd_dest_flags;
+                       uint8_t         dif_upd_flags;
+                       uint8_t         dif_upd_res[5];
+                       uint32_t        src_ref_tag_seed;
+                       uint16_t        src_app_tag_mask;
+                       uint16_t        src_app_tag_seed;
+                       uint32_t        dest_ref_tag_seed;
+                       uint16_t        dest_app_tag_mask;
+                       uint16_t        dest_app_tag_seed;
+               };
+
+               uint8_t         op_specific[24];
+       };
+} __attribute__((packed));
+
+struct dsa_raw_desc {
+       uint64_t        field[8];
+} __attribute__((packed));
+
+/*
+ * The status field will be modified by hardware, therefore it should be
+ * volatile to prevent the compiler from optimizing the read.
+ */
+struct dsa_completion_record {
+       volatile uint8_t        status;
+       union {
+               uint8_t         result;
+               uint8_t         dif_status;
+       };
+       uint16_t                rsvd;
+       uint32_t                bytes_completed;
+       uint64_t                fault_addr;
+       union {
+               uint16_t        delta_rec_size;
+               uint16_t        crc_val;
+
+               /* DIF check & strip */
+               struct {
+                       uint32_t        dif_chk_ref_tag;
+                       uint16_t        dif_chk_app_tag_mask;
+                       uint16_t        dif_chk_app_tag;
+               };
+
+               /* DIF insert */
+               struct {
+                       uint64_t        dif_ins_res;
+                       uint32_t        dif_ins_ref_tag;
+                       uint16_t        dif_ins_app_tag_mask;
+                       uint16_t        dif_ins_app_tag;
+               };
+
+               /* DIF update */
+               struct {
+                       uint32_t        dif_upd_src_ref_tag;
+                       uint16_t        dif_upd_src_app_tag_mask;
+                       uint16_t        dif_upd_src_app_tag;
+                       uint32_t        dif_upd_dest_ref_tag;
+                       uint16_t        dif_upd_dest_app_tag_mask;
+                       uint16_t        dif_upd_dest_app_tag;
+               };
+
+               uint8_t         op_specific[16];
+       };
+} __attribute__((packed));
+
+struct dsa_raw_completion_record {
+       uint64_t        field[4];
+} __attribute__((packed));
+
+#endif
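A hedged userspace usage sketch for this new header: software submits a descriptor, then polls the completion record the device writes back. The volatile status field forces a fresh load on every loop iteration (the helper name is illustrative):

    #include <linux/idxd.h>

    static enum dsa_completion_status dsa_poll_sketch(struct dsa_completion_record *comp)
    {
            while (comp->status == DSA_COMP_NONE)
                    ;       /* a real caller would bound the spin or back off */

            /* The top bit only records that the device wrote the record. */
            return comp->status & DSA_COMP_STATUS_MASK;
    }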
index f056b2a00d5c7695a69826786b5e2336c79c3c31..9a61c28ed3ae48cf2107ffc8b9ea0647e9ccb4f8 100644 (file)
@@ -34,6 +34,7 @@ struct input_event {
        __kernel_ulong_t __sec;
 #if defined(__sparc__) && defined(__arch64__)
        unsigned int __usec;
+       unsigned int __pad;
 #else
        __kernel_ulong_t __usec;
 #endif
index eabccb46edd1264250320891f180ed7243ae990d..55cfcb71606dbd6130206223f017efcb0a8de74e 100644 (file)
@@ -48,6 +48,7 @@ struct io_uring_sqe {
 #define IOSQE_FIXED_FILE       (1U << 0)       /* use fixed fileset */
 #define IOSQE_IO_DRAIN         (1U << 1)       /* issue after inflight IO */
 #define IOSQE_IO_LINK          (1U << 2)       /* links next sqe */
+#define IOSQE_IO_HARDLINK      (1U << 3)       /* like LINK, but stronger */
 
 /*
  * io_uring_setup() flags
@@ -57,23 +58,28 @@ struct io_uring_sqe {
 #define IORING_SETUP_SQ_AFF    (1U << 2)       /* sq_thread_cpu is valid */
 #define IORING_SETUP_CQSIZE    (1U << 3)       /* app defines CQ size */
 
-#define IORING_OP_NOP          0
-#define IORING_OP_READV                1
-#define IORING_OP_WRITEV       2
-#define IORING_OP_FSYNC                3
-#define IORING_OP_READ_FIXED   4
-#define IORING_OP_WRITE_FIXED  5
-#define IORING_OP_POLL_ADD     6
-#define IORING_OP_POLL_REMOVE  7
-#define IORING_OP_SYNC_FILE_RANGE      8
-#define IORING_OP_SENDMSG      9
-#define IORING_OP_RECVMSG      10
-#define IORING_OP_TIMEOUT      11
-#define IORING_OP_TIMEOUT_REMOVE       12
-#define IORING_OP_ACCEPT       13
-#define IORING_OP_ASYNC_CANCEL 14
-#define IORING_OP_LINK_TIMEOUT 15
-#define IORING_OP_CONNECT      16
+enum {
+       IORING_OP_NOP,
+       IORING_OP_READV,
+       IORING_OP_WRITEV,
+       IORING_OP_FSYNC,
+       IORING_OP_READ_FIXED,
+       IORING_OP_WRITE_FIXED,
+       IORING_OP_POLL_ADD,
+       IORING_OP_POLL_REMOVE,
+       IORING_OP_SYNC_FILE_RANGE,
+       IORING_OP_SENDMSG,
+       IORING_OP_RECVMSG,
+       IORING_OP_TIMEOUT,
+       IORING_OP_TIMEOUT_REMOVE,
+       IORING_OP_ACCEPT,
+       IORING_OP_ASYNC_CANCEL,
+       IORING_OP_LINK_TIMEOUT,
+       IORING_OP_CONNECT,
+
+       /* this goes last, obviously */
+       IORING_OP_LAST,
+};
 
 /*
  * sqe->fsync_flags
@@ -172,7 +178,8 @@ struct io_uring_params {
 
 struct io_uring_files_update {
        __u32 offset;
-       __s32 *fds;
+       __u32 resv;
+       __aligned_u64 /* __s32 * */ fds;
 };
 
 #endif
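Carrying the fd array pointer as __aligned_u64 instead of __s32 * gives 32-bit and 64-bit userspace one identical struct layout, so the kernel needs no compat translation. A hedged userspace sketch of filling the revised struct:

    #include <stdint.h>
    #include <string.h>
    #include <linux/io_uring.h>

    static void fill_files_update(struct io_uring_files_update *u,
                                  int *fds, uint32_t offset)
    {
            memset(u, 0, sizeof(*u));           /* resv must be zero */
            u->offset = offset;
            u->fds = (uint64_t)(uintptr_t)fds;  /* pointer travels as a u64 */
    }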
index 409d3ad1e6e288e8192a6632655ee51d2906132c..1d0350e44ae34c57ae029de544a51c47d9fb0b95 100644 (file)
@@ -9,11 +9,11 @@
  * and the comment before kcov_remote_start() for usage details.
  */
 struct kcov_remote_arg {
-       unsigned int    trace_mode;     /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
-       unsigned int    area_size;      /* Length of coverage buffer in words */
-       unsigned int    num_handles;    /* Size of handles array */
-       __u64           common_handle;
-       __u64           handles[0];
+       __u32           trace_mode;     /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
+       __u32           area_size;      /* Length of coverage buffer in words */
+       __u32           num_handles;    /* Size of handles array */
+       __aligned_u64   common_handle;
+       __aligned_u64   handles[0];
 };
 
 #define KCOV_REMOTE_MAX_HANDLES                0x100
index 4bc6d1a087816a67a560ccf333af3a8bab1cbfc5..b4d804a9fccb2d36e242149177f8c522cff12c60 100644 (file)
@@ -41,19 +41,19 @@ struct xt_sctp_info {
 #define SCTP_CHUNKMAP_SET(chunkmap, type)              \
        do {                                            \
                (chunkmap)[type / bytes(__u32)] |=      \
-                       1 << (type % bytes(__u32));     \
+                       1u << (type % bytes(__u32));    \
        } while (0)
 
 #define SCTP_CHUNKMAP_CLEAR(chunkmap, type)                    \
        do {                                                    \
                (chunkmap)[type / bytes(__u32)] &=              \
-                       ~(1 << (type % bytes(__u32)));  \
+                       ~(1u << (type % bytes(__u32))); \
        } while (0)
 
 #define SCTP_CHUNKMAP_IS_SET(chunkmap, type)                   \
 ({                                                             \
        ((chunkmap)[type / bytes (__u32)] &             \
-               (1 << (type % bytes (__u32)))) ? 1: 0;  \
+               (1u << (type % bytes (__u32)))) ? 1: 0; \
 })
 
 #define SCTP_CHUNKMAP_RESET(chunkmap) \
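Switching the shifted constant from 1 to 1u matters at type == 31: shifting a signed 1 into the sign bit is undefined behaviour in C, while the unsigned shift is well defined. A standalone illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t map = 0;
            unsigned int type = 31;         /* the problematic chunk type */

            map |= 1u << (type % 32);       /* well defined; 1 << 31 is UB */
            assert(map == 0x80000000u);
            return 0;
    }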
index 341e0e8cae46244debd4fe85152daed8927cbdef..5eab191607f86ca64cf6d10267d95c355a95e949 100644 (file)
@@ -5517,6 +5517,10 @@ enum nl80211_feature_flags {
  *     with VLAN tagged frames and separate VLAN-specific netdevs added using
  *     vconfig similarly to the Ethernet case.
  *
+ * @NL80211_EXT_FEATURE_AQL: The driver supports the Airtime Queue Limit (AQL)
+ *     feature, which prevents bufferbloat by using the expected transmission
+ *     time to limit the amount of data buffered in the hardware.
+ *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
  */
@@ -5563,6 +5567,7 @@ enum nl80211_ext_feature_index {
        NL80211_EXT_FEATURE_STA_TX_PWR,
        NL80211_EXT_FEATURE_SAE_OFFLOAD,
        NL80211_EXT_FEATURE_VLAN_OFFLOAD,
+       NL80211_EXT_FEATURE_AQL,
 
        /* add new features before the definition below */
        NUM_NL80211_EXT_FEATURES,
index 4a02178324641f555336164316ba2e30805c3719..2e3bc22c6f202f6280ef7279de60fa966b84d3ed 100644 (file)
 /* Flags for the clone3() syscall. */
 #define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
 
+/*
+ * These cloning flags intersect with CSIGNAL, so they can only be used
+ * with the unshare() and clone3() syscalls:
+ */
+#define CLONE_NEWTIME  0x00000080      /* New time namespace */
+
 #ifndef __ASSEMBLY__
 /**
  * struct clone_args - arguments for the clone3 syscall
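Because the flag value sits inside CSIGNAL, legacy clone() cannot accept it; only unshare() and clone3() can. A hedged userspace sketch (offsets for the new namespace would be written to /proc/self/timens_offsets before the next fork or exec):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    #ifndef CLONE_NEWTIME
    #define CLONE_NEWTIME 0x00000080        /* mirrors the new UAPI value */
    #endif

    int main(void)
    {
            if (unshare(CLONE_NEWTIME) != 0)
                    perror("unshare(CLONE_NEWTIME)");
            return 0;
    }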
index 2e302c0f41f7e109c55711c51e3698acdf3d6557..c5f347cc5e55e75f86dc32506816c75c00e66a2e 100644 (file)
@@ -21,6 +21,8 @@
 #define CS_RAW         1
 #define CS_BASES       (CS_RAW + 1)
 
+#define VCLOCK_TIMENS  UINT_MAX
+
 /**
  * struct vdso_timestamp - basetime per clock_id
  * @sec:       seconds
@@ -48,6 +50,7 @@ struct vdso_timestamp {
  * @mult:              clocksource multiplier
  * @shift:             clocksource shift
  * @basetime[clock_id]:        basetime per clock_id
+ * @offset[clock_id]:  time namespace offset per clock_id
  * @tz_minuteswest:    minutes west of Greenwich
  * @tz_dsttime:                type of DST correction
  * @hrtimer_res:       hrtimer resolution
@@ -55,6 +58,17 @@ struct vdso_timestamp {
  *
  * vdso_data will be accessed by 64 bit and compat code at the same time
  * so we should be careful before modifying this structure.
+ *
+ * @basetime is used to store the base time for the system wide time getter
+ * VVAR page.
+ *
+ * @offset is used by the special time namespace VVAR pages which are
+ * installed instead of the real VVAR page. These namespace pages must set
+ * @seq to 1 and @clock_mode to VCLOCK_TIMENS to force the code into the
+ * time namespace slow path. The namespace aware functions retrieve the
+ * real system wide VVAR page, read host time and add the per clock offset.
+ * For clocks which are not affected by time namespace adjustment the
+ * offset must be zero.
  */
 struct vdso_data {
        u32                     seq;
@@ -65,7 +79,10 @@ struct vdso_data {
        u32                     mult;
        u32                     shift;
 
-       struct vdso_timestamp   basetime[VDSO_BASES];
+       union {
+               struct vdso_timestamp   basetime[VDSO_BASES];
+               struct timens_offset    offset[VDSO_BASES];
+       };
 
        s32                     tz_minuteswest;
        s32                     tz_dsttime;
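A hedged, simplified sketch of how a vDSO clock getter honors the time-namespace VVAR page described above: that page keeps @seq odd and marks @clock_mode, so the seqcount loop can never complete and the reader is diverted first (the real generic code in lib/vdso/gettimeofday.c also applies the clocksource mult/shift before returning):

    static int vdso_read_sketch(const struct vdso_data *vd, int clk,
                                struct __kernel_timespec *ts)
    {
            u32 seq;

            while ((seq = READ_ONCE(vd->seq)) & 1) {
                    /* timens pages pin seq to 1; divert before spinning */
                    if (vd->clock_mode == VCLOCK_TIMENS)
                            return -EAGAIN; /* real code takes the timens slow path here */
                    cpu_relax();
            }
            smp_rmb();

            ts->tv_sec  = vd->basetime[clk].sec;
            ts->tv_nsec = vd->basetime[clk].nsec;

            smp_rmb();
            return (seq == READ_ONCE(vd->seq)) ? 0 : -EAGAIN; /* caller retries */
    }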
index 01641dbb68ef09cf106746bea5722f740548b314..9a2af9fca45e21a1d87947e8ba0f221225dc2cb6 100644 (file)
@@ -10,7 +10,7 @@ static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
 {
        u32 seq;
 
-       while ((seq = READ_ONCE(vd->seq)) & 1)
+       while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
                cpu_relax();
 
        smp_rmb();
index 3f40501fc60b1d9f95a7455ed0d29f97d8bd9571..2af7a1cd665893d9ff509730c73e0e0852a6881a 100644 (file)
@@ -125,35 +125,24 @@ struct __name##_back_ring {                                               \
     memset((_s)->pad, 0, sizeof((_s)->pad));                           \
 } while(0)
 
-#define FRONT_RING_INIT(_r, _s, __size) do {                           \
-    (_r)->req_prod_pvt = 0;                                            \
-    (_r)->rsp_cons = 0;                                                        \
+#define FRONT_RING_ATTACH(_r, _s, _i, __size) do {                     \
+    (_r)->req_prod_pvt = (_i);                                         \
+    (_r)->rsp_cons = (_i);                                             \
     (_r)->nr_ents = __RING_SIZE(_s, __size);                           \
     (_r)->sring = (_s);                                                        \
 } while (0)
 
-#define BACK_RING_INIT(_r, _s, __size) do {                            \
-    (_r)->rsp_prod_pvt = 0;                                            \
-    (_r)->req_cons = 0;                                                        \
-    (_r)->nr_ents = __RING_SIZE(_s, __size);                           \
-    (_r)->sring = (_s);                                                        \
-} while (0)
+#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
 
-/* Initialize to existing shared indexes -- for recovery */
-#define FRONT_RING_ATTACH(_r, _s, __size) do {                         \
-    (_r)->sring = (_s);                                                        \
-    (_r)->req_prod_pvt = (_s)->req_prod;                               \
-    (_r)->rsp_cons = (_s)->rsp_prod;                                   \
+#define BACK_RING_ATTACH(_r, _s, _i, __size) do {                      \
+    (_r)->rsp_prod_pvt = (_i);                                         \
+    (_r)->req_cons = (_i);                                             \
     (_r)->nr_ents = __RING_SIZE(_s, __size);                           \
-} while (0)
-
-#define BACK_RING_ATTACH(_r, _s, __size) do {                          \
     (_r)->sring = (_s);                                                        \
-    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                               \
-    (_r)->req_cons = (_s)->req_prod;                                   \
-    (_r)->nr_ents = __RING_SIZE(_s, __size);                           \
 } while (0)
 
+#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
+
 /* How big is this ring? */
 #define RING_SIZE(_r)                                                  \
     ((_r)->nr_ents)
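After this cleanup, INIT is literally ATTACH at index 0, and recovery attaches at a caller-saved index instead of trusting the producer fields in the shared page, which the other end could have corrupted. A hedged usage sketch, assuming ring types generated by DEFINE_RING_TYPES(my, ...):

    static void ring_connect_sketch(struct my_front_ring *front,
                                    struct my_sring *sring,
                                    RING_IDX saved_idx, bool recovering)
    {
            if (recovering)
                    /* resume from the index we last observed ourselves */
                    FRONT_RING_ATTACH(front, sring, saved_idx, PAGE_SIZE);
            else
                    FRONT_RING_INIT(front, sring, PAGE_SIZE);  /* ATTACH at 0 */
    }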
index d89969aa9942ce39df52674bb911600ab16eac48..095be1d66f31c8ad66ebf77ce70c364fbbaf9612 100644 (file)
@@ -215,7 +215,7 @@ bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
 void xen_efi_runtime_setup(void);
 
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 static inline void xen_preemptible_hcall_begin(void)
 {
@@ -239,6 +239,6 @@ static inline void xen_preemptible_hcall_end(void)
        __this_cpu_write(xen_in_preemptible_hcall, false);
 }
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* INCLUDE_XEN_OPS_H */
index 869c816d5f8c3097b09298a9d086e7f75fff540e..24228a102141e3981911bba20712ca53dcb2b219 100644 (file)
@@ -93,6 +93,7 @@ struct xenbus_device_id
 struct xenbus_driver {
        const char *name;       /* defaults to ids[0].devicetype */
        const struct xenbus_device_id *ids;
+       bool allow_rebind; /* avoid setting xenstore closed during remove */
        int (*probe)(struct xenbus_device *dev,
                     const struct xenbus_device_id *id);
        void (*otherend_changed)(struct xenbus_device *dev,
index a34064a031a5ed785eb4c7bab830408f1fb2608a..79df8b0046b13aaffb6c946816bef74b9f7227fd 100644 (file)
@@ -58,7 +58,7 @@ config CONSTRUCTORS
 config IRQ_WORK
        bool
 
-config BUILDTIME_EXTABLE_SORT
+config BUILDTIME_TABLE_SORT
        bool
 
 config THREAD_INFO_IN_TASK
@@ -1080,6 +1080,14 @@ config UTS_NS
          In this namespace tasks see different info provided with the
          uname() system call
 
+config TIME_NS
+       bool "TIME namespace"
+       depends on GENERIC_VDSO_TIME_NS
+       default y
+       help
+         In this namespace the boottime and monotonic clocks can be set.
+         Time will keep advancing at the same pace.
+
 config IPC_NS
        bool "IPC namespace"
        depends on (SYSVIPC || POSIX_MQUEUE)
index af9cda887a23f32d64b989ced85f8ca2928b0950..0ae9cc22f2ae255c55337812b121a9a2d2eec9ca 100644 (file)
@@ -387,12 +387,27 @@ static void __init get_fs_names(char *page)
        *s = '\0';
 }
 
-static int __init do_mount_root(char *name, char *fs, int flags, void *data)
+static int __init do_mount_root(const char *name, const char *fs,
+                                const int flags, const void *data)
 {
        struct super_block *s;
-       int err = ksys_mount(name, "/root", fs, flags, data);
-       if (err)
-               return err;
+       struct page *p = NULL;
+       char *data_page = NULL;
+       int ret;
+
+       if (data) {
+               /* do_mount() requires a full page as fifth argument */
+               p = alloc_page(GFP_KERNEL);
+               if (!p)
+                       return -ENOMEM;
+               data_page = page_address(p);
+               /* zero-pad. do_mount() will make sure it's terminated */
+               strncpy(data_page, data, PAGE_SIZE);
+       }
+
+       ret = do_mount(name, "/root", fs, flags, data_page);
+       if (ret)
+               goto out;
 
        ksys_chdir("/root");
        s = current->fs->pwd.dentry->d_sb;
@@ -402,7 +417,11 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
               s->s_type->name,
               sb_rdonly(s) ? " readonly" : "",
               MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
-       return 0;
+
+out:
+       if (p)
+               put_page(p);
+       return ret;
 }
 
 void __init mount_block_root(char *name, int flags)
@@ -670,8 +689,8 @@ void __init prepare_namespace(void)
 
        mount_root();
 out:
-       devtmpfs_mount("dev");
-       ksys_mount(".", "/", NULL, MS_MOVE, NULL);
+       devtmpfs_mount();
+       do_mount(".", "/", NULL, MS_MOVE, NULL);
        ksys_chroot(".");
 }
 
index a9c6cc56f505e6210356b0c8d94e8aa90dbc2aa7..dab8b1151b5698719cc9bbb072cead21c8773181 100644 (file)
@@ -48,13 +48,10 @@ early_param("initrd", early_initrd);
 static int init_linuxrc(struct subprocess_info *info, struct cred *new)
 {
        ksys_unshare(CLONE_FS | CLONE_FILES);
-       /* stdin/stdout/stderr for /linuxrc */
-       ksys_open("/dev/console", O_RDWR, 0);
-       ksys_dup(0);
-       ksys_dup(0);
+       console_on_rootfs();
        /* move initrd over / and chdir/chroot in initrd root */
        ksys_chdir("/root");
-       ksys_mount(".", "/", NULL, MS_MOVE, NULL);
+       do_mount(".", "/", NULL, MS_MOVE, NULL);
        ksys_chroot(".");
        ksys_setsid();
        return 0;
@@ -89,7 +86,7 @@ static void __init handle_initrd(void)
        current->flags &= ~PF_FREEZER_SKIP;
 
        /* move initrd to rootfs' /old */
-       ksys_mount("..", ".", NULL, MS_MOVE, NULL);
+       do_mount("..", ".", NULL, MS_MOVE, NULL);
        /* switch root and cwd back to / of rootfs */
        ksys_chroot("..");
 
@@ -103,7 +100,7 @@ static void __init handle_initrd(void)
        mount_root();
 
        printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
-       error = ksys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
+       error = do_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
        if (!error)
                printk("okay\n");
        else {
index 91f6ebb30ef041a8667c18b949ccf42e976174e1..da1bc0b60a7de3119113a8dace48c79928f5a927 100644 (file)
@@ -553,6 +553,7 @@ static void __init mm_init(void)
         * bigger than MAX_ORDER unless SPARSEMEM.
         */
        page_ext_init_flatmem();
+       init_debug_pagealloc();
        report_meminit();
        mem_init();
        kmem_cache_init();
@@ -1155,6 +1156,17 @@ static int __ref kernel_init(void *unused)
              "See Linux Documentation/admin-guide/init.rst for guidance.");
 }
 
+void console_on_rootfs(void)
+{
+       /* Open /dev/console as stdin; this should never fail */
+       if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+               pr_err("Warning: unable to open an initial console.\n");
+
+       /* create stdout/stderr */
+       (void) ksys_dup(0);
+       (void) ksys_dup(0);
+}
+
 static noinline void __init kernel_init_freeable(void)
 {
        /*
@@ -1190,12 +1202,8 @@ static noinline void __init kernel_init_freeable(void)
 
        do_basic_setup();
 
-       /* Open the /dev/console on the rootfs, this should never fail */
-       if (ksys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
-               pr_err("Warning: unable to open an initial console.\n");
+       console_on_rootfs();
 
-       (void) ksys_dup(0);
-       (void) ksys_dup(0);
        /*
         * check if there is an early userspace init.  If yes, let it do all
         * the work
index d126d156efc64e7d2d710197cf50377c12ad620d..915eacb9c059dd778c5ec6c19a66328db0a61968 100644 (file)
@@ -100,7 +100,7 @@ device_initcall(ipc_init);
 static const struct rhashtable_params ipc_kht_params = {
        .head_offset            = offsetof(struct kern_ipc_perm, khtnode),
        .key_offset             = offsetof(struct kern_ipc_perm, key),
-       .key_len                = FIELD_SIZEOF(struct kern_ipc_perm, key),
+       .key_len                = sizeof_field(struct kern_ipc_perm, key),
        .automatic_shrinking    = true,
 };
 
index e0852dc333acd728917ecb3cc1dcd8db72ec3687..3de8fd11873b476aa6d9538f869b306f5881a29a 100644 (file)
@@ -101,7 +101,7 @@ config UNINLINE_SPIN_UNLOCK
 # unlock and unlock_irq functions are inlined when:
 #   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
 #  or
-#   - DEBUG_SPINLOCK=n and PREEMPT=n
+#   - DEBUG_SPINLOCK=n and PREEMPTION=n
 #
 # unlock_bh and unlock_irqrestore functions are inlined when:
 #   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
@@ -139,7 +139,7 @@ config INLINE_SPIN_UNLOCK_BH
 
 config INLINE_SPIN_UNLOCK_IRQ
        def_bool y
-       depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_IRQ
+       depends on !PREEMPTION || ARCH_INLINE_SPIN_UNLOCK_IRQ
 
 config INLINE_SPIN_UNLOCK_IRQRESTORE
        def_bool y
@@ -168,7 +168,7 @@ config INLINE_READ_LOCK_IRQSAVE
 
 config INLINE_READ_UNLOCK
        def_bool y
-       depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK
+       depends on !PREEMPTION || ARCH_INLINE_READ_UNLOCK
 
 config INLINE_READ_UNLOCK_BH
        def_bool y
@@ -176,7 +176,7 @@ config INLINE_READ_UNLOCK_BH
 
 config INLINE_READ_UNLOCK_IRQ
        def_bool y
-       depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_IRQ
+       depends on !PREEMPTION || ARCH_INLINE_READ_UNLOCK_IRQ
 
 config INLINE_READ_UNLOCK_IRQRESTORE
        def_bool y
@@ -205,7 +205,7 @@ config INLINE_WRITE_LOCK_IRQSAVE
 
 config INLINE_WRITE_UNLOCK
        def_bool y
-       depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK
+       depends on !PREEMPTION || ARCH_INLINE_WRITE_UNLOCK
 
 config INLINE_WRITE_UNLOCK_BH
        def_bool y
@@ -213,7 +213,7 @@ config INLINE_WRITE_UNLOCK_BH
 
 config INLINE_WRITE_UNLOCK_IRQ
        def_bool y
-       depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_IRQ
+       depends on !PREEMPTION || ARCH_INLINE_WRITE_UNLOCK_IRQ
 
 config INLINE_WRITE_UNLOCK_IRQRESTORE
        def_bool y
index 8e09f0f55b4b0f8a0b1b517e19463f8df0d8a6d2..17b0d523afb35cea50ee0ea8c5063c08b4cd209e 100644 (file)
@@ -102,12 +102,13 @@ struct audit_net {
  * This struct is RCU protected; you must either hold the RCU lock for reading
  * or the associated spinlock for writing.
  */
-static struct auditd_connection {
+struct auditd_connection {
        struct pid *pid;
        u32 portid;
        struct net *net;
        struct rcu_head rcu;
-} *auditd_conn = NULL;
+};
+static struct auditd_connection __rcu *auditd_conn;
 static DEFINE_SPINLOCK(auditd_conn_lock);
 
 /* If audit_rate_limit is non-zero, limit the rate of sending audit records
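
Annotating auditd_conn as __rcu lets sparse flag plain dereferences of
the pointer. A hedged sketch of the access pattern the comment above
the struct prescribes (ac, old, new, use() and free_fn are illustrative
names, not taken from audit.c):

/* Reader: the pointer is only stable inside the RCU read section. */
rcu_read_lock();
ac = rcu_dereference(auditd_conn);
if (ac)
	use(ac->portid);
rcu_read_unlock();

/* Updater: publish under the spinlock, free after a grace period. */
spin_lock(&auditd_conn_lock);
old = rcu_dereference_protected(auditd_conn,
				lockdep_is_held(&auditd_conn_lock));
rcu_assign_pointer(auditd_conn, new);
spin_unlock(&auditd_conn_lock);
if (old)
	call_rcu(&old->rcu, free_fn);
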
index 7d40da240891d3b0885ad97d1c43b0fa50611081..ed20758847247c988c41cbd5610368160b22b358 100644 (file)
@@ -3470,6 +3470,7 @@ static u8 bpf_ctx_convert_map[] = {
        [_id] = __ctx_convert##_id,
 #include <linux/bpf_types.h>
 #undef BPF_PROG_TYPE
+       0, /* avoid empty array */
 };
 #undef BPF_MAP_TYPE
 
index 9f90d3c92bdaca011f5645aa326fab4b09166d70..9e43b72eb619c06fbf8481b9de71e69ed2ecc158 100644 (file)
@@ -35,8 +35,8 @@ void cgroup_bpf_offline(struct cgroup *cgrp)
  */
 static void cgroup_bpf_release(struct work_struct *work)
 {
-       struct cgroup *cgrp = container_of(work, struct cgroup,
-                                          bpf.release_work);
+       struct cgroup *p, *cgrp = container_of(work, struct cgroup,
+                                              bpf.release_work);
        enum bpf_cgroup_storage_type stype;
        struct bpf_prog_array *old_array;
        unsigned int type;
@@ -65,6 +65,9 @@ static void cgroup_bpf_release(struct work_struct *work)
 
        mutex_unlock(&cgroup_mutex);
 
+       for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+               cgroup_bpf_put(p);
+
        percpu_ref_exit(&cgrp->bpf.refcnt);
        cgroup_put(cgrp);
 }
@@ -199,6 +202,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
  */
 #define        NR ARRAY_SIZE(cgrp->bpf.effective)
        struct bpf_prog_array *arrays[NR] = {};
+       struct cgroup *p;
        int ret, i;
 
        ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
@@ -206,6 +210,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
        if (ret)
                return ret;
 
+       for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+               cgroup_bpf_get(p);
+
        for (i = 0; i < NR; i++)
                INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
 
@@ -1341,7 +1348,7 @@ static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
                *insn++ = BPF_LDX_MEM(
                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
                        bpf_target_off(struct bpf_sysctl_kern, write,
-                                      FIELD_SIZEOF(struct bpf_sysctl_kern,
+                                      sizeof_field(struct bpf_sysctl_kern,
                                                    write),
                                       target_size));
                break;
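
Concretely, for a hierarchy parent -> child: the child's bpf.effective
arrays can point at programs attached to the parent, so
cgroup_bpf_inherit() now pins every ancestor with cgroup_bpf_get() and
cgroup_bpf_release() unpins them only once the child's arrays are gone.
Previously an ancestor's bpf state could be torn down while a
descendant still dispatched through its programs.
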
index 49e32acad7d80e310376c137453fffd97ac234c6..af6b738cf435cce5b7377eaf1aaf57e7ae8260a4 100644 (file)
@@ -2043,23 +2043,28 @@ static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
        for_each_cgroup_storage_type(stype) {
                if (!aux->cgroup_storage[stype])
                        continue;
-               bpf_cgroup_storage_release(aux->prog,
-                                          aux->cgroup_storage[stype]);
+               bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]);
        }
 }
 
-static void bpf_free_used_maps(struct bpf_prog_aux *aux)
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+                         struct bpf_map **used_maps, u32 len)
 {
        struct bpf_map *map;
-       int i;
+       u32 i;
 
        bpf_free_cgroup_storage(aux);
-       for (i = 0; i < aux->used_map_cnt; i++) {
-               map = aux->used_maps[i];
+       for (i = 0; i < len; i++) {
+               map = used_maps[i];
                if (map->ops->map_poke_untrack)
                        map->ops->map_poke_untrack(map, aux);
                bpf_map_put(map);
        }
+}
+
+static void bpf_free_used_maps(struct bpf_prog_aux *aux)
+{
+       __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
        kfree(aux->used_maps);
 }
 
index 2ba750725cb26d18fe19cac08a30e0e6f48a9e86..33d01866bcc2c014ace7faddaa0b4f6fbee8f8a4 100644 (file)
@@ -20,7 +20,7 @@ struct bpf_cgroup_storage_map {
        struct bpf_map map;
 
        spinlock_t lock;
-       struct bpf_prog *prog;
+       struct bpf_prog_aux *aux;
        struct rb_root root;
        struct list_head list;
 };
@@ -357,7 +357,7 @@ static int cgroup_storage_check_btf(const struct bpf_map *map,
         * The first field must be a 64 bit integer at 0 offset.
         */
        m = (struct btf_member *)(key_type + 1);
-       size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
+       size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
        if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
                return -EINVAL;
 
@@ -366,7 +366,7 @@ static int cgroup_storage_check_btf(const struct bpf_map *map,
         */
        m++;
        offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
-       size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
+       size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
        if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
                return -EINVAL;
 
@@ -420,7 +420,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
        .map_seq_show_elem = cgroup_storage_seq_show_elem,
 };
 
-int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
+int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
 {
        enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
@@ -428,14 +428,14 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
 
        spin_lock_bh(&map->lock);
 
-       if (map->prog && map->prog != prog)
+       if (map->aux && map->aux != aux)
                goto unlock;
-       if (prog->aux->cgroup_storage[stype] &&
-           prog->aux->cgroup_storage[stype] != _map)
+       if (aux->cgroup_storage[stype] &&
+           aux->cgroup_storage[stype] != _map)
                goto unlock;
 
-       map->prog = prog;
-       prog->aux->cgroup_storage[stype] = _map;
+       map->aux = aux;
+       aux->cgroup_storage[stype] = _map;
        ret = 0;
 unlock:
        spin_unlock_bh(&map->lock);
@@ -443,16 +443,16 @@ unlock:
        return ret;
 }
 
-void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
+void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *_map)
 {
        enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 
        spin_lock_bh(&map->lock);
-       if (map->prog == prog) {
-               WARN_ON(prog->aux->cgroup_storage[stype] != _map);
-               map->prog = NULL;
-               prog->aux->cgroup_storage[stype] = NULL;
+       if (map->aux == aux) {
+               WARN_ON(aux->cgroup_storage[stype] != _map);
+               map->aux = NULL;
+               aux->cgroup_storage[stype] = NULL;
        }
        spin_unlock_bh(&map->lock);
 }
index ca52b9642943ffe6c4567539df95e810437f35e1..d4f335a9a89989f48e06588d1ff513d5bb30cf86 100644 (file)
@@ -44,14 +44,19 @@ struct tnum tnum_rshift(struct tnum a, u8 shift)
        return TNUM(a.value >> shift, a.mask >> shift);
 }
 
-struct tnum tnum_arshift(struct tnum a, u8 min_shift)
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness)
 {
        /* if a.value is negative, arithmetic shifting by minimum shift
         * will have larger negative offset compared to more shifting.
         * If a.value is nonnegative, arithmetic shifting by minimum shift
         * will have larger positive offset compare to more shifting.
         */
-       return TNUM((s64)a.value >> min_shift, (s64)a.mask >> min_shift);
+       if (insn_bitness == 32)
+               return TNUM((u32)(((s32)a.value) >> min_shift),
+                           (u32)(((s32)a.mask)  >> min_shift));
+       else
+               return TNUM((s64)a.value >> min_shift,
+                           (s64)a.mask  >> min_shift);
 }
 
 struct tnum tnum_add(struct tnum a, struct tnum b)
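
The new 32-bit branch matters because the sign extension has to happen
at 32-bit width before the result is truncated back. A stand-alone
illustration using the same casts as the patch (it relies on gcc/clang
performing an arithmetic right shift of negative values, which ISO C
leaves implementation-defined):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t v = 0x80000000u; /* negative as s32, positive as s64 */

	/* 64-bit arithmetic shift: no sign bits enter from the left. */
	printf("%#llx\n", (unsigned long long)((int64_t)v >> 4)); /* 0x8000000 */

	/* 32-bit arithmetic shift: the s32 sign bit is replicated. */
	printf("%#x\n", (uint32_t)((int32_t)v >> 4)); /* 0xf8000000 */
	return 0;
}
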
index 7e89f1f49d7712d6f530bba2c6572b57ae702b1f..23b0d5cfd47ebff7b2cb4903ab305324817d4541 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/hash.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <linux/ftrace.h>
 
 /* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
 #define TRAMPOLINE_HASH_BITS 10
@@ -59,6 +60,60 @@ out:
        return tr;
 }
 
+static int is_ftrace_location(void *ip)
+{
+       long addr;
+
+       addr = ftrace_location((long)ip);
+       if (!addr)
+               return 0;
+       if (WARN_ON_ONCE(addr != (long)ip))
+               return -EFAULT;
+       return 1;
+}
+
+static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
+{
+       void *ip = tr->func.addr;
+       int ret;
+
+       if (tr->func.ftrace_managed)
+               ret = unregister_ftrace_direct((long)ip, (long)old_addr);
+       else
+               ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
+       return ret;
+}
+
+static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
+{
+       void *ip = tr->func.addr;
+       int ret;
+
+       if (tr->func.ftrace_managed)
+               ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
+       else
+               ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
+       return ret;
+}
+
+/* first time registering */
+static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
+{
+       void *ip = tr->func.addr;
+       int ret;
+
+       ret = is_ftrace_location(ip);
+       if (ret < 0)
+               return ret;
+       tr->func.ftrace_managed = ret;
+
+       if (tr->func.ftrace_managed)
+               ret = register_ftrace_direct((long)ip, (long)new_addr);
+       else
+               ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
+       return ret;
+}
+
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.  Pick a number to fit into PAGE_SIZE / 2
  */
@@ -77,8 +132,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
        int err;
 
        if (fentry_cnt + fexit_cnt == 0) {
-               err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
-                                        old_image, NULL);
+               err = unregister_fentry(tr, old_image);
                tr->selector = 0;
                goto out;
        }
@@ -105,12 +159,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 
        if (tr->selector)
                /* progs already running at this address */
-               err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
-                                        old_image, new_image);
+               err = modify_fentry(tr, old_image, new_image);
        else
                /* first time registering */
-               err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL,
-                                        new_image);
+               err = register_fentry(tr, new_image);
        if (err)
                goto out;
        tr->selector++;
index 034ef81f935b0192550f75ca228b86444f3f1fce..7d530ce8719d8d06efc9498f70e13f35c700ccd9 100644 (file)
@@ -907,7 +907,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
        BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
 
-static void __mark_reg_not_init(struct bpf_reg_state *reg);
+static void __mark_reg_not_init(const struct bpf_verifier_env *env,
+                               struct bpf_reg_state *reg);
 
 /* Mark the unknown part of a register (variable offset or scalar value) as
  * known to have the value @imm.
@@ -945,7 +946,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
                verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
                /* Something bad happened, let's kill all regs */
                for (regno = 0; regno < MAX_BPF_REG; regno++)
-                       __mark_reg_not_init(regs + regno);
+                       __mark_reg_not_init(env, regs + regno);
                return;
        }
        __mark_reg_known_zero(regs + regno);
@@ -1054,7 +1055,8 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
-static void __mark_reg_unknown(struct bpf_reg_state *reg)
+static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+                              struct bpf_reg_state *reg)
 {
        /*
         * Clear type, id, off, and union(map_ptr, range) and
@@ -1064,6 +1066,8 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
        reg->type = SCALAR_VALUE;
        reg->var_off = tnum_unknown;
        reg->frameno = 0;
+       reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+                      true : false;
        __mark_reg_unbounded(reg);
 }
 
@@ -1074,19 +1078,16 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
                verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
                /* Something bad happened, let's kill all regs except FP */
                for (regno = 0; regno < BPF_REG_FP; regno++)
-                       __mark_reg_not_init(regs + regno);
+                       __mark_reg_not_init(env, regs + regno);
                return;
        }
-       regs += regno;
-       __mark_reg_unknown(regs);
-       /* constant backtracking is enabled for root without bpf2bpf calls */
-       regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
-                       true : false;
+       __mark_reg_unknown(env, regs + regno);
 }
 
-static void __mark_reg_not_init(struct bpf_reg_state *reg)
+static void __mark_reg_not_init(const struct bpf_verifier_env *env,
+                               struct bpf_reg_state *reg)
 {
-       __mark_reg_unknown(reg);
+       __mark_reg_unknown(env, reg);
        reg->type = NOT_INIT;
 }
 
@@ -1097,10 +1098,10 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
                verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
                /* Something bad happened, let's kill all regs except FP */
                for (regno = 0; regno < BPF_REG_FP; regno++)
-                       __mark_reg_not_init(regs + regno);
+                       __mark_reg_not_init(env, regs + regno);
                return;
        }
-       __mark_reg_not_init(regs + regno);
+       __mark_reg_not_init(env, regs + regno);
 }
 
 #define DEF_NOT_SUBREG (0)
@@ -3234,7 +3235,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                }
                if (state->stack[spi].slot_type[0] == STACK_SPILL &&
                    state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
-                       __mark_reg_unknown(&state->stack[spi].spilled_ptr);
+                       __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
                        for (j = 0; j < BPF_REG_SIZE; j++)
                                state->stack[spi].slot_type[j] = STACK_MISC;
                        goto mark;
@@ -3892,7 +3893,7 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
                if (!reg)
                        continue;
                if (reg_is_pkt_pointer_any(reg))
-                       __mark_reg_unknown(reg);
+                       __mark_reg_unknown(env, reg);
        }
 }
 
@@ -3920,7 +3921,7 @@ static void release_reg_references(struct bpf_verifier_env *env,
                if (!reg)
                        continue;
                if (reg->ref_obj_id == ref_obj_id)
-                       __mark_reg_unknown(reg);
+                       __mark_reg_unknown(env, reg);
        }
 }
 
@@ -4134,6 +4135,7 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
        struct bpf_map *map = meta->map_ptr;
        struct tnum range;
        u64 val;
+       int err;
 
        if (func_id != BPF_FUNC_tail_call)
                return 0;
@@ -4150,6 +4152,10 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
                return 0;
        }
 
+       err = mark_chain_precision(env, BPF_REG_3);
+       if (err)
+               return err;
+
        val = reg->var_off.value;
        if (bpf_map_key_unseen(aux))
                bpf_map_key_store(aux, val);
@@ -4577,7 +4583,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                /* Taint dst register if offset had invalid bounds derived from
                 * e.g. dead branches.
                 */
-               __mark_reg_unknown(dst_reg);
+               __mark_reg_unknown(env, dst_reg);
                return 0;
        }
 
@@ -4829,13 +4835,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                /* Taint dst register if offset had invalid bounds derived from
                 * e.g. dead branches.
                 */
-               __mark_reg_unknown(dst_reg);
+               __mark_reg_unknown(env, dst_reg);
                return 0;
        }
 
        if (!src_known &&
            opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
-               __mark_reg_unknown(dst_reg);
+               __mark_reg_unknown(env, dst_reg);
                return 0;
        }
 
@@ -5043,9 +5049,16 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                /* Upon reaching here, src_known is true and
                 * umax_val is equal to umin_val.
                 */
-               dst_reg->smin_value >>= umin_val;
-               dst_reg->smax_value >>= umin_val;
-               dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
+               if (insn_bitness == 32) {
+                       dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val);
+                       dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val);
+               } else {
+                       dst_reg->smin_value >>= umin_val;
+                       dst_reg->smax_value >>= umin_val;
+               }
+
+               dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val,
+                                               insn_bitness);
 
                /* blow away the dst_reg umin_value/umax_value and rely on
                 * dst_reg var_off to refine the result.
@@ -6258,6 +6271,7 @@ static bool may_access_skb(enum bpf_prog_type type)
 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
        struct bpf_reg_state *regs = cur_regs(env);
+       static const int ctx_reg = BPF_REG_6;
        u8 mode = BPF_MODE(insn->code);
        int i, err;
 
@@ -6291,7 +6305,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
        }
 
        /* check whether implicit source operand (register R6) is readable */
-       err = check_reg_arg(env, BPF_REG_6, SRC_OP);
+       err = check_reg_arg(env, ctx_reg, SRC_OP);
        if (err)
                return err;
 
@@ -6310,7 +6324,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
                return -EINVAL;
        }
 
-       if (regs[BPF_REG_6].type != PTR_TO_CTX) {
+       if (regs[ctx_reg].type != PTR_TO_CTX) {
                verbose(env,
                        "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
                return -EINVAL;
@@ -6323,6 +6337,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
                        return err;
        }
 
+       err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
+       if (err < 0)
+               return err;
+
        /* reset caller saved regs to unreadable */
        for (i = 0; i < CALLER_SAVED_REGS; i++) {
                mark_reg_not_init(env, regs, caller_saved[i]);
@@ -6977,7 +6995,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
                        /* since the register is unused, clear its state
                         * to make further comparison simpler
                         */
-                       __mark_reg_not_init(&st->regs[i]);
+                       __mark_reg_not_init(env, &st->regs[i]);
        }
 
        for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
@@ -6985,7 +7003,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
                /* liveness must not touch this stack slot anymore */
                st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
                if (!(live & REG_LIVE_READ)) {
-                       __mark_reg_not_init(&st->stack[i].spilled_ptr);
+                       __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
                        for (j = 0; j < BPF_REG_SIZE; j++)
                                st->stack[i].slot_type[j] = STACK_INVALID;
                }
@@ -8268,7 +8286,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
                        env->used_maps[env->used_map_cnt++] = map;
 
                        if (bpf_map_is_cgroup_storage(map) &&
-                           bpf_cgroup_storage_assign(env->prog, map)) {
+                           bpf_cgroup_storage_assign(env->prog->aux, map)) {
                                verbose(env, "only one cgroup storage of each type is allowed\n");
                                fdput(f);
                                return -EBUSY;
@@ -8298,18 +8316,8 @@ next_insn:
 /* drop refcnt of maps used by the rejected program */
 static void release_maps(struct bpf_verifier_env *env)
 {
-       enum bpf_cgroup_storage_type stype;
-       int i;
-
-       for_each_cgroup_storage_type(stype) {
-               if (!env->prog->aux->cgroup_storage[stype])
-                       continue;
-               bpf_cgroup_storage_release(env->prog,
-                       env->prog->aux->cgroup_storage[stype]);
-       }
-
-       for (i = 0; i < env->used_map_cnt; i++)
-               bpf_map_put(env->used_maps[i]);
+       __bpf_free_used_maps(env->prog->aux, env->used_maps,
+                            env->used_map_cnt);
 }
 
 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
@@ -9282,7 +9290,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                        insn->code = BPF_JMP | BPF_TAIL_CALL;
 
                        aux = &env->insn_aux_data[i + delta];
-                       if (prog->jit_requested && !expect_blinding &&
+                       if (env->allow_ptr_leaks && !expect_blinding &&
+                           prog->jit_requested &&
                            !bpf_map_key_poisoned(aux) &&
                            !bpf_map_ptr_poisoned(aux) &&
                            !bpf_map_ptr_unpriv(aux)) {
index 735af8f15f95406d8d468d2fbacd3ff7ed2a715e..1e12e6928bca5aae23f26da3b6f1cae2b4a8758e 100644 (file)
@@ -3055,8 +3055,6 @@ static int cgroup_apply_control_enable(struct cgroup *cgrp)
                for_each_subsys(ss, ssid) {
                        struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
 
-                       WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
-
                        if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
                                continue;
 
@@ -3066,6 +3064,8 @@ static int cgroup_apply_control_enable(struct cgroup *cgrp)
                                        return PTR_ERR(css);
                        }
 
+                       WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
+
                        if (css_visible(css)) {
                                ret = css_populate_dir(css);
                                if (ret)
@@ -3101,11 +3101,11 @@ static void cgroup_apply_control_disable(struct cgroup *cgrp)
                for_each_subsys(ss, ssid) {
                        struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
 
-                       WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
-
                        if (!css)
                                continue;
 
+                       WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
+
                        if (css->parent &&
                            !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
                                kill_css(css);
@@ -3392,7 +3392,8 @@ static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf,
        if (strcmp(strstrip(buf), "threaded"))
                return -EINVAL;
 
-       cgrp = cgroup_kn_lock_live(of->kn, false);
+       /* drain dying csses before we re-apply (threaded) subtree control */
+       cgrp = cgroup_kn_lock_live(of->kn, true);
        if (!cgrp)
                return -ENOENT;
 
index b48b22d4deb695a8432955e942149ad3e91abef0..6f87352f8219cddbd96e3c439e88c3910c15fa7f 100644 (file)
@@ -33,7 +33,7 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
                return;
 
        /*
-        * Paired with the one in cgroup_rstat_cpu_pop_upated().  Either we
+        * Paired with the one in cgroup_rstat_cpu_pop_updated().  Either we
         * see NULL updated_next or they see our updated stat.
         */
        smp_mb();
index a59cc980adadb56cbe04ba842ed80c34289748c1..9c706af713fbc3c8ee6fa0daf5805b607a92faad 100644 (file)
@@ -525,8 +525,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
        if (WARN_ON_ONCE((!cpu_online(cpu))))
                return -ECANCELED;
 
-       /* Unpark the stopper thread and the hotplug thread of the target cpu */
-       stop_machine_unpark(cpu);
+       /* Unpark the hotplug thread of the target cpu */
        kthread_unpark(st->thread);
 
        /*
@@ -1089,8 +1088,8 @@ void notify_cpu_starting(unsigned int cpu)
 
 /*
  * Called from the idle task. Wake up the controlling task which brings the
- * stopper and the hotplug thread of the upcoming CPU up and then delegates
- * the rest of the online bringup to the hotplug thread.
+ * hotplug thread of the upcoming CPU up and then delegates the rest of the
+ * online bringup to the hotplug thread.
  */
 void cpuhp_online_idle(enum cpuhp_state state)
 {
@@ -1100,6 +1099,12 @@ void cpuhp_online_idle(enum cpuhp_state state)
        if (state != CPUHP_AP_ONLINE_IDLE)
                return;
 
+       /*
+        * Unpark the stopper thread before we start the idle loop (and start
+        * scheduling); this ensures the stopper task is always available.
+        */
+       stop_machine_unpark(smp_processor_id());
+
        st->state = CPUHP_AP_ONLINE_IDLE;
        complete_ap_thread(st, true);
 }
@@ -1909,6 +1914,78 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
 
+#ifdef CONFIG_HOTPLUG_SMT
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+       struct device *dev = get_cpu_device(cpu);
+
+       dev->offline = true;
+       /* Tell user space about the state change */
+       kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+       struct device *dev = get_cpu_device(cpu);
+
+       dev->offline = false;
+       /* Tell user space about the state change */
+       kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+       int cpu, ret = 0;
+
+       cpu_maps_update_begin();
+       for_each_online_cpu(cpu) {
+               if (topology_is_primary_thread(cpu))
+                       continue;
+               ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+               if (ret)
+                       break;
+               /*
+                * As this needs to hold the cpu maps lock it's impossible
+                * to call device_offline() because that ends up calling
+                * cpu_down() which takes cpu maps lock. cpu maps lock
+                * needs to be held as this might race against in kernel
+                * abusers of the hotplug machinery (thermal management).
+                *
+                * So nothing would update device:offline state. That would
+                * leave the sysfs entry stale and prevent onlining after
+                * smt control has been changed to 'off' again. This is
+                * called under the sysfs hotplug lock, so it is properly
+                * serialized against the regular offline usage.
+                */
+               cpuhp_offline_cpu_device(cpu);
+       }
+       if (!ret)
+               cpu_smt_control = ctrlval;
+       cpu_maps_update_done();
+       return ret;
+}
+
+int cpuhp_smt_enable(void)
+{
+       int cpu, ret = 0;
+
+       cpu_maps_update_begin();
+       cpu_smt_control = CPU_SMT_ENABLED;
+       for_each_present_cpu(cpu) {
+               /* Skip online CPUs and CPUs on offline nodes */
+               if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+                       continue;
+               ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+               if (ret)
+                       break;
+               /* See comment in cpuhp_smt_disable() */
+               cpuhp_online_cpu_device(cpu);
+       }
+       cpu_maps_update_done();
+       return ret;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
 static ssize_t show_cpuhp_state(struct device *dev,
                                struct device_attribute *attr, char *buf)
@@ -2063,77 +2140,6 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
 
 #ifdef CONFIG_HOTPLUG_SMT
 
-static void cpuhp_offline_cpu_device(unsigned int cpu)
-{
-       struct device *dev = get_cpu_device(cpu);
-
-       dev->offline = true;
-       /* Tell user space about the state change */
-       kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
-}
-
-static void cpuhp_online_cpu_device(unsigned int cpu)
-{
-       struct device *dev = get_cpu_device(cpu);
-
-       dev->offline = false;
-       /* Tell user space about the state change */
-       kobject_uevent(&dev->kobj, KOBJ_ONLINE);
-}
-
-int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
-{
-       int cpu, ret = 0;
-
-       cpu_maps_update_begin();
-       for_each_online_cpu(cpu) {
-               if (topology_is_primary_thread(cpu))
-                       continue;
-               ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
-               if (ret)
-                       break;
-               /*
-                * As this needs to hold the cpu maps lock it's impossible
-                * to call device_offline() because that ends up calling
-                * cpu_down() which takes cpu maps lock. cpu maps lock
-                * needs to be held as this might race against in kernel
-                * abusers of the hotplug machinery (thermal management).
-                *
-                * So nothing would update device:offline state. That would
-                * leave the sysfs entry stale and prevent onlining after
-                * smt control has been changed to 'off' again. This is
-                * called under the sysfs hotplug lock, so it is properly
-                * serialized against the regular offline usage.
-                */
-               cpuhp_offline_cpu_device(cpu);
-       }
-       if (!ret)
-               cpu_smt_control = ctrlval;
-       cpu_maps_update_done();
-       return ret;
-}
-
-int cpuhp_smt_enable(void)
-{
-       int cpu, ret = 0;
-
-       cpu_maps_update_begin();
-       cpu_smt_control = CPU_SMT_ENABLED;
-       for_each_present_cpu(cpu) {
-               /* Skip online CPUs and CPUs on offline nodes */
-               if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
-                       continue;
-               ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
-               if (ret)
-                       break;
-               /* See comment in cpuhp_smt_disable() */
-               cpuhp_online_cpu_device(cpu);
-       }
-       cpu_maps_update_done();
-       return ret;
-}
-
-
 static ssize_t
 __store_smt_control(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
index c0a4c12d38b20ed80254c76acb28c6d060b1df86..809a985b17934ae7e01a63621c5ca49ce03577ad 100644 (file)
@@ -175,8 +175,8 @@ void exit_creds(struct task_struct *tsk)
        put_cred(cred);
 
 #ifdef CONFIG_KEYS_REQUEST_CACHE
-       key_put(current->cached_requested_key);
-       current->cached_requested_key = NULL;
+       key_put(tsk->cached_requested_key);
+       tsk->cached_requested_key = NULL;
 #endif
 }
 
@@ -223,7 +223,7 @@ struct cred *cred_alloc_blank(void)
        new->magic = CRED_MAGIC;
 #endif
 
-       if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+       if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
 
        return new;
@@ -282,7 +282,7 @@ struct cred *prepare_creds(void)
        new->security = NULL;
 #endif
 
-       if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
        validate_creds(new);
        return new;
@@ -715,7 +715,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 #ifdef CONFIG_SECURITY
        new->security = NULL;
 #endif
-       if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
                goto error;
 
        put_cred(old);
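
GFP_KERNEL_ACCOUNT is GFP_KERNEL plus __GFP_ACCOUNT, i.e. (as defined
in include/linux/gfp.h):

#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)

With it, these cred security blobs are charged to the allocating task's
memory cgroup. Unprivileged tasks can trigger cred allocation at will,
so leaving the allocations unaccounted allowed an effectively
unbounded, invisible kernel-memory footprint.
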
index 4ff86d57f9e5309905e7e5cffe5a1eb875b45e30..2173c23c25b4fcef30b27059a1c799a697d217b1 100644 (file)
@@ -10523,7 +10523,7 @@ again:
                goto unlock;
        }
 
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
+       list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
                ret = perf_try_init_event(pmu, event);
                if (!ret)
                        goto unlock;
@@ -11465,8 +11465,10 @@ SYSCALL_DEFINE5(perf_event_open,
                }
        }
 
-       if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader))
+       if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
+               err = -EINVAL;
                goto err_locked;
+       }
 
        /*
         * Must be under the same ctx::mutex as perf_install_in_context(),
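
The fourth argument is the optional lockdep condition that
list_for_each_entry_rcu() grew in v5.5: the traversal is then accepted
either inside an RCU read-side section or while the stated lock is
held, here the pmus_srcu SRCU domain. A hedged sketch of the general
pattern (pos, head, node and my_lock are illustrative):

list_for_each_entry_rcu(pos, &head, node, lockdep_is_held(&my_lock)) {
	/* legal under rcu_read_lock() or with my_lock held */
}
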
index bcbd59888e67bbd516767cb485c1d63dd1bb7075..2833ffb0c2110c482010954e73ed836395eaa6a6 100644 (file)
@@ -517,10 +517,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father,
        }
 
        write_unlock_irq(&tasklist_lock);
-       if (unlikely(pid_ns == &init_pid_ns)) {
-               panic("Attempted to kill init! exitcode=0x%08x\n",
-                       father->signal->group_exit_code ?: father->exit_code);
-       }
 
        list_for_each_entry_safe(p, n, dead, ptrace_entry) {
                list_del_init(&p->ptrace_entry);
@@ -766,6 +762,14 @@ void __noreturn do_exit(long code)
        acct_update_integrals(tsk);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
+               /*
+                * If the last thread of global init has exited, panic
+                * immediately to get a useable coredump.
+                */
+               if (unlikely(is_global_init(tsk)))
+                       panic("Attempted to kill init! exitcode=0x%08x\n",
+                               tsk->signal->group_exit_code ?: (int)code);
+
 #ifdef CONFIG_POSIX_TIMERS
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
index 2508a4f238a3f3b2e41dd78502a5a653f57c221b..ef82feb4bddc79b77e9c8d29259384dd307abe7c 100644 (file)
@@ -1832,6 +1832,7 @@ static __latent_entropy struct task_struct *copy_process(
        struct multiprocess_signals delayed;
        struct file *pidfile = NULL;
        u64 clone_flags = args->flags;
+       struct nsproxy *nsp = current->nsproxy;
 
        /*
         * Don't allow sharing the root directory with processes in a different
@@ -1874,8 +1875,16 @@ static __latent_entropy struct task_struct *copy_process(
         */
        if (clone_flags & CLONE_THREAD) {
                if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
-                   (task_active_pid_ns(current) !=
-                               current->nsproxy->pid_ns_for_children))
+                   (task_active_pid_ns(current) != nsp->pid_ns_for_children))
+                       return ERR_PTR(-EINVAL);
+       }
+
+       /*
+        * If the new process will be in a different time namespace
+        * do not allow it to share VM or a thread group with the forking task.
+        */
+       if (clone_flags & (CLONE_THREAD | CLONE_VM)) {
+               if (nsp->time_ns != nsp->time_ns_for_children)
                        return ERR_PTR(-EINVAL);
        }
 
@@ -2578,6 +2587,16 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 #endif
 
 #ifdef __ARCH_WANT_SYS_CLONE3
+
+/*
+ * copy_thread implementations handle CLONE_SETTLS by reading the TLS value from
+ * the registers containing the syscall arguments for clone. This doesn't work
+ * with clone3 since the TLS value is passed in clone_args instead.
+ */
+#ifndef CONFIG_HAVE_COPY_THREAD_TLS
+#error clone3 requires copy_thread_tls support in arch
+#endif
+
 noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
                                              struct clone_args __user *uargs,
                                              size_t usize)
@@ -2811,7 +2830,8 @@ static int check_unshare_flags(unsigned long unshare_flags)
        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
                                CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
-                               CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
+                               CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
+                               CLONE_NEWTIME))
                return -EINVAL;
        /*
         * Not implemented, but pretend it works if there is nothing
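
The new #error pins down why clone3() needs copy_thread_tls(): the TLS
pointer arrives in struct clone_args, not in a syscall-argument
register. A user-space sketch of such a call using the raw syscall
(field names from the v5.5 UAPI; a real caller must pass a pointer to a
valid, arch-specific TLS block):

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <linux/types.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long clone3_with_tls(void *tls)
{
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.flags       = CLONE_SETTLS; /* TLS comes from args, not a register */
	args.exit_signal = SIGCHLD;
	args.tls         = (__u64)(uintptr_t)tls;

	return syscall(__NR_clone3, &args, sizeof(args));
}
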
index 03c518e9747e579869e13b7e2bc91ad83e3e8da7..0cf84c8664f207c574325b899ef2e57f01295a94 100644 (file)
@@ -1178,6 +1178,7 @@ out_error:
 
 /**
  * wait_for_owner_exiting - Block until the owner has exited
+ * @ret: owner's current futex lock status
  * @exiting:   Pointer to the exiting task
  *
  * Caller must hold a refcount on @exiting.
index 6c7ca2e983a595ff561396e3d3b9400072426c56..02236b13b359974e95a2e3ca4ec3f0cadfd0962a 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/interrupt.h>
 #include <linux/ratelimit.h>
 #include <linux/irq.h>
+#include <linux/sched/isolation.h>
 
 #include "internals.h"
 
@@ -171,6 +172,20 @@ void irq_migrate_all_off_this_cpu(void)
        }
 }
 
+static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
+{
+       const struct cpumask *hk_mask;
+
+       if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
+               return false;
+
+       hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
+       if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
+               return false;
+
+       return cpumask_test_cpu(cpu, hk_mask);
+}
+
 static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
 {
        struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -188,9 +203,11 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
        /*
         * If the interrupt can only be directed to a single target
         * CPU then it is already assigned to a CPU in the affinity
-        * mask. No point in trying to move it around.
+        * mask. No point in trying to move it around unless the
+        * isolation mechanism requests to move it to an upcoming
+        * housekeeping CPU.
         */
-       if (!irqd_is_single_target(data))
+       if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
                irq_set_affinity_locked(data, affinity, false);
 }
 
index 5b8fdd659e54abaf411e8e133296b4d8d25a7d49..98a5f10d19002f049d11e031472c97460d5fe00c 100644 (file)
@@ -891,6 +891,7 @@ __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
 }
 
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
+       __releases(&desc->lock)
 {
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
index dd822fd8a7d5b7d308b997eccbd9c58410091b69..7527e5ef6fe59b4625f9bf8819a7e565a86e6070 100644 (file)
@@ -986,6 +986,23 @@ const struct irq_domain_ops irq_domain_simple_ops = {
 };
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
 
+/**
+ * irq_domain_translate_onecell() - Generic translate for direct one cell
+ * bindings
+ */
+int irq_domain_translate_onecell(struct irq_domain *d,
+                                struct irq_fwspec *fwspec,
+                                unsigned long *out_hwirq,
+                                unsigned int *out_type)
+{
+       if (WARN_ON(fwspec->param_count < 1))
+               return -EINVAL;
+       *out_hwirq = fwspec->param[0];
+       *out_type = IRQ_TYPE_NONE;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_translate_onecell);
+
 /**
  * irq_domain_translate_twocell() - Generic translate for direct two cell
  * bindings
@@ -1459,6 +1476,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
        if (rv) {
                /* Restore the original irq_data. */
                *root_irq_data = *child_irq_data;
+               kfree(child_irq_data);
                goto error;
        }
 
index 1753486b440cb031315ac089d5e3af2b9b3d4a24..818b2802d3e79f8a8d358f212b957f7298fe5318 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/task.h>
+#include <linux/sched/isolation.h>
 #include <uapi/linux/sched/types.h>
 #include <linux/task_work.h>
 
@@ -217,7 +218,45 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;
 
-       ret = chip->irq_set_affinity(data, mask, force);
+       /*
+        * If this is a managed interrupt and housekeeping is enabled on
+        * it, check whether the requested affinity mask intersects with
+        * a housekeeping CPU. If so, then remove the isolated CPUs from
+        * the mask and just keep the housekeeping CPU(s). This prevents
+        * the affinity setter from routing the interrupt to an isolated
+        * CPU to avoid that I/O submitted from a housekeeping CPU causes
+        * interrupts on an isolated one.
+        *
+        * If the masks do not intersect or include online CPU(s) then
+        * keep the requested mask. The isolated target CPUs are only
+        * receiving interrupts when the I/O operation was submitted
+        * directly from them.
+        *
+        * If all housekeeping CPUs in the affinity mask are offline, the
+        * interrupt will be migrated by the CPU hotplug code once a
+        * housekeeping CPU which belongs to the affinity mask comes
+        * online.
+        */
+       if (irqd_affinity_is_managed(data) &&
+           housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
+               const struct cpumask *hk_mask, *prog_mask;
+
+               static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
+               static struct cpumask tmp_mask;
+
+               hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
+
+               raw_spin_lock(&tmp_mask_lock);
+               cpumask_and(&tmp_mask, mask, hk_mask);
+               if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
+                       prog_mask = mask;
+               else
+                       prog_mask = &tmp_mask;
+               ret = chip->irq_set_affinity(data, prog_mask, force);
+               raw_spin_unlock(&tmp_mask_lock);
+       } else {
+               ret = chip->irq_set_affinity(data, mask, force);
+       }
        switch (ret) {
        case IRQ_SET_MASK_OK:
        case IRQ_SET_MASK_OK_DONE:
@@ -1500,8 +1539,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 * has. The type flags are unreliable as the
                 * underlying chip implementation can override them.
                 */
-               pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
-                      irq);
+               pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
+                      new->name, irq);
                ret = -EINVAL;
                goto out_unlock;
        }
index 2ed97a7c9b2a68f4e5013ccc4b10a4ec082adb5a..f865e5f4d3825461bc4e1994eb4da80981b7e3fc 100644 (file)
@@ -34,6 +34,7 @@ static atomic_t irq_poll_active;
  * true and let the handler run.
  */
 bool irq_wait_for_poll(struct irq_desc *desc)
+       __must_hold(&desc->lock)
 {
        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
                      "irq poll in progress on cpu %d for irq %d\n",
index bc933c0db9bf019aefa7c5bc7061bc2541fb2a00..f977786fe498fb7241337c6da0e94e3a0ad85d34 100644 (file)
@@ -159,6 +159,10 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 
        kimage_terminate(image);
 
+       ret = machine_kexec_post_load(image);
+       if (ret)
+               goto out;
+
        /* Install the new kernel and uninstall the old */
        image = xchg(dest_image, image);
 
index 15d70a90b50dcaee7e4345c45fe95d825a34cd8d..c19c0dad1ebef9def30f749a0a5cb13fac609a3f 100644 (file)
@@ -589,6 +589,12 @@ static void kimage_free_extra_pages(struct kimage *image)
        kimage_free_page_list(&image->unusable_pages);
 
 }
+
+int __weak machine_kexec_post_load(struct kimage *image)
+{
+       return 0;
+}
+
 void kimage_terminate(struct kimage *image)
 {
        if (*image->entry != 0)
@@ -1171,7 +1177,7 @@ int kernel_kexec(void)
                 * CPU hotplug again; so re-enable it here.
                 */
                cpu_hotplug_enable();
-               pr_emerg("Starting new kernel\n");
+               pr_notice("Starting new kernel\n");
                machine_shutdown();
        }
 
index a2df939486655ed22b811a6a311744f2c1c61113..faa74d5f69411b420c7ea47cc58f1476ca0eafe7 100644 (file)
@@ -441,6 +441,10 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
 
        kimage_terminate(image);
 
+       ret = machine_kexec_post_load(image);
+       if (ret)
+               goto out;
+
        /*
         * Free up any temporary buffers allocated which are not needed
         * after image has been loaded
index 48aaf2ac0d0d15d7cd1dca3c42fa9b2f00313c9e..39d30ccf8d8798c1113ea70889ea106ecb25ad1f 100644 (file)
@@ -13,6 +13,8 @@ void kimage_terminate(struct kimage *image);
 int kimage_is_destination_range(struct kimage *image,
                                unsigned long start, unsigned long end);
 
+int machine_kexec_post_load(struct kimage *image);
+
 extern struct mutex kexec_mutex;
 
 #ifdef CONFIG_KEXEC_FILE
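
machine_kexec_post_load() uses the standard weak-default pattern:
generic code supplies a do-nothing weak definition and an architecture
may override it with a strong symbol in its own object file. A sketch
of the linkage rule (the arch-side body and its helper are
hypothetical):

/* kernel/kexec_core.c: overridable default */
int __attribute__((weak)) machine_kexec_post_load(struct kimage *image)
{
	return 0;	/* nothing to do unless the arch says otherwise */
}

/* arch/<arch>/kernel/machine_kexec.c, a separate translation unit:
 * a strong definition here replaces the weak one at link time. */
int machine_kexec_post_load(struct kimage *image)
{
	return arch_prepare_kexec_image(image); /* hypothetical helper */
}
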
index 53534aa258a60ec3ba7dad1cf3551e26d8f19700..2625c241ac00f81d5385be6024b9b3062c05ecfd 100644 (file)
@@ -510,6 +510,8 @@ static void do_unoptimize_kprobes(void)
        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+               /* Switching from detour code to origin */
+               op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
                /* Disarm probes if marked disabled */
                if (kprobe_disabled(&op->kp))
                        arch_disarm_kprobe(&op->kp);
@@ -610,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
        mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+       struct optimized_kprobe *_op;
+
+       list_for_each_entry(_op, &unoptimizing_list, list) {
+               if (op == _op)
+                       return true;
+       }
+
+       return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
@@ -631,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
                return;
 
        /* Check if it is already optimized. */
-       if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+       if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+               if (optprobe_queued_unopt(op)) {
+                       /* This is under unoptimizing. Just dequeue the probe */
+                       list_del_init(&op->list);
+               }
                return;
+       }
        op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-       if (!list_empty(&op->list))
-               /* This is under unoptimizing. Just dequeue the probe */
-               list_del_init(&op->list);
-       else {
-               list_add(&op->list, &optimizing_list);
-               kick_kprobe_optimizer();
-       }
+       /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+       if (WARN_ON_ONCE(!list_empty(&op->list)))
+               return;
+
+       list_add(&op->list, &optimizing_list);
+       kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
@@ -649,6 +667,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 {
        lockdep_assert_cpus_held();
        arch_unoptimize_kprobe(op);
+       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
        if (kprobe_disabled(&op->kp))
                arch_disarm_kprobe(&op->kp);
 }
@@ -662,31 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
                return; /* This is not an optprobe nor optimized */
 
        op = container_of(p, struct optimized_kprobe, kp);
-       if (!kprobe_optimized(p)) {
-               /* Unoptimized or unoptimizing case */
-               if (force && !list_empty(&op->list)) {
-                       /*
-                        * Only if this is unoptimizing kprobe and forced,
-                        * forcibly unoptimize it. (No need to unoptimize
-                        * unoptimized kprobe again :)
-                        */
-                       list_del_init(&op->list);
-                       force_unoptimize_kprobe(op);
-               }
+       if (!kprobe_optimized(p))
                return;
-       }
 
-       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
        if (!list_empty(&op->list)) {
-               /* Dequeue from the optimization queue */
-               list_del_init(&op->list);
+               if (optprobe_queued_unopt(op)) {
+                       /* Queued in unoptimizing queue */
+                       if (force) {
+                               /*
+                                * Forcibly unoptimize the kprobe here, and queue it
+                                * in the freeing list for release afterwards.
+                                */
+                               force_unoptimize_kprobe(op);
+                               list_move(&op->list, &freeing_list);
+                       }
+               } else {
+                       /* Dequeue from the optimizing queue */
+                       list_del_init(&op->list);
+                       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+               }
                return;
        }
+
        /* Optimized kprobe case */
-       if (force)
+       if (force) {
                /* Forcibly update the code: this is a special case */
                force_unoptimize_kprobe(op);
-       else {
+       } else {
                list_add(&op->list, &unoptimizing_list);
                kick_kprobe_optimizer();
        }
index 32282e7112d3990d8bf18ea172f66439f9c445d4..32406ef0d6a2d28e74122e2b16968f758275590f 100644 (file)
@@ -482,7 +482,7 @@ static struct lock_trace *save_trace(void)
        struct lock_trace *trace, *t2;
        struct hlist_head *hash_head;
        u32 hash;
-       unsigned int max_entries;
+       int max_entries;
 
        BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
        BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
@@ -490,10 +490,8 @@ static struct lock_trace *save_trace(void)
        trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
        max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
                LOCK_TRACE_SIZE_IN_LONGS;
-       trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
 
-       if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES -
-           LOCK_TRACE_SIZE_IN_LONGS - 1) {
+       if (max_entries <= 0) {
                if (!debug_locks_off_graph_unlock())
                        return NULL;
 
@@ -502,6 +500,7 @@ static struct lock_trace *save_trace(void)
 
                return NULL;
        }
+       trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
 
        hash = jhash(trace->entries, trace->nr_entries *
                     sizeof(trace->entries[0]), 0);
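
Turning max_entries into an int is what makes the new <= 0 test
meaningful: with the old unsigned type, exhausting the stack_trace
array made the subtraction wrap to a huge positive value instead of
going non-positive. A tiny demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int used = 9998, cap = 10000, hdr = 4;

	unsigned int u = cap - used - hdr; /* wraps to ~4 billion */
	int s = cap - used - hdr;          /* -2 on two's-complement */

	printf("unsigned: %u\n", u);
	printf("signed:   %d (s <= 0 -> %d)\n", s, s <= 0);
	return 0;
}
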
index dadb7b7fba3733ca7dfe80cf5624c88b7c147c96..9bb6d2497b04037292d9c9be22908b79eaf1fa41 100644 (file)
@@ -286,9 +286,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
        seq_printf(m, " stack-trace entries:           %11lu [max: %lu]\n",
                        nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
-       seq_printf(m, " number of stack traces:        %llu\n",
+       seq_printf(m, " number of stack traces:        %11llu\n",
                   lockdep_stack_trace_count());
-       seq_printf(m, " number of stack hash chains:   %llu\n",
+       seq_printf(m, " number of stack hash chains:   %11llu\n",
                   lockdep_stack_hash_count());
 #endif
        seq_printf(m, " combined max dependencies:     %11u\n",
index 54cc5f9286e93ee708fea396651ff3f53d1dbb1a..5352ce50a97e309b5bf8cdd2059378b17a8f14ec 100644 (file)
@@ -733,9 +733,6 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
  */
 void __sched mutex_unlock(struct mutex *lock)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
-       WARN_ON(in_interrupt());
-#endif
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
        if (__mutex_unlock_fast(lock))
                return;
@@ -1416,7 +1413,6 @@ int __sched mutex_trylock(struct mutex *lock)
 
 #ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-       WARN_ON(in_interrupt());
 #endif
 
        locked = __mutex_trylock(lock);
index 6ef600aa0f47e7dd2cbc8901ccb6397e098ab759..1f7734949ac883792569a53524b7ae6237a8a1ea 100644 (file)
@@ -134,20 +134,17 @@ bool osq_lock(struct optimistic_spin_queue *lock)
         * cmpxchg in an attempt to undo our queueing.
         */
 
-       while (!READ_ONCE(node->locked)) {
-               /*
-                * If we need to reschedule bail... so we can block.
-                * Use vcpu_is_preempted() to avoid waiting for a preempted
-                * lock holder:
-                */
-               if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
-                       goto unqueue;
-
-               cpu_relax();
-       }
-       return true;
+       /*
+        * Wait to acquire the lock or cancellation. Note that need_resched()
+        * will come with an IPI, which will wake smp_cond_load_relaxed() if it
+        * is implemented with a monitor-wait. vcpu_is_preempted() relies on
+        * polling; be careful.
+        */
+       if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
+                                 vcpu_is_preempted(node_cpu(node->prev))))
+               return true;
 
-unqueue:
+       /* unqueue */
        /*
         * Step - A  -- stabilize @prev
         *
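
smp_cond_load_relaxed() is the generic spin-until-condition primitive.
On arm64 it can be backed by an event-monitor wait (hence the remark
about the need_resched() IPI waking it), while the generic fallback,
roughly as in include/asm-generic/barrier.h and simplified here,
degenerates to exactly the READ_ONCE()/cpu_relax() loop being removed:

/* VAL is the magic variable the condition may reference; it is bound
 * to the most recent load of *ptr. */
#define smp_cond_load_relaxed(ptr, cond_expr) ({	\
	typeof(ptr) __ptr = (ptr);			\
	typeof(*ptr) VAL;				\
	for (;;) {					\
		VAL = READ_ONCE(*__ptr);		\
		if (cond_expr)				\
			break;				\
		cpu_relax();				\
	}						\
	VAL;						\
})
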
index 2473f10c6956a94319a4fb52f4727223d2d70bd9..b9515fcc9b297147a34adbe62b0d86d6168e7fac 100644 (file)
 /*
  * The basic principle of a queue-based spinlock can best be understood
  * by studying a classic queue-based spinlock implementation called the
- * MCS lock. The paper below provides a good description for this kind
- * of lock.
+ * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
+ * Synchronization on Shared-Memory Multiprocessors" by Mellor-Crummey and
+ * Scott) is available at
  *
- * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
+ * https://bugzilla.kernel.org/show_bug.cgi?id=206115
  *
- * This queued spinlock implementation is based on the MCS lock, however to make
- * it fit the 4 bytes we assume spinlock_t to be, and preserve its existing
- * API, we must modify it somehow.
+ * This queued spinlock implementation is based on the MCS lock, however to
+ * make it fit the 4 bytes we assume spinlock_t to be, and preserve its
+ * existing API, we must modify it somehow.
  *
  * In particular; where the traditional MCS lock consists of a tail pointer
  * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
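
For orientation, the MCS scheme referenced above queues waiters through per-CPU nodes so that each CPU spins only on its own cache line; the predecessor flips the flag at hand-off. A schematic node, sketched from the paper rather than the kernel's packed qspinlock layout:

	struct mcs_node {
		struct mcs_node *next;	/* successor in the wait queue */
		int locked;		/* set by the predecessor when the
					 * lock is handed to this waiter */
	};
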
index 44e68761f43213144a61b0cd2b489cfd1b8358bd..0d9b6be9ecc886cdae8067f479a6cd915929fef6 100644 (file)
@@ -1226,8 +1226,8 @@ wait:
                 * In this case, we attempt to acquire the lock again
                 * without sleeping.
                 */
-               if ((wstate == WRITER_HANDOFF) &&
-                   (rwsem_spin_on_owner(sem, 0) == OWNER_NULL))
+               if (wstate == WRITER_HANDOFF &&
+                   rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
                        goto trylock_again;
 
                /* Block until there are no active lockers. */
index 399669f7eba8e9569830e1c35b7f37c3c712025b..472dd462a40ca91ec62a773cf8fde19a25c72994 100644 (file)
@@ -51,19 +51,19 @@ EXPORT_SYMBOL(__rwlock_init);
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
-       struct task_struct *owner = NULL;
+       struct task_struct *owner = READ_ONCE(lock->owner);
 
-       if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
-               owner = lock->owner;
+       if (owner == SPINLOCK_OWNER_INIT)
+               owner = NULL;
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                msg, raw_smp_processor_id(),
                current->comm, task_pid_nr(current));
        printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
                        ".owner_cpu: %d\n",
-               lock, lock->magic,
+               lock, READ_ONCE(lock->magic),
                owner ? owner->comm : "<none>",
                owner ? task_pid_nr(owner) : -1,
-               lock->owner_cpu);
+               READ_ONCE(lock->owner_cpu));
        dump_stack();
 }
 
@@ -80,16 +80,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
 static inline void
 debug_spin_lock_before(raw_spinlock_t *lock)
 {
-       SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
-       SPIN_BUG_ON(lock->owner == current, lock, "recursion");
-       SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+       SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
+       SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
+       SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
                                                        lock, "cpu recursion");
 }
 
 static inline void debug_spin_lock_after(raw_spinlock_t *lock)
 {
-       lock->owner_cpu = raw_smp_processor_id();
-       lock->owner = current;
+       WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
+       WRITE_ONCE(lock->owner, current);
 }
 
 static inline void debug_spin_unlock(raw_spinlock_t *lock)
@@ -99,8 +99,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                        lock, "wrong CPU");
-       lock->owner = SPINLOCK_OWNER_INIT;
-       lock->owner_cpu = -1;
+       WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
+       WRITE_ONCE(lock->owner_cpu, -1);
 }
 
 /*
@@ -187,8 +187,8 @@ static inline void debug_write_lock_before(rwlock_t *lock)
 
 static inline void debug_write_lock_after(rwlock_t *lock)
 {
-       lock->owner_cpu = raw_smp_processor_id();
-       lock->owner = current;
+       WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
+       WRITE_ONCE(lock->owner, current);
 }
 
 static inline void debug_write_unlock(rwlock_t *lock)
@@ -197,8 +197,8 @@ static inline void debug_write_unlock(rwlock_t *lock)
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                        lock, "wrong CPU");
-       lock->owner = SPINLOCK_OWNER_INIT;
-       lock->owner_cpu = -1;
+       WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
+       WRITE_ONCE(lock->owner_cpu, -1);
 }
 
 void do_raw_write_lock(rwlock_t *lock)
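
These conversions matter because the debug fields are written by the lock owner while spin_dump() may read them concurrently from another CPU; plain accesses are a data race the compiler is free to tear or re-load. A minimal sketch of the pattern, assuming the usual <linux/compiler.h> definitions:

	static int shared;

	static void writer(void)
	{
		WRITE_ONCE(shared, 42);		/* one untorn store */
	}

	static int reader(void)
	{
		return READ_ONCE(shared);	/* one load, no re-reads */
	}
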
index 3a486f8262249b1be38a01461d2329732bde11ab..ac058a5ad1d135a1d0d67223b7962e487bf6c752 100644 (file)
@@ -2031,49 +2031,6 @@ static void module_enable_nx(const struct module *mod)
        frob_writable_data(&mod->init_layout, set_memory_nx);
 }
 
-/* Iterate through all modules and set each module's text as RW */
-void set_all_modules_text_rw(void)
-{
-       struct module *mod;
-
-       if (!rodata_enabled)
-               return;
-
-       mutex_lock(&module_mutex);
-       list_for_each_entry_rcu(mod, &modules, list) {
-               if (mod->state == MODULE_STATE_UNFORMED)
-                       continue;
-
-               frob_text(&mod->core_layout, set_memory_rw);
-               frob_text(&mod->init_layout, set_memory_rw);
-       }
-       mutex_unlock(&module_mutex);
-}
-
-/* Iterate through all modules and set each module's text as RO */
-void set_all_modules_text_ro(void)
-{
-       struct module *mod;
-
-       if (!rodata_enabled)
-               return;
-
-       mutex_lock(&module_mutex);
-       list_for_each_entry_rcu(mod, &modules, list) {
-               /*
-                * Ignore going modules since it's possible that ro
-                * protection has already been disabled, otherwise we'll
-                * run into protection faults at module deallocation.
-                */
-               if (mod->state == MODULE_STATE_UNFORMED ||
-                       mod->state == MODULE_STATE_GOING)
-                       continue;
-
-               frob_text(&mod->core_layout, set_memory_ro);
-               frob_text(&mod->init_layout, set_memory_ro);
-       }
-       mutex_unlock(&module_mutex);
-}
 #else /* !CONFIG_STRICT_MODULE_RWX */
 static void module_enable_nx(const struct module *mod) { }
 #endif /*  CONFIG_STRICT_MODULE_RWX */
@@ -3730,6 +3687,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
 
        module_enable_ro(mod, false);
        module_enable_nx(mod);
+       module_enable_x(mod);
 
        /* Mark state as coming so strong_try_module_get() ignores us,
         * but kallsyms etc. can see us. */
@@ -3752,11 +3710,6 @@ static int prepare_coming_module(struct module *mod)
        if (err)
                return err;
 
-       /* Make module executable after ftrace is enabled */
-       mutex_lock(&module_mutex);
-       module_enable_x(mod);
-       mutex_unlock(&module_mutex);
-
        blocking_notifier_call_chain(&module_notify_list,
                                     MODULE_STATE_COMING, mod);
        return 0;
index c815f58e6bc087db1f2f57f1736abfc5bf6dd474..ed9882108cd28f604e09f4a6e4f09137b3881076 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/pid_namespace.h>
 #include <net/net_namespace.h>
 #include <linux/ipc_namespace.h>
+#include <linux/time_namespace.h>
 #include <linux/proc_ns.h>
 #include <linux/file.h>
 #include <linux/syscalls.h>
@@ -40,6 +41,10 @@ struct nsproxy init_nsproxy = {
 #ifdef CONFIG_CGROUPS
        .cgroup_ns              = &init_cgroup_ns,
 #endif
+#ifdef CONFIG_TIME_NS
+       .time_ns                = &init_time_ns,
+       .time_ns_for_children   = &init_time_ns,
+#endif
 };
 
 static inline struct nsproxy *create_nsproxy(void)
@@ -106,8 +111,18 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
                goto out_net;
        }
 
+       new_nsp->time_ns_for_children = copy_time_ns(flags, user_ns,
+                                       tsk->nsproxy->time_ns_for_children);
+       if (IS_ERR(new_nsp->time_ns_for_children)) {
+               err = PTR_ERR(new_nsp->time_ns_for_children);
+               goto out_time;
+       }
+       new_nsp->time_ns = get_time_ns(tsk->nsproxy->time_ns);
+
        return new_nsp;
 
+out_time:
+       put_net(new_nsp->net_ns);
 out_net:
        put_cgroup_ns(new_nsp->cgroup_ns);
 out_cgroup:
@@ -136,15 +151,16 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
        struct nsproxy *old_ns = tsk->nsproxy;
        struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns);
        struct nsproxy *new_ns;
+       int ret;
 
        if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
                              CLONE_NEWPID | CLONE_NEWNET |
-                             CLONE_NEWCGROUP)))) {
-               get_nsproxy(old_ns);
-               return 0;
-       }
-
-       if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+                             CLONE_NEWCGROUP | CLONE_NEWTIME)))) {
+               if (likely(old_ns->time_ns_for_children == old_ns->time_ns)) {
+                       get_nsproxy(old_ns);
+                       return 0;
+               }
+       } else if (!ns_capable(user_ns, CAP_SYS_ADMIN))
                return -EPERM;
 
        /*
@@ -162,6 +178,12 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
        if (IS_ERR(new_ns))
                return  PTR_ERR(new_ns);
 
+       ret = timens_on_fork(new_ns, tsk);
+       if (ret) {
+               free_nsproxy(new_ns);
+               return ret;
+       }
+
        tsk->nsproxy = new_ns;
        return 0;
 }
@@ -176,6 +198,10 @@ void free_nsproxy(struct nsproxy *ns)
                put_ipc_ns(ns->ipc_ns);
        if (ns->pid_ns_for_children)
                put_pid_ns(ns->pid_ns_for_children);
+       if (ns->time_ns)
+               put_time_ns(ns->time_ns);
+       if (ns->time_ns_for_children)
+               put_time_ns(ns->time_ns_for_children);
        put_cgroup_ns(ns->cgroup_ns);
        put_net(ns->net_ns);
        kmem_cache_free(nsproxy_cachep, ns);
@@ -192,7 +218,8 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
        int err = 0;
 
        if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
-                              CLONE_NEWNET | CLONE_NEWPID | CLONE_NEWCGROUP)))
+                              CLONE_NEWNET | CLONE_NEWPID | CLONE_NEWCGROUP |
+                              CLONE_NEWTIME)))
                return 0;
 
        user_ns = new_cred ? new_cred->user_ns : current_user_ns();
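
Since a task's own clock readings must stay monotonic, CLONE_NEWTIME only populates time_ns_for_children, and the new namespace becomes visible in children forked afterwards, which is what the copy_namespaces() special case above implements. A minimal user-space sketch under that assumption:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	#ifndef CLONE_NEWTIME
	#define CLONE_NEWTIME 0x00000080	/* from <linux/sched.h> */
	#endif

	int main(void)
	{
		/* Honoured only via unshare(); the caller keeps its
		 * clocks, children get the new time namespace. */
		if (unshare(CLONE_NEWTIME) == -1) {
			perror("unshare(CLONE_NEWTIME)");
			return 1;
		}
		if (fork() == 0) {
			execlp("cat", "cat", "/proc/self/timens_offsets",
			       (char *)NULL);
			return 1;
		}
		wait(NULL);
		return 0;
	}
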
index d3667b4075c19b08932e2d2880a4307ecc668f28..7cbfbeacd68a64c3ff96c02baf96e1f3b6e943bd 100644 (file)
@@ -27,7 +27,10 @@ config SUSPEND_SKIP_SYNC
          Skip the kernel sys_sync() before freezing user processes.
          Some systems prefer not to pay this cost on every invocation
          of suspend, or they are content with invoking sync() from
-         user-space before invoking suspend.  Say Y if that's your case.
+         user-space before invoking suspend.  There's a run-time switch
+         at '/sys/power/sync_on_suspend' to configure this behaviour.
+         This setting changes the default for the run-time switch. Say Y
+         to change the default to disable the kernel sys_sync().
 
 config HIBERNATE_CALLBACKS
        bool
index 3c0a5a8170b02634df7c289be46d49124718023d..6dbeedb7354cc7f44b4de952ae14535c4b6cc352 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
  */
 
-#define pr_fmt(fmt) "PM: " fmt
+#define pr_fmt(fmt) "PM: hibernation: " fmt
 
 #include <linux/export.h>
 #include <linux/suspend.h>
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(system_entering_hibernation);
 #ifdef CONFIG_PM_DEBUG
 static void hibernation_debug_sleep(void)
 {
-       pr_info("hibernation debug: Waiting for 5 seconds.\n");
+       pr_info("debug: Waiting for 5 seconds.\n");
        mdelay(5000);
 }
 
@@ -277,7 +277,7 @@ static int create_image(int platform_mode)
 
        error = dpm_suspend_end(PMSG_FREEZE);
        if (error) {
-               pr_err("Some devices failed to power down, aborting hibernation\n");
+               pr_err("Some devices failed to power down, aborting\n");
                return error;
        }
 
@@ -295,7 +295,7 @@ static int create_image(int platform_mode)
 
        error = syscore_suspend();
        if (error) {
-               pr_err("Some system devices failed to power down, aborting hibernation\n");
+               pr_err("Some system devices failed to power down, aborting\n");
                goto Enable_irqs;
        }
 
@@ -310,7 +310,7 @@ static int create_image(int platform_mode)
        restore_processor_state();
        trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
        if (error)
-               pr_err("Error %d creating hibernation image\n", error);
+               pr_err("Error %d creating image\n", error);
 
        if (!in_suspend) {
                events_check_enabled = false;
@@ -680,7 +680,7 @@ static int load_image_and_restore(void)
        if (!error)
                hibernation_restore(flags & SF_PLATFORM_MODE);
 
-       pr_err("Failed to load hibernation image, recovering.\n");
+       pr_err("Failed to load image, recovering.\n");
        swsusp_free();
        free_basic_memory_bitmaps();
  Unlock:
@@ -743,7 +743,7 @@ int hibernate(void)
                else
                        flags |= SF_CRC32_MODE;
 
-               pm_pr_dbg("Writing image.\n");
+               pm_pr_dbg("Writing hibernation image.\n");
                error = swsusp_write(flags);
                swsusp_free();
                if (!error) {
@@ -755,7 +755,7 @@ int hibernate(void)
                in_suspend = 0;
                pm_restore_gfp_mask();
        } else {
-               pm_pr_dbg("Image restored successfully.\n");
+               pm_pr_dbg("Hibernation image restored successfully.\n");
        }
 
  Free_bitmaps:
@@ -894,7 +894,7 @@ static int software_resume(void)
                goto Close_Finish;
        }
 
-       pm_pr_dbg("Preparing processes for restore.\n");
+       pm_pr_dbg("Preparing processes for hibernation restore.\n");
        error = freeze_processes();
        if (error)
                goto Close_Finish;
@@ -903,7 +903,7 @@ static int software_resume(void)
  Finish:
        __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
        pm_restore_console();
-       pr_info("resume from hibernation failed (%d)\n", error);
+       pr_info("resume failed (%d)\n", error);
        atomic_inc(&snapshot_device_available);
        /* For success case, the suspend path will release the lock */
  Unlock:
@@ -1068,7 +1068,8 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
        lock_system_sleep();
        swsusp_resume_device = res;
        unlock_system_sleep();
-       pm_pr_dbg("Configured resume from disk to %u\n", swsusp_resume_device);
+       pm_pr_dbg("Configured hibernation resume from disk to %u\n",
+                 swsusp_resume_device);
        noresume = 0;
        software_resume();
        return n;
index e26de7af520beba281df4e42f11f06b150a0fab7..69b7a8aeca3b9878006df6e16e564c60b0c9fdc0 100644 (file)
@@ -190,6 +190,38 @@ static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr
 }
 
 power_attr(mem_sleep);
+
+/*
+ * sync_on_suspend: invoke ksys_sync_helper() before suspend.
+ *
+ * show() returns whether ksys_sync_helper() is invoked before suspend.
+ * store() accepts 0 or 1.  0 disables ksys_sync_helper() and 1 enables it.
+ */
+bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);
+
+static ssize_t sync_on_suspend_show(struct kobject *kobj,
+                                  struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", sync_on_suspend_enabled);
+}
+
+static ssize_t sync_on_suspend_store(struct kobject *kobj,
+                                   struct kobj_attribute *attr,
+                                   const char *buf, size_t n)
+{
+       unsigned long val;
+
+       if (kstrtoul(buf, 10, &val))
+               return -EINVAL;
+
+       if (val > 1)
+               return -EINVAL;
+
+       sync_on_suspend_enabled = !!val;
+       return n;
+}
+
+power_attr(sync_on_suspend);
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_PM_SLEEP_DEBUG
@@ -855,6 +887,7 @@ static struct attribute * g[] = {
        &wakeup_count_attr.attr,
 #ifdef CONFIG_SUSPEND
        &mem_sleep_attr.attr,
+       &sync_on_suspend_attr.attr,
 #endif
 #ifdef CONFIG_PM_AUTOSLEEP
        &autosleep_attr.attr,
index 26b9168321e765f85d3d90190644c04e7f2b6ce7..ddade80ad27670c33f84bbbedd1328edf42a3ace 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  */
 
-#define pr_fmt(fmt) "PM: " fmt
+#define pr_fmt(fmt) "PM: hibernation: " fmt
 
 #include <linux/version.h>
 #include <linux/module.h>
@@ -1147,24 +1147,24 @@ void free_basic_memory_bitmaps(void)
 
 void clear_free_pages(void)
 {
-#ifdef CONFIG_PAGE_POISONING_ZERO
        struct memory_bitmap *bm = free_pages_map;
        unsigned long pfn;
 
        if (WARN_ON(!(free_pages_map)))
                return;
 
-       memory_bm_position_reset(bm);
-       pfn = memory_bm_next_pfn(bm);
-       while (pfn != BM_END_OF_MAP) {
-               if (pfn_valid(pfn))
-                       clear_highpage(pfn_to_page(pfn));
-
+       if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
+               memory_bm_position_reset(bm);
                pfn = memory_bm_next_pfn(bm);
+               while (pfn != BM_END_OF_MAP) {
+                       if (pfn_valid(pfn))
+                               clear_highpage(pfn_to_page(pfn));
+
+                       pfn = memory_bm_next_pfn(bm);
+               }
+               memory_bm_position_reset(bm);
+               pr_info("free pages cleared after restore\n");
        }
-       memory_bm_position_reset(bm);
-       pr_info("free pages cleared after restore\n");
-#endif /* PAGE_POISONING_ZERO */
 }
 
 /**
@@ -1566,9 +1566,7 @@ static unsigned long preallocate_image_highmem(unsigned long nr_pages)
  */
 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
 {
-       x *= multiplier;
-       do_div(x, base);
-       return (unsigned long)x;
+       return div64_u64(x * multiplier, base);
 }
 
 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
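
The __fraction() cleanup also removes a subtle hazard: do_div() divides in place and truncates its divisor to 32 bits, while div64_u64() takes a full 64-bit divisor and simply returns the quotient. A sketch contrasting the two idioms, assuming <linux/math64.h>:

	static unsigned long fraction_old(u64 x, u64 multiplier, u64 base)
	{
		x *= multiplier;
		do_div(x, base);	/* divisor truncated to 32 bits,
					 * x updated in place, macro
					 * evaluates to the remainder */
		return (unsigned long)x;
	}

	static unsigned long fraction_new(u64 x, u64 multiplier, u64 base)
	{
		/* 64-bit divisor, quotient returned, no mutation. */
		return div64_u64(x * multiplier, base);
	}
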
@@ -1705,16 +1703,20 @@ int hibernate_preallocate_memory(void)
        ktime_t start, stop;
        int error;
 
-       pr_info("Preallocating image memory... ");
+       pr_info("Preallocating image memory\n");
        start = ktime_get();
 
        error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
-       if (error)
+       if (error) {
+               pr_err("Cannot allocate original bitmap\n");
                goto err_out;
+       }
 
        error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
-       if (error)
+       if (error) {
+               pr_err("Cannot allocate copy bitmap\n");
                goto err_out;
+       }
 
        alloc_normal = 0;
        alloc_highmem = 0;
@@ -1804,8 +1806,11 @@ int hibernate_preallocate_memory(void)
                alloc -= pages;
                pages += pages_highmem;
                pages_highmem = preallocate_image_highmem(alloc);
-               if (pages_highmem < alloc)
+               if (pages_highmem < alloc) {
+                       pr_err("Image allocation is %lu pages short\n",
+                               alloc - pages_highmem);
                        goto err_out;
+               }
                pages += pages_highmem;
                /*
                 * size is the desired number of saveable pages to leave in
@@ -1836,13 +1841,12 @@ int hibernate_preallocate_memory(void)
 
  out:
        stop = ktime_get();
-       pr_cont("done (allocated %lu pages)\n", pages);
+       pr_info("Allocated %lu pages for snapshot\n", pages);
        swsusp_show_speed(start, stop, pages, "Allocated");
 
        return 0;
 
  err_out:
-       pr_cont("\n");
        swsusp_free();
        return -ENOMEM;
 }
@@ -1976,7 +1980,7 @@ asmlinkage __visible int swsusp_save(void)
 {
        unsigned int nr_pages, nr_highmem;
 
-       pr_info("Creating hibernation image:\n");
+       pr_info("Creating image:\n");
 
        drain_local_pages(NULL);
        nr_pages = count_data_pages();
@@ -2010,7 +2014,7 @@ asmlinkage __visible int swsusp_save(void)
        nr_copy_pages = nr_pages;
        nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
 
-       pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
+       pr_info("Image created (%d pages copied)\n", nr_pages);
 
        return 0;
 }
index f3b7239f1892fd501ca6a4412e9a4f7f6086bc3b..2c47280fbfc7a4a92f89769cf7e30c7cb60784e9 100644 (file)
@@ -564,7 +564,7 @@ static int enter_state(suspend_state_t state)
        if (state == PM_SUSPEND_TO_IDLE)
                s2idle_begin();
 
-       if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
+       if (sync_on_suspend_enabled) {
                trace_suspend_resume(TPS("sync_filesystems"), 0, true);
                ksys_sync_helper();
                trace_suspend_resume(TPS("sync_filesystems"), 0, false);
index 60564b58de0777864e9696f38cf32354518d208c..e1ed58adb69e480b8537bf4a37b2d2e1f23be9b2 100644 (file)
@@ -70,7 +70,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
        static char info_test[] __initdata =
                KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
 
-       unsigned long           now;
+       time64_t                now;
        struct rtc_wkalrm       alm;
        int                     status;
 
@@ -81,10 +81,10 @@ repeat:
                printk(err_readtime, dev_name(&rtc->dev), status);
                return;
        }
-       rtc_tm_to_time(&alm.time, &now);
+       now = rtc_tm_to_time64(&alm.time);
 
        memset(&alm, 0, sizeof alm);
-       rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
+       rtc_time64_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
        alm.enabled = true;
 
        status = rtc_set_alarm(rtc, &alm);
index cb9ddcc08119added2355311507c65db3849b0c4..43d6179508d645e5decfbf24dde54ed1815df470 100644 (file)
@@ -264,12 +264,17 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
        return ret;
 }
 
-static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
+                          unsigned int mode)
 {
+       int ret;
+
        if (mode & PTRACE_MODE_NOAUDIT)
-               return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
+               ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
        else
-               return has_ns_capability(current, ns, CAP_SYS_PTRACE);
+               ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);
+
+       return ret == 0;
 }
 
 /* Returns 0 on success, -errno on denial. */
@@ -321,7 +326,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
            gid_eq(caller_gid, tcred->sgid) &&
            gid_eq(caller_gid, tcred->gid))
                goto ok;
-       if (ptrace_has_cap(tcred->user_ns, mode))
+       if (ptrace_has_cap(cred, tcred->user_ns, mode))
                goto ok;
        rcu_read_unlock();
        return -EPERM;
@@ -340,7 +345,7 @@ ok:
        mm = task->mm;
        if (mm &&
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
-            !ptrace_has_cap(mm->user_ns, mode)))
+            !ptrace_has_cap(cred, mm->user_ns, mode)))
            return -EPERM;
 
        return security_ptrace_access_check(task, mode);
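
Two things change here: the capability check now runs against the caller's credential snapshot instead of re-deriving current's, and security_capable() follows the 0-or-negative-errno convention rather than the bool convention of the has_ns_capability*() helpers, hence the explicit comparison with zero. A sketch of the conversion:

	/* 0 means "capable"; a negative errno means denial. */
	static bool capable_sketch(const struct cred *cred,
				   struct user_namespace *ns)
	{
		return security_capable(cred, ns, CAP_SYS_PTRACE,
					CAP_OPT_NONE) == 0;
	}
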
index 7644eda17d624cae5a5ed836e8efd38aa3e69905..1cc940fef17c39c982af652cd45aaf342afcee9e 100644 (file)
@@ -7,7 +7,7 @@ menu "RCU Subsystem"
 
 config TREE_RCU
        bool
-       default y if !PREEMPTION && SMP
+       default y if SMP
        help
          This option selects the RCU implementation that is
          designed for very large SMP system with hundreds or
@@ -17,6 +17,7 @@ config TREE_RCU
 config PREEMPT_RCU
        bool
        default y if PREEMPTION
+       select TREE_RCU
        help
          This option selects the RCU implementation that is
          designed for very large SMP systems with hundreds or
@@ -78,7 +79,7 @@ config TASKS_RCU
          user-mode execution as quiescent states.
 
 config RCU_STALL_COMMON
-       def_bool ( TREE_RCU || PREEMPT_RCU )
+       def_bool TREE_RCU
        help
          This option enables RCU CPU stall code that is common between
          the TINY and TREE variants of RCU.  The purpose is to allow
@@ -86,13 +87,13 @@ config RCU_STALL_COMMON
          making these warnings mandatory for the tree variants.
 
 config RCU_NEED_SEGCBLIST
-       def_bool ( TREE_RCU || PREEMPT_RCU || TREE_SRCU )
+       def_bool ( TREE_RCU || TREE_SRCU )
 
 config RCU_FANOUT
        int "Tree-based hierarchical RCU fanout value"
        range 2 64 if 64BIT
        range 2 32 if !64BIT
-       depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT
+       depends on TREE_RCU && RCU_EXPERT
        default 64 if 64BIT
        default 32 if !64BIT
        help
@@ -112,7 +113,7 @@ config RCU_FANOUT_LEAF
        int "Tree-based hierarchical RCU leaf-level fanout value"
        range 2 64 if 64BIT
        range 2 32 if !64BIT
-       depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT
+       depends on TREE_RCU && RCU_EXPERT
        default 16
        help
          This option controls the leaf-level fanout of hierarchical
@@ -187,7 +188,7 @@ config RCU_BOOST_DELAY
 
 config RCU_NOCB_CPU
        bool "Offload RCU callback processing from boot-selected CPUs"
-       depends on TREE_RCU || PREEMPT_RCU
+       depends on TREE_RCU
        depends on RCU_EXPERT || NO_HZ_FULL
        default n
        help
@@ -200,8 +201,8 @@ config RCU_NOCB_CPU
          specified at boot time by the rcu_nocbs parameter.  For each
          such CPU, a kthread ("rcuox/N") will be created to invoke
          callbacks, where the "N" is the CPU being offloaded, and where
-         the "p" for RCU-preempt (PREEMPT kernels) and "s" for RCU-sched
-         (!PREEMPT kernels).  Nothing prevents this kthread from running
+         the "p" for RCU-preempt (PREEMPTION kernels) and "s" for RCU-sched
+         (!PREEMPTION kernels).  Nothing prevents this kthread from running
          on the specified CPUs, but (1) the kthreads may be preempted
          between each callback, and (2) affinity or cgroups can be used
          to force the kthreads to run on whatever set of CPUs is desired.
index 020e8b6a644b4a97ec98d7d529b9fa4018106c62..82d5fba48b2f0a87344de6c69802f66249c99c40 100644 (file)
@@ -9,6 +9,5 @@ obj-$(CONFIG_TINY_SRCU) += srcutiny.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o
 obj-$(CONFIG_TREE_RCU) += tree.o
-obj-$(CONFIG_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TINY_RCU) += tiny.o
 obj-$(CONFIG_RCU_NEED_SEGCBLIST) += rcu_segcblist.o
index ab504fbc76cad73efa1381cdd3b8cf23b6a40fe8..05f936ed167a76acc484584ab8bd33b222c2fd49 100644 (file)
@@ -198,33 +198,6 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-void kfree(const void *);
-
-/*
- * Reclaim the specified callback, either by invoking it (non-lazy case)
- * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
- */
-static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
-{
-       rcu_callback_t f;
-       unsigned long offset = (unsigned long)head->func;
-
-       rcu_lock_acquire(&rcu_callback_map);
-       if (__is_kfree_rcu_offset(offset)) {
-               trace_rcu_invoke_kfree_callback(rn, head, offset);
-               kfree((void *)head - offset);
-               rcu_lock_release(&rcu_callback_map);
-               return true;
-       } else {
-               trace_rcu_invoke_callback(rn, head);
-               f = head->func;
-               WRITE_ONCE(head->func, (rcu_callback_t)0L);
-               f(head);
-               rcu_lock_release(&rcu_callback_map);
-               return false;
-       }
-}
-
 #ifdef CONFIG_RCU_STALL_COMMON
 
 extern int rcu_cpu_stall_ftrace_dump;
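
The deleted __rcu_reclaim() helper is still worth understanding: kfree_rcu() encodes the offset of the rcu_head within its enclosing object in head->func, so sufficiently small "function pointers" are really offsets. A sketch of the decode step, condensed from the removed code:

	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	if (__is_kfree_rcu_offset(offset)) {
		kfree((void *)head - offset);	/* recover object start */
	} else {
		f = head->func;
		WRITE_ONCE(head->func, (rcu_callback_t)0L);
		f(head);			/* ordinary RCU callback */
	}
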
@@ -281,7 +254,7 @@ void rcu_test_sync_prims(void);
  */
 extern void resched_cpu(int cpu);
 
-#if defined(SRCU) || !defined(TINY_RCU)
+#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)
 
 #include <linux/rcu_node_tree.h>
 
@@ -418,7 +391,7 @@ do {                                                                        \
 #define raw_lockdep_assert_held_rcu_node(p)                            \
        lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
 
-#endif /* #if defined(SRCU) || !defined(TINY_RCU) */
+#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */
 
 #ifdef CONFIG_SRCU
 void srcu_init(void);
@@ -454,7 +427,7 @@ enum rcutorture_type {
        INVALID_RCU_FLAVOR
 };
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gp_seq);
 void do_trace_rcu_torture_read(const char *rcutorturename,
index cbc87b804db9bba804cdd7d91f25de2392784f2e..5f4fd3b8777cafdd74ae67fb61a9be8d48b1f65d 100644 (file)
@@ -20,14 +20,10 @@ void rcu_cblist_init(struct rcu_cblist *rclp)
        rclp->head = NULL;
        rclp->tail = &rclp->head;
        rclp->len = 0;
-       rclp->len_lazy = 0;
 }
 
 /*
  * Enqueue an rcu_head structure onto the specified callback list.
- * This function assumes that the callback is non-lazy because it
- * is intended for use by no-CBs CPUs, which do not distinguish
- * between lazy and non-lazy RCU callbacks.
  */
 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp)
 {
@@ -54,7 +50,6 @@ void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
        else
                drclp->tail = &drclp->head;
        drclp->len = srclp->len;
-       drclp->len_lazy = srclp->len_lazy;
        if (!rhp) {
                rcu_cblist_init(srclp);
        } else {
@@ -62,16 +57,12 @@ void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
                srclp->head = rhp;
                srclp->tail = &rhp->next;
                WRITE_ONCE(srclp->len, 1);
-               srclp->len_lazy = 0;
        }
 }
 
 /*
  * Dequeue the oldest rcu_head structure from the specified callback
- * list.  This function assumes that the callback is non-lazy, but
- * the caller can later invoke rcu_cblist_dequeued_lazy() if it
- * finds otherwise (and if it cares about laziness).  This allows
- * different users to have different ways of determining laziness.
+ * list.
  */
 struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
 {
@@ -161,7 +152,6 @@ void rcu_segcblist_init(struct rcu_segcblist *rsclp)
        for (i = 0; i < RCU_CBLIST_NSEGS; i++)
                rsclp->tails[i] = &rsclp->head;
        rcu_segcblist_set_len(rsclp, 0);
-       rsclp->len_lazy = 0;
        rsclp->enabled = 1;
 }
 
@@ -173,7 +163,6 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
 {
        WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
        WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
-       WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
        rsclp->enabled = 0;
 }
 
@@ -253,11 +242,9 @@ bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp)
  * absolutely not OK for it to ever miss posting a callback.
  */
 void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
-                          struct rcu_head *rhp, bool lazy)
+                          struct rcu_head *rhp)
 {
        rcu_segcblist_inc_len(rsclp);
-       if (lazy)
-               rsclp->len_lazy++;
        smp_mb(); /* Ensure counts are updated before callback is enqueued. */
        rhp->next = NULL;
        WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
@@ -275,15 +262,13 @@ void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
  * period.  You have been warned.
  */
 bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
-                          struct rcu_head *rhp, bool lazy)
+                          struct rcu_head *rhp)
 {
        int i;
 
        if (rcu_segcblist_n_cbs(rsclp) == 0)
                return false;
        rcu_segcblist_inc_len(rsclp);
-       if (lazy)
-               rsclp->len_lazy++;
        smp_mb(); /* Ensure counts are updated before callback is entrained. */
        rhp->next = NULL;
        for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
@@ -307,8 +292,6 @@ bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
 void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
                                               struct rcu_cblist *rclp)
 {
-       rclp->len_lazy += rsclp->len_lazy;
-       rsclp->len_lazy = 0;
        rclp->len = rcu_segcblist_xchg_len(rsclp, 0);
 }
 
@@ -361,9 +344,7 @@ void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
 void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
                                struct rcu_cblist *rclp)
 {
-       rsclp->len_lazy += rclp->len_lazy;
        rcu_segcblist_add_len(rsclp, rclp->len);
-       rclp->len_lazy = 0;
        rclp->len = 0;
 }
 
index 815c2fdd3fccf3fb3b76e6c2c53f975f40915812..5c293afc07b8e955f1965a93e514664acef440e7 100644 (file)
@@ -15,15 +15,6 @@ static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
        return READ_ONCE(rclp->len);
 }
 
-/*
- * Account for the fact that a previously dequeued callback turned out
- * to be marked as lazy.
- */
-static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
-{
-       rclp->len_lazy--;
-}
-
 void rcu_cblist_init(struct rcu_cblist *rclp);
 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
 void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
@@ -59,18 +50,6 @@ static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
 #endif
 }
 
-/* Return number of lazy callbacks in segmented callback list. */
-static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
-{
-       return rsclp->len_lazy;
-}
-
-/* Return number of lazy callbacks in segmented callback list. */
-static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
-{
-       return rcu_segcblist_n_cbs(rsclp) - rsclp->len_lazy;
-}
-
 /*
  * Is the specified rcu_segcblist enabled, for example, not corresponding
  * to an offline CPU?
@@ -106,9 +85,9 @@ struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
 struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
 bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
 void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
-                          struct rcu_head *rhp, bool lazy);
+                          struct rcu_head *rhp);
 bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
-                          struct rcu_head *rhp, bool lazy);
+                          struct rcu_head *rhp);
 void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
                                 struct rcu_cblist *rclp);
 void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
index 5f884d560384f7a46014afa2e7f4ee32ff7580be..da94b89cd531078c54ddaacfb1dce4a4d770484d 100644 (file)
@@ -86,6 +86,7 @@ torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
              "Shutdown at end of performance tests.");
 torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
 torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
+torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() perf test?");
 
 static char *perf_type = "rcu";
 module_param(perf_type, charp, 0444);
@@ -105,8 +106,8 @@ static atomic_t n_rcu_perf_writer_finished;
 static wait_queue_head_t shutdown_wq;
 static u64 t_rcu_perf_writer_started;
 static u64 t_rcu_perf_writer_finished;
-static unsigned long b_rcu_perf_writer_started;
-static unsigned long b_rcu_perf_writer_finished;
+static unsigned long b_rcu_gp_test_started;
+static unsigned long b_rcu_gp_test_finished;
 static DEFINE_PER_CPU(atomic_t, n_async_inflight);
 
 #define MAX_MEAS 10000
@@ -378,10 +379,10 @@ rcu_perf_writer(void *arg)
        if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
                t_rcu_perf_writer_started = t;
                if (gp_exp) {
-                       b_rcu_perf_writer_started =
+                       b_rcu_gp_test_started =
                                cur_ops->exp_completed() / 2;
                } else {
-                       b_rcu_perf_writer_started = cur_ops->get_gp_seq();
+                       b_rcu_gp_test_started = cur_ops->get_gp_seq();
                }
        }
 
@@ -429,10 +430,10 @@ retry:
                                PERFOUT_STRING("Test complete");
                                t_rcu_perf_writer_finished = t;
                                if (gp_exp) {
-                                       b_rcu_perf_writer_finished =
+                                       b_rcu_gp_test_finished =
                                                cur_ops->exp_completed() / 2;
                                } else {
-                                       b_rcu_perf_writer_finished =
+                                       b_rcu_gp_test_finished =
                                                cur_ops->get_gp_seq();
                                }
                                if (shutdown) {
@@ -515,8 +516,8 @@ rcu_perf_cleanup(void)
                         t_rcu_perf_writer_finished -
                         t_rcu_perf_writer_started,
                         ngps,
-                        rcuperf_seq_diff(b_rcu_perf_writer_finished,
-                                         b_rcu_perf_writer_started));
+                        rcuperf_seq_diff(b_rcu_gp_test_finished,
+                                         b_rcu_gp_test_started));
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
@@ -584,6 +585,159 @@ rcu_perf_shutdown(void *arg)
        return -EINVAL;
 }
 
+/*
+ * kfree_rcu() performance tests: Start a kfree_rcu() loop on all CPUs for a
+ * given number of iterations, and measure the total time and the number of
+ * grace periods needed for all iterations to complete.
+ */
+
+torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
+torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
+torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
+
+static struct task_struct **kfree_reader_tasks;
+static int kfree_nrealthreads;
+static atomic_t n_kfree_perf_thread_started;
+static atomic_t n_kfree_perf_thread_ended;
+
+struct kfree_obj {
+       char kfree_obj[8];
+       struct rcu_head rh;
+};
+
+static int
+kfree_perf_thread(void *arg)
+{
+       int i, loop = 0;
+       long me = (long)arg;
+       struct kfree_obj *alloc_ptr;
+       u64 start_time, end_time;
+
+       VERBOSE_PERFOUT_STRING("kfree_perf_thread task started");
+       set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+       set_user_nice(current, MAX_NICE);
+
+       start_time = ktime_get_mono_fast_ns();
+
+       if (atomic_inc_return(&n_kfree_perf_thread_started) >= kfree_nrealthreads) {
+               if (gp_exp)
+                       b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
+               else
+                       b_rcu_gp_test_started = cur_ops->get_gp_seq();
+       }
+
+       do {
+               for (i = 0; i < kfree_alloc_num; i++) {
+                       alloc_ptr = kmalloc(sizeof(struct kfree_obj), GFP_KERNEL);
+                       if (!alloc_ptr)
+                               return -ENOMEM;
+
+                       kfree_rcu(alloc_ptr, rh);
+               }
+
+               cond_resched();
+       } while (!torture_must_stop() && ++loop < kfree_loops);
+
+       if (atomic_inc_return(&n_kfree_perf_thread_ended) >= kfree_nrealthreads) {
+               end_time = ktime_get_mono_fast_ns();
+
+               if (gp_exp)
+                       b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
+               else
+                       b_rcu_gp_test_finished = cur_ops->get_gp_seq();
+
+               pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld\n",
+                      (unsigned long long)(end_time - start_time), kfree_loops,
+                      rcuperf_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started));
+               if (shutdown) {
+                       smp_mb(); /* Assign before wake. */
+                       wake_up(&shutdown_wq);
+               }
+       }
+
+       torture_kthread_stopping("kfree_perf_thread");
+       return 0;
+}
+
+static void
+kfree_perf_cleanup(void)
+{
+       int i;
+
+       if (torture_cleanup_begin())
+               return;
+
+       if (kfree_reader_tasks) {
+               for (i = 0; i < kfree_nrealthreads; i++)
+                       torture_stop_kthread(kfree_perf_thread,
+                                            kfree_reader_tasks[i]);
+               kfree(kfree_reader_tasks);
+       }
+
+       torture_cleanup_end();
+}
+
+/*
+ * Shutdown kthread.  Just waits to be awakened, then shuts down the system.
+ */
+static int
+kfree_perf_shutdown(void *arg)
+{
+       do {
+               wait_event(shutdown_wq,
+                          atomic_read(&n_kfree_perf_thread_ended) >=
+                          kfree_nrealthreads);
+       } while (atomic_read(&n_kfree_perf_thread_ended) < kfree_nrealthreads);
+
+       smp_mb(); /* Wake before output. */
+
+       kfree_perf_cleanup();
+       kernel_power_off();
+       return -EINVAL;
+}
+
+static int __init
+kfree_perf_init(void)
+{
+       long i;
+       int firsterr = 0;
+
+       kfree_nrealthreads = compute_real(kfree_nthreads);
+       /* Start up the kthreads. */
+       if (shutdown) {
+               init_waitqueue_head(&shutdown_wq);
+               firsterr = torture_create_kthread(kfree_perf_shutdown, NULL,
+                                                 shutdown_task);
+               if (firsterr)
+                       goto unwind;
+               schedule_timeout_uninterruptible(1);
+       }
+
+       kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
+                              GFP_KERNEL);
+       if (kfree_reader_tasks == NULL) {
+               firsterr = -ENOMEM;
+               goto unwind;
+       }
+
+       for (i = 0; i < kfree_nrealthreads; i++) {
+               firsterr = torture_create_kthread(kfree_perf_thread, (void *)i,
+                                                 kfree_reader_tasks[i]);
+               if (firsterr)
+                       goto unwind;
+       }
+
+       while (atomic_read(&n_kfree_perf_thread_started) < kfree_nrealthreads)
+               schedule_timeout_uninterruptible(1);
+
+       torture_init_end();
+       return 0;
+
+unwind:
+       torture_init_end();
+       kfree_perf_cleanup();
+       return firsterr;
+}
+
 static int __init
 rcu_perf_init(void)
 {
@@ -616,6 +770,9 @@ rcu_perf_init(void)
        if (cur_ops->init)
                cur_ops->init();
 
+       if (kfree_rcu_test)
+               return kfree_perf_init();
+
        nrealwriters = compute_real(nwriters);
        nrealreaders = compute_real(nreaders);
        atomic_set(&n_rcu_perf_reader_started, 0);
index dee043feb71f4505cdb90f8f356c8adf37dd2d47..1aeecc165b2168b697f1410d8f4f38b79e755ec6 100644 (file)
@@ -1661,43 +1661,52 @@ static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
 struct rcu_fwd_cb {
        struct rcu_head rh;
        struct rcu_fwd_cb *rfc_next;
+       struct rcu_fwd *rfc_rfp;
        int rfc_gps;
 };
-static DEFINE_SPINLOCK(rcu_fwd_lock);
-static struct rcu_fwd_cb *rcu_fwd_cb_head;
-static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
-static long n_launders_cb;
-static unsigned long rcu_fwd_startat;
-static bool rcu_fwd_emergency_stop;
+
 #define MAX_FWD_CB_JIFFIES     (8 * HZ) /* Maximum CB test duration. */
 #define MIN_FWD_CB_LAUNDERS    3       /* This many CB invocations to count. */
 #define MIN_FWD_CBS_LAUNDERED  100     /* Number of counted CBs. */
 #define FWD_CBS_HIST_DIV       10      /* Histogram buckets/second. */
+#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
+
 struct rcu_launder_hist {
        long n_launders;
        unsigned long launder_gp_seq;
 };
-#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
-static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
-static unsigned long rcu_launder_gp_seq_start;
 
-static void rcu_torture_fwd_cb_hist(void)
+struct rcu_fwd {
+       spinlock_t rcu_fwd_lock;
+       struct rcu_fwd_cb *rcu_fwd_cb_head;
+       struct rcu_fwd_cb **rcu_fwd_cb_tail;
+       long n_launders_cb;
+       unsigned long rcu_fwd_startat;
+       struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
+       unsigned long rcu_launder_gp_seq_start;
+};
+
+struct rcu_fwd *rcu_fwds;
+bool rcu_fwd_emergency_stop;
+
+static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
 {
        unsigned long gps;
        unsigned long gps_old;
        int i;
        int j;
 
-       for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
-               if (n_launders_hist[i].n_launders > 0)
+       for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
+               if (rfp->n_launders_hist[i].n_launders > 0)
                        break;
        pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
-                __func__, jiffies - rcu_fwd_startat);
-       gps_old = rcu_launder_gp_seq_start;
+                __func__, jiffies - rfp->rcu_fwd_startat);
+       gps_old = rfp->rcu_launder_gp_seq_start;
        for (j = 0; j <= i; j++) {
-               gps = n_launders_hist[j].launder_gp_seq;
+               gps = rfp->n_launders_hist[j].launder_gp_seq;
                pr_cont(" %ds/%d: %ld:%ld",
-                       j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
+                       j + 1, FWD_CBS_HIST_DIV,
+                       rfp->n_launders_hist[j].n_launders,
                        rcutorture_seq_diff(gps, gps_old));
                gps_old = gps;
        }
@@ -1711,26 +1720,27 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
        int i;
        struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
        struct rcu_fwd_cb **rfcpp;
+       struct rcu_fwd *rfp = rfcp->rfc_rfp;
 
        rfcp->rfc_next = NULL;
        rfcp->rfc_gps++;
-       spin_lock_irqsave(&rcu_fwd_lock, flags);
-       rfcpp = rcu_fwd_cb_tail;
-       rcu_fwd_cb_tail = &rfcp->rfc_next;
+       spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
+       rfcpp = rfp->rcu_fwd_cb_tail;
+       rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
        WRITE_ONCE(*rfcpp, rfcp);
-       WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
-       i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
-       if (i >= ARRAY_SIZE(n_launders_hist))
-               i = ARRAY_SIZE(n_launders_hist) - 1;
-       n_launders_hist[i].n_launders++;
-       n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
-       spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+       WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
+       i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
+       if (i >= ARRAY_SIZE(rfp->n_launders_hist))
+               i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
+       rfp->n_launders_hist[i].n_launders++;
+       rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
+       spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
 }
 
 // Give the scheduler a chance, even on nohz_full CPUs.
 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
 {
-       if (IS_ENABLED(CONFIG_PREEMPT) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
+       if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
                // Real call_rcu() floods hit userspace, so emulate that.
                if (need_resched() || (iter & 0xfff))
                        schedule();
@@ -1744,23 +1754,23 @@ static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
  * Free all callbacks on the rcu_fwd_cb_head list, either because the
  * test is over or because we hit an OOM event.
  */
-static unsigned long rcu_torture_fwd_prog_cbfree(void)
+static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
 {
        unsigned long flags;
        unsigned long freed = 0;
        struct rcu_fwd_cb *rfcp;
 
        for (;;) {
-               spin_lock_irqsave(&rcu_fwd_lock, flags);
-               rfcp = rcu_fwd_cb_head;
+               spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
+               rfcp = rfp->rcu_fwd_cb_head;
                if (!rfcp) {
-                       spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+                       spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
                        break;
                }
-               rcu_fwd_cb_head = rfcp->rfc_next;
-               if (!rcu_fwd_cb_head)
-                       rcu_fwd_cb_tail = &rcu_fwd_cb_head;
-               spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+               rfp->rcu_fwd_cb_head = rfcp->rfc_next;
+               if (!rfp->rcu_fwd_cb_head)
+                       rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
+               spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
                kfree(rfcp);
                freed++;
                rcu_torture_fwd_prog_cond_resched(freed);
@@ -1774,7 +1784,8 @@ static unsigned long rcu_torture_fwd_prog_cbfree(void)
 }
 
 /* Carry out need_resched()/cond_resched() forward-progress testing. */
-static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
+static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
+                                   int *tested, int *tested_tries)
 {
        unsigned long cver;
        unsigned long dur;
@@ -1804,8 +1815,8 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
        sd = cur_ops->stall_dur() + 1;
        sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
        dur = sd4 + torture_random(&trs) % (sd - sd4);
-       WRITE_ONCE(rcu_fwd_startat, jiffies);
-       stopat = rcu_fwd_startat + dur;
+       WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
+       stopat = rfp->rcu_fwd_startat + dur;
        while (time_before(jiffies, stopat) &&
               !shutdown_time_arrived() &&
               !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
@@ -1840,7 +1851,7 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
 }
 
 /* Carry out call_rcu() forward-progress testing. */
-static void rcu_torture_fwd_prog_cr(void)
+static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
 {
        unsigned long cver;
        unsigned long flags;
@@ -1864,23 +1875,23 @@ static void rcu_torture_fwd_prog_cr(void)
        /* Loop continuously posting RCU callbacks. */
        WRITE_ONCE(rcu_fwd_cb_nodelay, true);
        cur_ops->sync(); /* Later readers see above write. */
-       WRITE_ONCE(rcu_fwd_startat, jiffies);
-       stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
+       WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
+       stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
        n_launders = 0;
-       n_launders_cb = 0;
+       rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
        n_launders_sa = 0;
        n_max_cbs = 0;
        n_max_gps = 0;
-       for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
-               n_launders_hist[i].n_launders = 0;
+       for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
+               rfp->n_launders_hist[i].n_launders = 0;
        cver = READ_ONCE(rcu_torture_current_version);
        gps = cur_ops->get_gp_seq();
-       rcu_launder_gp_seq_start = gps;
+       rfp->rcu_launder_gp_seq_start = gps;
        tick_dep_set_task(current, TICK_DEP_BIT_RCU);
        while (time_before(jiffies, stopat) &&
               !shutdown_time_arrived() &&
               !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
-               rfcp = READ_ONCE(rcu_fwd_cb_head);
+               rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
                rfcpn = NULL;
                if (rfcp)
                        rfcpn = READ_ONCE(rfcp->rfc_next);
@@ -1888,7 +1899,7 @@ static void rcu_torture_fwd_prog_cr(void)
                        if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
                            ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
                                break;
-                       rcu_fwd_cb_head = rfcpn;
+                       rfp->rcu_fwd_cb_head = rfcpn;
                        n_launders++;
                        n_launders_sa++;
                } else {
@@ -1900,6 +1911,7 @@ static void rcu_torture_fwd_prog_cr(void)
                        n_max_cbs++;
                        n_launders_sa = 0;
                        rfcp->rfc_gps = 0;
+                       rfcp->rfc_rfp = rfp;
                }
                cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
                rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
@@ -1910,22 +1922,22 @@ static void rcu_torture_fwd_prog_cr(void)
                }
        }
        stoppedat = jiffies;
-       n_launders_cb_snap = READ_ONCE(n_launders_cb);
+       n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
        cver = READ_ONCE(rcu_torture_current_version) - cver;
        gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
        cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
-       (void)rcu_torture_fwd_prog_cbfree();
+       (void)rcu_torture_fwd_prog_cbfree(rfp);
 
        if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
            !shutdown_time_arrived()) {
                WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
                pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
                         __func__,
-                        stoppedat - rcu_fwd_startat, jiffies - stoppedat,
+                        stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
                         n_launders + n_max_cbs - n_launders_cb_snap,
                         n_launders, n_launders_sa,
                         n_max_gps, n_max_cbs, cver, gps);
-               rcu_torture_fwd_cb_hist();
+               rcu_torture_fwd_cb_hist(rfp);
        }
        schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
        tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
@@ -1940,20 +1952,22 @@ static void rcu_torture_fwd_prog_cr(void)
 static int rcutorture_oom_notify(struct notifier_block *self,
                                 unsigned long notused, void *nfreed)
 {
+       struct rcu_fwd *rfp = rcu_fwds;
+
        WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
             __func__);
-       rcu_torture_fwd_cb_hist();
-       rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
+       rcu_torture_fwd_cb_hist(rfp);
+       rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
        WRITE_ONCE(rcu_fwd_emergency_stop, true);
        smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
        pr_info("%s: Freed %lu RCU callbacks.\n",
-               __func__, rcu_torture_fwd_prog_cbfree());
+               __func__, rcu_torture_fwd_prog_cbfree(rfp));
        rcu_barrier();
        pr_info("%s: Freed %lu RCU callbacks.\n",
-               __func__, rcu_torture_fwd_prog_cbfree());
+               __func__, rcu_torture_fwd_prog_cbfree(rfp));
        rcu_barrier();
        pr_info("%s: Freed %lu RCU callbacks.\n",
-               __func__, rcu_torture_fwd_prog_cbfree());
+               __func__, rcu_torture_fwd_prog_cbfree(rfp));
        smp_mb(); /* Frees before return to avoid redoing OOM. */
        (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
        pr_info("%s returning after OOM processing.\n", __func__);
@@ -1967,6 +1981,7 @@ static struct notifier_block rcutorture_oom_nb = {
 /* Carry out grace-period forward-progress testing. */
 static int rcu_torture_fwd_prog(void *args)
 {
+       struct rcu_fwd *rfp = args;
        int tested = 0;
        int tested_tries = 0;
 
@@ -1978,8 +1993,8 @@ static int rcu_torture_fwd_prog(void *args)
                schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
                WRITE_ONCE(rcu_fwd_emergency_stop, false);
                register_oom_notifier(&rcutorture_oom_nb);
-               rcu_torture_fwd_prog_nr(&tested, &tested_tries);
-               rcu_torture_fwd_prog_cr();
+               rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
+               rcu_torture_fwd_prog_cr(rfp);
                unregister_oom_notifier(&rcutorture_oom_nb);
 
                /* Avoid slow periods, better to test when busy. */
@@ -1995,6 +2010,8 @@ static int rcu_torture_fwd_prog(void *args)
 /* If forward-progress checking is requested and feasible, spawn the thread. */
 static int __init rcu_torture_fwd_prog_init(void)
 {
+       struct rcu_fwd *rfp;
+
        if (!fwd_progress)
                return 0; /* Not requested, so don't do it. */
        if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
@@ -2013,8 +2030,12 @@ static int __init rcu_torture_fwd_prog_init(void)
                fwd_progress_holdoff = 1;
        if (fwd_progress_div <= 0)
                fwd_progress_div = 4;
-       return torture_create_kthread(rcu_torture_fwd_prog,
-                                     NULL, fwd_prog_task);
+       rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
+       if (!rfp)
+               return -ENOMEM;
+       spin_lock_init(&rfp->rcu_fwd_lock);
+       rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
+       return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
 }
 
 /* Callback function for RCU barrier testing. */
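
The hunk above finishes a conversion in which every former file-scope variable
(rcu_fwd_cb_head, rcu_fwd_cb_tail, rcu_fwd_startat, and friends) moves into a
dynamically allocated struct rcu_fwd whose address becomes the kthread's
argument. A minimal sketch of the same pattern, as a self-contained userspace
analogue with hypothetical names (fwd_state, fwd_thread) and pthreads standing
in for the kthread and spinlock APIs:

    #include <pthread.h>
    #include <stdlib.h>

    struct cb {
            struct cb *next;
    };

    struct fwd_state {                      /* plays the role of struct rcu_fwd */
            pthread_mutex_t lock;           /* was the global rcu_fwd_lock */
            struct cb *cb_head;             /* was the global rcu_fwd_cb_head */
            struct cb **cb_tail;            /* was the global rcu_fwd_cb_tail */
    };

    static void *fwd_thread(void *arg)
    {
            struct fwd_state *fs = arg;     /* all state reached via fs, no globals */

            pthread_mutex_lock(&fs->lock);
            /* ... enqueue/dequeue through fs->cb_head and fs->cb_tail ... */
            pthread_mutex_unlock(&fs->lock);
            return NULL;
    }

    int main(void)
    {
            struct fwd_state *fs = calloc(1, sizeof(*fs));  /* kzalloc() analogue */
            pthread_t tid;

            if (!fs)
                    return 1;
            pthread_mutex_init(&fs->lock, NULL);
            fs->cb_tail = &fs->cb_head;     /* empty list: tail points at the head pointer */
            if (pthread_create(&tid, NULL, fwd_thread, fs))
                    return 1;
            pthread_join(tid, NULL);
            free(fs);
            return 0;
    }
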
index 44d6606b83257acde72fe92435de6245fe77131e..6208c1dae5c955a198470a2010739f44d2c747c4 100644 (file)
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
  * Workqueue handler to drive one grace period and invoke any callbacks
- * that become ready as a result.  Single-CPU and !PREEMPT operation
+ * that become ready as a result.  Single-CPU and !PREEMPTION operation
  * means that we get away with murder on synchronization.  ;-)
  */
 void srcu_drive_gp(struct work_struct *wp)
index 5dffade2d7cd03b05007adca298a141184cd4551..657e6a7d1c03e5dde1223fe2ca289acdd97777da 100644 (file)
@@ -530,7 +530,7 @@ static void srcu_gp_end(struct srcu_struct *ssp)
        idx = rcu_seq_state(ssp->srcu_gp_seq);
        WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
        cbdelay = srcu_get_delay(ssp);
-       ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+       WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
        rcu_seq_end(&ssp->srcu_gp_seq);
        gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
        if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
@@ -762,6 +762,7 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp)
        unsigned long flags;
        struct srcu_data *sdp;
        unsigned long t;
+       unsigned long tlast;
 
        /* If the local srcu_data structure has callbacks, not idle.  */
        local_irq_save(flags);
@@ -780,9 +781,9 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp)
 
        /* First, see if enough time has passed since the last GP. */
        t = ktime_get_mono_fast_ns();
+       tlast = READ_ONCE(ssp->srcu_last_gp_end);
        if (exp_holdoff == 0 ||
-           time_in_range_open(t, ssp->srcu_last_gp_end,
-                              ssp->srcu_last_gp_end + exp_holdoff))
+           time_in_range_open(t, tlast, tlast + exp_holdoff))
                return false; /* Too soon after last GP. */
 
        /* Next, check for probable idleness. */
@@ -853,7 +854,7 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
        local_irq_save(flags);
        sdp = this_cpu_ptr(ssp->sda);
        spin_lock_rcu_node(sdp);
-       rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
+       rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_gp_seq));
        s = rcu_seq_snap(&ssp->srcu_gp_seq);
@@ -1052,7 +1053,7 @@ void srcu_barrier(struct srcu_struct *ssp)
                sdp->srcu_barrier_head.func = srcu_barrier_cb;
                debug_rcu_head_queue(&sdp->srcu_barrier_head);
                if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
-                                          &sdp->srcu_barrier_head, 0)) {
+                                          &sdp->srcu_barrier_head)) {
                        debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
                        atomic_dec(&ssp->srcu_barrier_cpu_cnt);
                }
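
Two changes above work as a pair: srcu_gp_end() now publishes ->srcu_last_gp_end
with WRITE_ONCE(), and srcu_might_be_idle() loads it exactly once into the new
tlast local, so both arguments of the range check see the same value even if a
grace period ends concurrently. A hedged userspace sketch of the idiom, with
C11 relaxed atomics standing in for READ_ONCE()/WRITE_ONCE():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Written at grace-period end, read from other contexts without a lock. */
    static _Atomic unsigned long last_gp_end;

    static void gp_end(unsigned long now)
    {
            /* WRITE_ONCE() analogue: one untorn store. */
            atomic_store_explicit(&last_gp_end, now, memory_order_relaxed);
    }

    static bool too_soon(unsigned long now, unsigned long holdoff)
    {
            /* READ_ONCE() analogue into a local: every use below is
             * guaranteed to be based on the same sampled value. */
            unsigned long tlast = atomic_load_explicit(&last_gp_end,
                                                       memory_order_relaxed);

            return now - tlast < holdoff;   /* wraparound-safe interval test */
    }

    int main(void)
    {
            gp_end(100);
            printf("%d\n", too_soon(150, 100));   /* 1: still inside the holdoff */
            return 0;
    }
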
index 477b4eb44af5c9ea48c639f3ccf88156d54520b1..dd572ce7c7479caaa2a957dbb0a3fc65f4be386a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/time.h>
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
+#include <linux/slab.h>
 
 #include "rcu.h"
 
@@ -73,6 +74,31 @@ void rcu_sched_clock_irq(int user)
        }
 }
 
+/*
+ * Reclaim the specified callback, either by invoking it for non-kfree cases or
+ * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
+ */
+static inline bool rcu_reclaim_tiny(struct rcu_head *head)
+{
+       rcu_callback_t f;
+       unsigned long offset = (unsigned long)head->func;
+
+       rcu_lock_acquire(&rcu_callback_map);
+       if (__is_kfree_rcu_offset(offset)) {
+               trace_rcu_invoke_kfree_callback("", head, offset);
+               kfree((void *)head - offset);
+               rcu_lock_release(&rcu_callback_map);
+               return true;
+       }
+
+       trace_rcu_invoke_callback("", head);
+       f = head->func;
+       WRITE_ONCE(head->func, (rcu_callback_t)0L);
+       f(head);
+       rcu_lock_release(&rcu_callback_map);
+       return false;
+}
+
 /* Invoke the RCU callbacks whose grace period has elapsed.  */
 static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
@@ -100,7 +126,7 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
-               __rcu_reclaim("", list);
+               rcu_reclaim_tiny(list);
                local_bh_enable();
                list = next;
        }
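
The new rcu_reclaim_tiny() open-codes what the old __rcu_reclaim() helper did,
and the interesting part is the offset trick: kfree_rcu() smuggles
offsetof(type, rcu_field) through the callback-pointer slot, and the reclaim
path recovers the enclosing object by subtraction. A self-contained userspace
sketch of the trick, with is_offset() playing the role of
__is_kfree_rcu_offset() (all names hypothetical):

    #include <stddef.h>
    #include <stdlib.h>

    struct head_sim {                       /* stand-in for struct rcu_head */
            void (*func)(struct head_sim *);
    };

    struct widget {
            int payload;
            struct head_sim rh;
    };

    /* Offsets of a head within its enclosing object are tiny, so any
     * "function pointer" below 4096 must really be an offset. */
    static int is_offset(void (*f)(struct head_sim *))
    {
            return (unsigned long)f < 4096;
    }

    static void reclaim(struct head_sim *head)
    {
            if (is_offset(head->func))      /* kfree case: free the whole object */
                    free((char *)head - (unsigned long)head->func);
            else                            /* ordinary callback case */
                    head->func(head);
    }

    int main(void)
    {
            struct widget *w = malloc(sizeof(*w));

            if (!w)
                    return 1;
            /* kfree_rcu(w, rh) analogue: store an offset, not a function. */
            w->rh.func = (void (*)(struct head_sim *))offsetof(struct widget, rh);
            reclaim(&w->rh);                /* frees w itself */
            return 0;
    }
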
index 1694a6b57ad8c96e5614a385789084e0e963f837..d91c9156fab2ef0ad64ef1b31a77c3a10626d254 100644 (file)
@@ -43,7 +43,6 @@
 #include <uapi/linux/sched/types.h>
 #include <linux/prefetch.h>
 #include <linux/delay.h>
-#include <linux/stop_machine.h>
 #include <linux/random.h>
 #include <linux/trace_events.h>
 #include <linux/suspend.h>
@@ -55,6 +54,7 @@
 #include <linux/oom.h>
 #include <linux/smpboot.h>
 #include <linux/jiffies.h>
+#include <linux/slab.h>
 #include <linux/sched/isolation.h>
 #include <linux/sched/clock.h>
 #include "../time/tick-internal.h"
@@ -84,7 +84,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
        .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 };
-struct rcu_state rcu_state = {
+static struct rcu_state rcu_state = {
        .level = { &rcu_state.node[0] },
        .gp_state = RCU_GP_IDLE,
        .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
@@ -188,7 +188,7 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
  * held, but the bit corresponding to the current CPU will be stable
  * in most contexts.
  */
-unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
+static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
 {
        return READ_ONCE(rnp->qsmaskinitnext);
 }
@@ -294,7 +294,7 @@ static void rcu_dynticks_eqs_online(void)
  *
  * No ordering, as we are sampling CPU-local information.
  */
-bool rcu_dynticks_curr_cpu_in_eqs(void)
+static bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
@@ -305,7 +305,7 @@ bool rcu_dynticks_curr_cpu_in_eqs(void)
  * Snapshot the ->dynticks counter with full ordering so as to allow
  * stable comparison of this counter with past and future snapshots.
  */
-int rcu_dynticks_snap(struct rcu_data *rdp)
+static int rcu_dynticks_snap(struct rcu_data *rdp)
 {
        int snap = atomic_add_return(0, &rdp->dynticks);
 
@@ -528,16 +528,6 @@ static struct rcu_node *rcu_get_root(void)
        return &rcu_state.node[0];
 }
 
-/*
- * Convert a ->gp_state value to a character string.
- */
-static const char *gp_state_getname(short gs)
-{
-       if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
-               return "???";
-       return gp_state_names[gs];
-}
-
 /*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
@@ -577,7 +567,7 @@ static void rcu_eqs_enter(bool user)
        }
 
        lockdep_assert_irqs_disabled();
-       trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks);
+       trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
        rdp = this_cpu_ptr(&rcu_data);
        do_nocb_deferred_wakeup(rdp);
@@ -650,14 +640,15 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
         * leave it in non-RCU-idle state.
         */
        if (rdp->dynticks_nmi_nesting != 1) {
-               trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
+               trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
+                                 atomic_read(&rdp->dynticks));
                WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
                           rdp->dynticks_nmi_nesting - 2);
                return;
        }
 
        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
-       trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks);
+       trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
        WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
        if (irq)
@@ -744,7 +735,7 @@ static void rcu_eqs_exit(bool user)
        rcu_dynticks_task_exit();
        rcu_dynticks_eqs_exit();
        rcu_cleanup_after_idle();
-       trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks);
+       trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
        WRITE_ONCE(rdp->dynticks_nesting, 1);
        WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
@@ -800,8 +791,8 @@ void rcu_user_exit(void)
  */
 static __always_inline void rcu_nmi_enter_common(bool irq)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        long incby = 2;
+       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
        /* Complain about underflow. */
        WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
@@ -828,12 +819,17 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
        } else if (tick_nohz_full_cpu(rdp->cpu) &&
                   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
                   READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
-               rdp->rcu_forced_tick = true;
-               tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+               raw_spin_lock_rcu_node(rdp->mynode);
+               // Recheck under lock.
+               if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+                       rdp->rcu_forced_tick = true;
+                       tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+               }
+               raw_spin_unlock_rcu_node(rdp->mynode);
        }
        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                          rdp->dynticks_nmi_nesting,
-                         rdp->dynticks_nmi_nesting + incby, rdp->dynticks);
+                         rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
        WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
                   rdp->dynticks_nmi_nesting + incby);
        barrier();
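
The rcu_nmi_enter_common() change above turns a check-then-act sequence into
check, lock, recheck, act: the unlocked test keeps the common case cheap, and
repeating the same test under the rcu_node ->lock closes the window in which
another CPU could have cleared ->rcu_urgent_qs or already set
->rcu_forced_tick. The shape of the pattern, as a small hedged pthreads sketch:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool urgent_qs;          /* may change under our feet */
    static bool tick_forced;        /* protected by node_lock */

    static void maybe_force_tick(void)
    {
            if (!urgent_qs || tick_forced)  /* cheap unlocked filter */
                    return;
            pthread_mutex_lock(&node_lock);
            if (urgent_qs && !tick_forced)  /* recheck: the filter may be stale */
                    tick_forced = true;     /* tick_dep_set_cpu() would go here */
            pthread_mutex_unlock(&node_lock);
    }

    int main(void)
    {
            urgent_qs = true;
            maybe_force_tick();
            return !tick_forced;    /* exits 0 once the dependency is set */
    }
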
@@ -898,6 +894,7 @@ void rcu_irq_enter_irqson(void)
  */
 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
 {
+       raw_lockdep_assert_held_rcu_node(rdp->mynode);
        WRITE_ONCE(rdp->rcu_urgent_qs, false);
        WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
        if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
@@ -1934,7 +1931,7 @@ rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        struct rcu_node *rnp_p;
 
        raw_lockdep_assert_held_rcu_node(rnp);
-       if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPTION)) ||
+       if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
            WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
            rnp->qsmask != 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2146,7 +2143,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
        /* If no callbacks are ready, just return. */
        if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
                trace_rcu_batch_start(rcu_state.name,
-                                     rcu_segcblist_n_lazy_cbs(&rdp->cblist),
                                      rcu_segcblist_n_cbs(&rdp->cblist), 0);
                trace_rcu_batch_end(rcu_state.name, 0,
                                    !rcu_segcblist_empty(&rdp->cblist),
@@ -2168,7 +2164,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
        if (unlikely(bl > 100))
                tlimit = local_clock() + rcu_resched_ns;
        trace_rcu_batch_start(rcu_state.name,
-                             rcu_segcblist_n_lazy_cbs(&rdp->cblist),
                              rcu_segcblist_n_cbs(&rdp->cblist), bl);
        rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
        if (offloaded)
@@ -2179,9 +2174,19 @@ static void rcu_do_batch(struct rcu_data *rdp)
        tick_dep_set_task(current, TICK_DEP_BIT_RCU);
        rhp = rcu_cblist_dequeue(&rcl);
        for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
+               rcu_callback_t f;
+
                debug_rcu_head_unqueue(rhp);
-               if (__rcu_reclaim(rcu_state.name, rhp))
-                       rcu_cblist_dequeued_lazy(&rcl);
+
+               rcu_lock_acquire(&rcu_callback_map);
+               trace_rcu_invoke_callback(rcu_state.name, rhp);
+
+               f = rhp->func;
+               WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
+               f(rhp);
+
+               rcu_lock_release(&rcu_callback_map);
+
                /*
                 * Stop only if limit reached and CPU has something to do.
                 * Note: The rcl structure counts down from zero.
@@ -2294,7 +2299,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
                mask = 0;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->qsmask == 0) {
-                       if (!IS_ENABLED(CONFIG_PREEMPTION) ||
+                       if (!IS_ENABLED(CONFIG_PREEMPT_RCU) ||
                            rcu_preempt_blocked_readers_cgp(rnp)) {
                                /*
                                 * No point in scanning bits because they
@@ -2308,14 +2313,11 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;
                }
-               for_each_leaf_node_possible_cpu(rnp, cpu) {
-                       unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
-                       if ((rnp->qsmask & bit) != 0) {
-                               rdp = per_cpu_ptr(&rcu_data, cpu);
-                               if (f(rdp)) {
-                                       mask |= bit;
-                                       rcu_disable_urgency_upon_qs(rdp);
-                               }
+               for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
+                       rdp = per_cpu_ptr(&rcu_data, cpu);
+                       if (f(rdp)) {
+                               mask |= rdp->grpmask;
+                               rcu_disable_urgency_upon_qs(rdp);
                        }
                }
                if (mask != 0) {
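
The force_qs_rnp() rewrite above replaces a walk over every possible leaf CPU
(testing each CPU's bit by hand) with for_each_leaf_node_cpu_mask(), which
visits only the CPUs whose bits are set in ->qsmask, taking each CPU's bit from
rdp->grpmask instead of recomputing it. A hedged plain-C sketch of this
skip-the-clear-bits iteration style, using GCC/Clang builtins:

    #include <stdio.h>

    /* Visit only the set bits of mask, lowest first. */
    #define for_each_set_bit_ul(bit, tmp, mask)                   \
            for ((tmp) = (mask);                                  \
                 (tmp) && (((bit) = __builtin_ctzl(tmp)), 1);     \
                 (tmp) &= (tmp) - 1)       /* clear the lowest set bit */

    int main(void)
    {
            unsigned long qsmask = 0x29;   /* bits 0, 3, and 5 set */
            unsigned long tmp;
            int cpu;

            for_each_set_bit_ul(cpu, tmp, qsmask)
                    printf("check CPU %d\n", cpu);
            return 0;
    }
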
@@ -2474,8 +2476,8 @@ static void rcu_cpu_kthread(unsigned int cpu)
        char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
        int spincnt;
 
+       trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
        for (spincnt = 0; spincnt < 10; spincnt++) {
-               trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
                local_bh_disable();
                *statusp = RCU_KTHREAD_RUNNING;
                local_irq_disable();
@@ -2583,7 +2585,7 @@ static void rcu_leak_callback(struct rcu_head *rhp)
  * is expected to specify a CPU.
  */
 static void
-__call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)
+__call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
        unsigned long flags;
        struct rcu_data *rdp;
@@ -2618,18 +2620,17 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)
                if (rcu_segcblist_empty(&rdp->cblist))
                        rcu_segcblist_init(&rdp->cblist);
        }
+
        if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
                return; // Enqueued onto ->nocb_bypass, so just leave.
        /* If we get here, rcu_nocb_try_bypass() acquired ->nocb_lock. */
-       rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
+       rcu_segcblist_enqueue(&rdp->cblist, head);
        if (__is_kfree_rcu_offset((unsigned long)func))
                trace_rcu_kfree_callback(rcu_state.name, head,
                                         (unsigned long)func,
-                                        rcu_segcblist_n_lazy_cbs(&rdp->cblist),
                                         rcu_segcblist_n_cbs(&rdp->cblist));
        else
                trace_rcu_callback(rcu_state.name, head,
-                                  rcu_segcblist_n_lazy_cbs(&rdp->cblist),
                                   rcu_segcblist_n_cbs(&rdp->cblist));
 
        /* Go handle any RCU core processing required. */
@@ -2679,28 +2680,230 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)
  */
 void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-       __call_rcu(head, func, 0);
+       __call_rcu(head, func);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
+
+/* Maximum number of jiffies to wait before draining a batch. */
+#define KFREE_DRAIN_JIFFIES (HZ / 50)
+#define KFREE_N_BATCHES 2
+
+/**
+ * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
+ * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
+ * @head_free: List of kfree_rcu() objects waiting for a grace period
+ * @krcp: Pointer to the corresponding kfree_rcu_cpu structure
+ */
+struct kfree_rcu_cpu_work {
+       struct rcu_work rcu_work;
+       struct rcu_head *head_free;
+       struct kfree_rcu_cpu *krcp;
+};
+
+/**
+ * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
+ * @head: List of kfree_rcu() objects not yet waiting for a grace period
+ * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
+ * @lock: Synchronize access to this structure
+ * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
+ * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
+ * @initialized: The @lock and @rcu_work fields have been initialized
+ *
+ * This is a per-CPU structure.  The reason that it is not included in
+ * the rcu_data structure is to permit this code to be extracted from
+ * the RCU files.  Such extraction could allow further optimization of
+ * the interactions with the slab allocators.
+ */
+struct kfree_rcu_cpu {
+       struct rcu_head *head;
+       struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
+       spinlock_t lock;
+       struct delayed_work monitor_work;
+       bool monitor_todo;
+       bool initialized;
+};
+
+static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc);
+
+/*
+ * This function is invoked in workqueue context after a grace period.
+ * It frees all the objects queued on ->head_free.
+ */
+static void kfree_rcu_work(struct work_struct *work)
+{
+       unsigned long flags;
+       struct rcu_head *head, *next;
+       struct kfree_rcu_cpu *krcp;
+       struct kfree_rcu_cpu_work *krwp;
+
+       krwp = container_of(to_rcu_work(work),
+                           struct kfree_rcu_cpu_work, rcu_work);
+       krcp = krwp->krcp;
+       spin_lock_irqsave(&krcp->lock, flags);
+       head = krwp->head_free;
+       krwp->head_free = NULL;
+       spin_unlock_irqrestore(&krcp->lock, flags);
+
+       // List "head" is now private, so traverse locklessly.
+       for (; head; head = next) {
+               unsigned long offset = (unsigned long)head->func;
+
+               next = head->next;
+               debug_rcu_head_unqueue(head);
+               rcu_lock_acquire(&rcu_callback_map);
+               trace_rcu_invoke_kfree_callback(rcu_state.name, head, offset);
+
+               if (!WARN_ON_ONCE(!__is_kfree_rcu_offset(offset))) {
+                       /* Could be optimized with kfree_bulk() in future. */
+                       kfree((void *)head - offset);
+               }
+
+               rcu_lock_release(&rcu_callback_map);
+               cond_resched_tasks_rcu_qs();
+       }
+}
+
 /*
- * Queue an RCU callback for lazy invocation after a grace period.
- * This will likely be later named something like "call_rcu_lazy()",
- * but this change will require some way of tagging the lazy RCU
- * callbacks in the list of pending callbacks. Until then, this
- * function may only be called from __kfree_rcu().
+ * Schedule the kfree batch RCU work to run in workqueue context after a GP.
+ *
+ * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
+ * timeout has been reached.
+ */
+static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
+{
+       int i;
+       struct kfree_rcu_cpu_work *krwp = NULL;
+
+       lockdep_assert_held(&krcp->lock);
+       for (i = 0; i < KFREE_N_BATCHES; i++)
+               if (!krcp->krw_arr[i].head_free) {
+                       krwp = &(krcp->krw_arr[i]);
+                       break;
+               }
+
+       // If a previous RCU batch is in progress, we cannot immediately
+       // queue another one, so return false to tell caller to retry.
+       if (!krwp)
+               return false;
+
+       krwp->head_free = krcp->head;
+       krcp->head = NULL;
+       INIT_RCU_WORK(&krwp->rcu_work, kfree_rcu_work);
+       queue_rcu_work(system_wq, &krwp->rcu_work);
+       return true;
+}
+
+static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
+                                         unsigned long flags)
+{
+       // Attempt to start a new batch.
+       krcp->monitor_todo = false;
+       if (queue_kfree_rcu_work(krcp)) {
+               // Success! Our job is done here.
+               spin_unlock_irqrestore(&krcp->lock, flags);
+               return;
+       }
+
+       // Previous RCU batch still in progress, try again later.
+       krcp->monitor_todo = true;
+       schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
+       spin_unlock_irqrestore(&krcp->lock, flags);
+}
+
+/*
+ * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
+ * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
+ */
+static void kfree_rcu_monitor(struct work_struct *work)
+{
+       unsigned long flags;
+       struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
+                                                monitor_work.work);
+
+       spin_lock_irqsave(&krcp->lock, flags);
+       if (krcp->monitor_todo)
+               kfree_rcu_drain_unlock(krcp, flags);
+       else
+               spin_unlock_irqrestore(&krcp->lock, flags);
+}
+
+/*
+ * Queue a request for lazy invocation of kfree() after a grace period.
+ *
+ * Each kfree_call_rcu() request is added to a batch. The batch is drained
+ * after KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are
+ * kfree'd in workqueue context. This allows us to:
+ *
+ * 1.  Batch requests together to reduce the number of grace periods during
+ *     heavy kfree_rcu() load.
+ *
+ * 2.  Use kfree_bulk() on a large number of kfree_rcu() requests, thus
+ *     reducing cache misses and the per-object overhead of kfree().
  */
 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-       __call_rcu(head, func, 1);
+       unsigned long flags;
+       struct kfree_rcu_cpu *krcp;
+
+       local_irq_save(flags);  // For safely calling this_cpu_ptr().
+       krcp = this_cpu_ptr(&krc);
+       if (krcp->initialized)
+               spin_lock(&krcp->lock);
+
+       // Queue the object but don't yet schedule the batch.
+       if (debug_rcu_head_queue(head)) {
+               // Probable double kfree_rcu(), just leak.
+               WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
+                         __func__, head);
+               goto unlock_return;
+       }
+       head->func = func;
+       head->next = krcp->head;
+       krcp->head = head;
+
+       // Set timer to drain after KFREE_DRAIN_JIFFIES.
+       if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+           !krcp->monitor_todo) {
+               krcp->monitor_todo = true;
+               schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
+       }
+
+unlock_return:
+       if (krcp->initialized)
+               spin_unlock(&krcp->lock);
+       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
+void __init kfree_rcu_scheduler_running(void)
+{
+       int cpu;
+       unsigned long flags;
+
+       for_each_online_cpu(cpu) {
+               struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
+
+               spin_lock_irqsave(&krcp->lock, flags);
+               if (!krcp->head || krcp->monitor_todo) {
+                       spin_unlock_irqrestore(&krcp->lock, flags);
+                       continue;
+               }
+               krcp->monitor_todo = true;
+               schedule_delayed_work_on(cpu, &krcp->monitor_work,
+                                        KFREE_DRAIN_JIFFIES);
+               spin_unlock_irqrestore(&krcp->lock, flags);
+       }
+}
+
 /*
  * During early boot, any blocking grace-period wait automatically
- * implies a grace period.  Later on, this is never the case for PREEMPT.
+ * implies a grace period.  Later on, this is never the case for PREEMPTION.
  *
- * Howevr, because a context switch is a grace period for !PREEMPT, any
+ * However, because a context switch is a grace period for !PREEMPTION, any
  * blocking grace-period wait automatically implies a grace period if
  * there is only one CPU online at any point in time during execution of
  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
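
From a caller's point of view the kfree_rcu() interface above is unchanged;
what changes is the cost model, since kfree_call_rcu() now only links the
object onto a per-CPU batch. A hedged kernel-style sketch (the struct and
function names are hypothetical, and the fragment is illustrative rather than
buildable on its own):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int data;
            struct rcu_head rh;
    };

    static void retire_foo(struct foo *fp)
    {
            /* Constant-time: fp is queued on this CPU's krc batch and the
             * function returns.  The grace period and the kfree() itself
             * happen later, from workqueue context, amortized over up to
             * KFREE_DRAIN_JIFFIES worth of queued requests. */
            kfree_rcu(fp, rh);
    }
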
@@ -2896,7 +3099,7 @@ static void rcu_barrier_func(void *unused)
        debug_rcu_head_queue(&rdp->barrier_head);
        rcu_nocb_lock(rdp);
        WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
-       if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
+       if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
                atomic_inc(&rcu_state.barrier_cpu_count);
        } else {
                debug_rcu_head_unqueue(&rdp->barrier_head);
@@ -3557,12 +3760,29 @@ static void __init rcu_dump_rcu_node_tree(void)
 struct workqueue_struct *rcu_gp_wq;
 struct workqueue_struct *rcu_par_gp_wq;
 
+static void __init kfree_rcu_batch_init(void)
+{
+       int cpu;
+       int i;
+
+       for_each_possible_cpu(cpu) {
+               struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
+
+               spin_lock_init(&krcp->lock);
+               for (i = 0; i < KFREE_N_BATCHES; i++)
+                       krcp->krw_arr[i].krcp = krcp;
+               INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
+               krcp->initialized = true;
+       }
+}
+
 void __init rcu_init(void)
 {
        int cpu;
 
        rcu_early_boot_tests();
 
+       kfree_rcu_batch_init();
        rcu_bootup_announce();
        rcu_init_geometry();
        rcu_init_one();
index 055c31781d3ae1a6665136da5a66dd178083f559..0c87e4c161c2fa9f382afc811378bbec84d8e01a 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 #include <linux/swait.h>
-#include <linux/stop_machine.h>
 #include <linux/rcu_node_tree.h>
 
 #include "rcu_segcblist.h"
@@ -182,8 +181,8 @@ struct rcu_data {
        bool rcu_need_heavy_qs;         /* GP old, so heavy quiescent state! */
        bool rcu_urgent_qs;             /* GP old need light quiescent state. */
        bool rcu_forced_tick;           /* Forced tick to provide QS. */
+       bool rcu_forced_tick_exp;       /*   ... provide QS to expedited GP. */
 #ifdef CONFIG_RCU_FAST_NO_HZ
-       bool all_lazy;                  /* All CPU's CBs lazy at idle start? */
        unsigned long last_accelerate;  /* Last jiffy CBs were accelerated. */
        unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */
        int tick_nohz_enabled_snap;     /* Previously seen value from sysfs. */
@@ -368,18 +367,6 @@ struct rcu_state {
 #define RCU_GP_CLEANUP   7     /* Grace-period cleanup started. */
 #define RCU_GP_CLEANED   8     /* Grace-period cleanup complete. */
 
-static const char * const gp_state_names[] = {
-       "RCU_GP_IDLE",
-       "RCU_GP_WAIT_GPS",
-       "RCU_GP_DONE_GPS",
-       "RCU_GP_ONOFF",
-       "RCU_GP_INIT",
-       "RCU_GP_WAIT_FQS",
-       "RCU_GP_DOING_FQS",
-       "RCU_GP_CLEANUP",
-       "RCU_GP_CLEANED",
-};
-
 /*
  * In order to export the rcu_state name to the tracing tools, it
  * needs to be added in the __tracepoint_string section.
@@ -403,8 +390,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
 #define RCU_NAME rcu_name
 #endif /* #else #ifdef CONFIG_TRACING */
 
-int rcu_dynticks_snap(struct rcu_data *rdp);
-
 /* Forward declarations for tree_plugin.h */
 static void rcu_bootup_announce(void);
 static void rcu_qs(void);
@@ -415,7 +400,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 static void rcu_flavor_sched_clock_irq(int user);
-void call_rcu(struct rcu_head *head, rcu_callback_t func);
 static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
index d632cd01959755361b7fa588a8bc947439c1b61d..6935a9e2b094ce65688f365617024adae154201f 100644 (file)
@@ -21,7 +21,7 @@ static void rcu_exp_gp_seq_start(void)
 }
 
 /*
- * Return then value that expedited-grace-period counter will have
+ * Return the value that the expedited-grace-period counter will have
  * at the end of the current grace period.
  */
 static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
@@ -39,7 +39,9 @@ static void rcu_exp_gp_seq_end(void)
 }
 
 /*
- * Take a snapshot of the expedited-grace-period counter.
+ * Take a snapshot of the expedited-grace-period counter, which is the
+ * earliest value that will indicate that a full grace period has
+ * elapsed since the current time.
  */
 static unsigned long rcu_exp_gp_seq_snap(void)
 {
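
The expanded comment above is worth unpacking: the snapshot is the counter
value at which a grace period that is full with respect to the caller will
have ended, so it must round up past any period already in flight and then add
one more full period. A hedged arithmetic sketch, using a single state bit
where the kernel's rcu_seq machinery uses RCU_SEQ_CTR_SHIFT bits:

    #include <stdbool.h>
    #include <stdio.h>

    #define SEQ_STEP 2UL   /* low bit encodes "GP in progress" */

    static unsigned long gp_seq;

    static unsigned long seq_snap(void)
    {
            /* Round up past any running GP, then add one full GP. */
            return (gp_seq + 2 * SEQ_STEP - 1) & ~(SEQ_STEP - 1);
    }

    static bool seq_done(unsigned long snap)
    {
            return (long)(gp_seq - snap) >= 0;   /* wraparound-safe compare */
    }

    int main(void)
    {
            unsigned long s;

            gp_seq = 5;       /* odd: a GP is in progress right now */
            s = seq_snap();   /* 8: that GP ends at 6, the next full one at 8 */
            gp_seq = 8;
            printf("done: %d\n", seq_done(s));   /* 1 */
            return 0;
    }
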
@@ -134,7 +136,7 @@ static void __maybe_unused sync_exp_reset_tree(void)
        rcu_for_each_node_breadth_first(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
-               rnp->expmask = rnp->expmaskinit;
+               WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }
@@ -143,31 +145,26 @@ static void __maybe_unused sync_exp_reset_tree(void)
  * Return non-zero if there is no RCU expedited grace period in progress
  * for the specified rcu_node structure, in other words, if all CPUs and
  * tasks covered by the specified rcu_node structure have done their bit
- * for the current expedited grace period.  Works only for preemptible
- * RCU -- other RCU implementation use other means.
- *
- * Caller must hold the specificed rcu_node structure's ->lock
+ * for the current expedited grace period.
  */
-static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
+static bool sync_rcu_exp_done(struct rcu_node *rnp)
 {
        raw_lockdep_assert_held_rcu_node(rnp);
-
        return rnp->exp_tasks == NULL &&
               READ_ONCE(rnp->expmask) == 0;
 }
 
 /*
- * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
- * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
- * itself
+ * Like sync_rcu_exp_done(), but where the caller does not hold the
+ * rcu_node's ->lock.
  */
-static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
+static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
 {
        unsigned long flags;
        bool ret;
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       ret = sync_rcu_preempt_exp_done(rnp);
+       ret = sync_rcu_exp_done(rnp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
        return ret;
@@ -181,8 +178,6 @@ static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
  * which the task was queued or to one of that rcu_node structure's ancestors,
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
- *
- * Caller must hold the specified rcu_node structure's ->lock.
  */
 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
@@ -190,8 +185,9 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 {
        unsigned long mask;
 
+       raw_lockdep_assert_held_rcu_node(rnp);
        for (;;) {
-               if (!sync_rcu_preempt_exp_done(rnp)) {
+               if (!sync_rcu_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
@@ -211,7 +207,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
-               rnp->expmask &= ~mask;
+               WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
        }
 }
 
@@ -234,14 +230,23 @@ static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
 static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
 {
+       int cpu;
        unsigned long flags;
+       struct rcu_data *rdp;
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
-       rnp->expmask &= ~mask;
+       WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
+       for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
+               rdp = per_cpu_ptr(&rcu_data, cpu);
+               if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
+                       continue;
+               rdp->rcu_forced_tick_exp = false;
+               tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
+       }
        __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
 }
 
@@ -345,8 +350,8 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
        /* Each pass checks a CPU for identity, offline, and idle. */
        mask_ofl_test = 0;
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
-               unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+               unsigned long mask = rdp->grpmask;
                int snap;
 
                if (raw_smp_processor_id() == cpu ||
@@ -372,12 +377,10 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
        /* IPI the remaining CPUs for expedited quiescent state. */
-       for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
-               unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
+       for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+               unsigned long mask = rdp->grpmask;
 
-               if (!(mask_ofl_ipi & mask))
-                       continue;
 retry_ipi:
                if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
                        mask_ofl_test |= mask;
@@ -389,10 +392,10 @@ retry_ipi:
                }
                ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
                put_cpu();
-               if (!ret) {
-                       mask_ofl_ipi &= ~mask;
+               /* The CPU will report the QS in response to the IPI. */
+               if (!ret)
                        continue;
-               }
+
                /* Failed, raced with CPU hotplug operation. */
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if ((rnp->qsmaskinitnext & mask) &&
@@ -403,13 +406,12 @@ retry_ipi:
                        schedule_timeout_uninterruptible(1);
                        goto retry_ipi;
                }
-               /* CPU really is offline, so we can ignore it. */
-               if (!(rnp->expmask & mask))
-                       mask_ofl_ipi &= ~mask;
+               /* CPU really is offline, so we must report its QS. */
+               if (rnp->expmask & mask)
+                       mask_ofl_test |= mask;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
        /* Report quiescent states for those that went offline. */
-       mask_ofl_test |= mask_ofl_ipi;
        if (mask_ofl_test)
                rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
 }
@@ -456,29 +458,62 @@ static void sync_rcu_exp_select_cpus(void)
                        flush_work(&rnp->rew.rew_work);
 }
 
-static void synchronize_sched_expedited_wait(void)
+/*
+ * Wait for the expedited grace period to elapse, within time limit.
+ * If the time limit is exceeded without the grace period elapsing,
+ * return false, otherwise return true.
+ */
+static bool synchronize_rcu_expedited_wait_once(long tlimit)
+{
+       int t;
+       struct rcu_node *rnp_root = rcu_get_root();
+
+       t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
+                                         sync_rcu_exp_done_unlocked(rnp_root),
+                                         tlimit);
+       if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
+               return true;
+       WARN_ON(t < 0);  // Workqueues should not be signaled.
+       return false;
+}
+
+/*
+ * Wait for the expedited grace period to elapse, issuing any needed
+ * RCU CPU stall warnings along the way.
+ */
+static void synchronize_rcu_expedited_wait(void)
 {
        int cpu;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
+       struct rcu_data *rdp;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root();
-       int ret;
 
        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
        jiffies_stall = rcu_jiffies_till_stall_check();
        jiffies_start = jiffies;
+       if (IS_ENABLED(CONFIG_NO_HZ_FULL)) {
+               if (synchronize_rcu_expedited_wait_once(1))
+                       return;
+               rcu_for_each_leaf_node(rnp) {
+                       for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
+                               rdp = per_cpu_ptr(&rcu_data, cpu);
+                               if (rdp->rcu_forced_tick_exp)
+                                       continue;
+                               rdp->rcu_forced_tick_exp = true;
+                               tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
+                       }
+               }
+               WARN_ON_ONCE(1);
+       }
 
        for (;;) {
-               ret = swait_event_timeout_exclusive(
-                               rcu_state.expedited_wq,
-                               sync_rcu_preempt_exp_done_unlocked(rnp_root),
-                               jiffies_stall);
-               if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
+               if (synchronize_rcu_expedited_wait_once(jiffies_stall))
                        return;
-               WARN_ON(ret < 0);  /* workqueues should not be signaled. */
                if (rcu_cpu_stall_suppress)
                        continue;
                panic_on_rcu_stall();
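
Factoring out synchronize_rcu_expedited_wait_once() turns the stall-warning
loop into "try one bounded wait, then complain and retry", and lets the new
NO_HZ_FULL path reuse the same helper with a one-jiffy limit. The control
shape, as a hedged pthreads sketch (a condition variable stands in for the
swait queue):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool gp_done;

    /* One bounded wait attempt; true if the condition was met in time.
     * Note the final recheck: a wakeup racing with the timeout must win. */
    static bool wait_once(long ms)
    {
            struct timespec ts;
            bool done;

            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += ms / 1000;
            ts.tv_nsec += (ms % 1000) * 1000000L;
            if (ts.tv_nsec >= 1000000000L) {
                    ts.tv_sec++;
                    ts.tv_nsec -= 1000000000L;
            }
            pthread_mutex_lock(&lock);
            while (!gp_done &&
                   pthread_cond_timedwait(&cond, &lock, &ts) != ETIMEDOUT)
                    ;
            done = gp_done;
            pthread_mutex_unlock(&lock);
            return done;
    }

    static void wait_reporting_stalls(long stall_ms)
    {
            while (!wait_once(stall_ms))
                    fprintf(stderr, "stall warning\n");   /* then keep waiting */
    }

    int main(void)
    {
            gp_done = true;   /* pretend the GP has already ended */
            wait_reporting_stalls(100);
            return 0;
    }
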
@@ -491,7 +526,7 @@ static void synchronize_sched_expedited_wait(void)
                                struct rcu_data *rdp;
 
                                mask = leaf_node_cpu_bit(rnp, cpu);
-                               if (!(rnp->expmask & mask))
+                               if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(&rcu_data, cpu);
@@ -503,17 +538,18 @@ static void synchronize_sched_expedited_wait(void)
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rcu_state.expedited_sequence,
-                       rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
+                       READ_ONCE(rnp_root->expmask),
+                       ".T"[!!rnp_root->exp_tasks]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
-                               if (sync_rcu_preempt_exp_done_unlocked(rnp))
+                               if (sync_rcu_exp_done_unlocked(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
-                                       rnp->expmask,
+                                       READ_ONCE(rnp->expmask),
                                        ".T"[!!rnp->exp_tasks]);
                        }
                        pr_cont("\n");
@@ -521,7 +557,7 @@ static void synchronize_sched_expedited_wait(void)
                rcu_for_each_leaf_node(rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
-                               if (!(rnp->expmask & mask))
+                               if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
@@ -540,15 +576,14 @@ static void rcu_exp_wait_wake(unsigned long s)
 {
        struct rcu_node *rnp;
 
-       synchronize_sched_expedited_wait();
-       rcu_exp_gp_seq_end();
-       trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
+       synchronize_rcu_expedited_wait();
 
-       /*
-        * Switch over to wakeup mode, allowing the next GP, but -only- the
-        * next GP, to proceed.
-        */
+       // Switch over to wakeup mode, allowing the next GP to proceed.
+       // End the previous grace period only after acquiring the mutex
+       // to ensure that only one GP runs concurrently with wakeups.
        mutex_lock(&rcu_state.exp_wake_mutex);
+       rcu_exp_gp_seq_end();
+       trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
 
        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
@@ -559,7 +594,7 @@ static void rcu_exp_wait_wake(unsigned long s)
                        spin_unlock(&rnp->exp_lock);
                }
                smp_mb(); /* All above changes before wakeup. */
-               wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
+               wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
        }
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
        mutex_unlock(&rcu_state.exp_wake_mutex);
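
One subtle fix above: the wakeup now indexes rnp->exp_wq[] with rcu_seq_ctr(s),
the sequence snapshot that this grace period's waiters used, rather than
re-reading the live counter, which can already belong to a later grace period
by the time wakeups run behind exp_wake_mutex. A toy illustration of why the
two indices can differ (SEQ_CTR_SHIFT mirrors the kernel's layout):

    #include <stdio.h>

    #define SEQ_CTR_SHIFT 2   /* low bits hold grace-period state */

    /* Waiters sleep on one of four queues chosen from their snapshot. */
    static unsigned int wq_index(unsigned long s)
    {
            return (s >> SEQ_CTR_SHIFT) & 0x3;
    }

    int main(void)
    {
            unsigned long s = 2UL << SEQ_CTR_SHIFT;     /* a waiter's snapshot */
            unsigned long live = 3UL << SEQ_CTR_SHIFT;  /* counter moved on */

            /* Indexing with the live counter would wake the wrong queue. */
            printf("snapshot -> queue %u, live -> queue %u\n",
                   wq_index(s), wq_index(live));
            return 0;
    }
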
@@ -610,7 +645,7 @@ static void rcu_exp_handler(void *unused)
         * critical section.  If also enabled or idle, immediately
         * report the quiescent state, otherwise defer.
         */
-       if (!t->rcu_read_lock_nesting) {
+       if (!rcu_preempt_depth()) {
                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
                    rcu_dynticks_curr_cpu_in_eqs()) {
                        rcu_report_exp_rdp(rdp);
@@ -634,7 +669,7 @@ static void rcu_exp_handler(void *unused)
         * can have caused this quiescent state to already have been
         * reported, so we really do need to check ->expmask.
         */
-       if (t->rcu_read_lock_nesting > 0) {
+       if (rcu_preempt_depth() > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
                        rdp->exp_deferred_qs = true;
@@ -670,7 +705,7 @@ static void rcu_exp_handler(void *unused)
        }
 }
 
-/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
+/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
 static void sync_sched_exp_online_cleanup(int cpu)
 {
 }
@@ -785,7 +820,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * implementations, it is still unfriendly to real-time workloads, so is
  * thus not recommended for any sort of common-case code.  In fact, if
  * you are using synchronize_rcu_expedited() in a loop, please restructure
- * your code to batch your updates, and then Use a single synchronize_rcu()
+ * your code to batch your updates, and then use a single synchronize_rcu()
  * instead.
  *
  * This has the same semantics as (but is more brutal than) synchronize_rcu().
index fa08d55f7040c0aa6a9976b328cb8c49195824d6..c6ea81cd41890e8f51ff6da9eb5565d34d1ffcef 100644 (file)
@@ -220,7 +220,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
         * blocked tasks.
         */
        if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
-               rnp->gp_tasks = &t->rcu_node_entry;
+               WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
                WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
        }
        if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
@@ -290,8 +290,8 @@ void rcu_note_context_switch(bool preempt)
 
        trace_rcu_utilization(TPS("Start context switch"));
        lockdep_assert_irqs_disabled();
-       WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
-       if (t->rcu_read_lock_nesting > 0 &&
+       WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
+       if (rcu_preempt_depth() > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
 
                /* Possibly blocking in an RCU read-side critical section. */
@@ -340,7 +340,7 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
  */
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 {
-       return rnp->gp_tasks != NULL;
+       return READ_ONCE(rnp->gp_tasks) != NULL;
 }
 
 /* Bias and limit values for ->rcu_read_lock_nesting. */
@@ -348,6 +348,21 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 #define RCU_NEST_NMAX (-INT_MAX / 2)
 #define RCU_NEST_PMAX (INT_MAX / 2)
 
+static void rcu_preempt_read_enter(void)
+{
+       current->rcu_read_lock_nesting++;
+}
+
+static void rcu_preempt_read_exit(void)
+{
+       current->rcu_read_lock_nesting--;
+}
+
+static void rcu_preempt_depth_set(int val)
+{
+       current->rcu_read_lock_nesting = val;
+}
+
 /*
  * Preemptible RCU implementation for rcu_read_lock().
  * Just increment ->rcu_read_lock_nesting, shared state will be updated
@@ -355,9 +370,9 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
  */
 void __rcu_read_lock(void)
 {
-       current->rcu_read_lock_nesting++;
+       rcu_preempt_read_enter();
        if (IS_ENABLED(CONFIG_PROVE_LOCKING))
-               WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
+               WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
        barrier();  /* critical section after entry code. */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -373,19 +388,19 @@ void __rcu_read_unlock(void)
 {
        struct task_struct *t = current;
 
-       if (t->rcu_read_lock_nesting != 1) {
-               --t->rcu_read_lock_nesting;
+       if (rcu_preempt_depth() != 1) {
+               rcu_preempt_read_exit();
        } else {
                barrier();  /* critical section before exit code. */
-               t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
+               rcu_preempt_depth_set(-RCU_NEST_BIAS);
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
-               t->rcu_read_lock_nesting = 0;
+               rcu_preempt_depth_set(0);
        }
        if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
-               int rrln = t->rcu_read_lock_nesting;
+               int rrln = rcu_preempt_depth();
 
                WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
        }
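
With the accessors in place, the RCU_NEST_BIAS dance in __rcu_read_unlock()
reads more clearly: the depth is parked at a large negative value while
rcu_read_unlock_special() runs, so any read-side critical section nested inside
that window (from an interrupt handler, say) sees depth != 1 and stays on the
fast path instead of recursing into the slow path. A hedged standalone sketch
of the trick:

    #include <limits.h>
    #include <stdio.h>

    #define NEST_BIAS (INT_MAX / 2)   /* mirrors RCU_NEST_BIAS */

    static int depth;                 /* rcu_read_lock_nesting analogue */

    static void read_unlock(void);

    static void read_lock(void)
    {
            depth++;
    }

    static void unlock_slowpath(void)
    {
            /* A nested pair here sees depth != 1, so it cannot recurse. */
            read_lock();
            read_unlock();
    }

    static void read_unlock(void)
    {
            if (depth != 1) {
                    depth--;                /* fast path, biased window included */
            } else {
                    depth = -NEST_BIAS;     /* park at a huge negative value */
                    unlock_slowpath();      /* slow-path work goes here */
                    depth = 0;              /* fully unlocked again */
            }
    }

    int main(void)
    {
            read_lock();
            read_unlock();
            printf("depth = %d\n", depth);  /* 0 */
            return 0;
    }
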
@@ -444,15 +459,9 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
                local_irq_restore(flags);
                return;
        }
-       t->rcu_read_unlock_special.b.deferred_qs = false;
-       if (special.b.need_qs) {
+       t->rcu_read_unlock_special.s = 0;
+       if (special.b.need_qs)
                rcu_qs();
-               t->rcu_read_unlock_special.b.need_qs = false;
-               if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
-                       local_irq_restore(flags);
-                       return;
-               }
-       }
 
        /*
         * Respond to a request by an expedited grace period for a
@@ -460,17 +469,11 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
         * tasks are handled when removing the task from the
         * blocked-tasks list below.
         */
-       if (rdp->exp_deferred_qs) {
+       if (rdp->exp_deferred_qs)
                rcu_report_exp_rdp(rdp);
-               if (!t->rcu_read_unlock_special.s) {
-                       local_irq_restore(flags);
-                       return;
-               }
-       }
 
        /* Clean up if blocked during RCU read-side critical section. */
        if (special.b.blocked) {
-               t->rcu_read_unlock_special.b.blocked = false;
 
                /*
                 * Remove this task from the list it blocked on.  The task
@@ -485,7 +488,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
                empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
                WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
                             (!empty_norm || rnp->qsmask));
-               empty_exp = sync_rcu_preempt_exp_done(rnp);
+               empty_exp = sync_rcu_exp_done(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
@@ -493,7 +496,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
                trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
                                                rnp->gp_seq, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
-                       rnp->gp_tasks = np;
+                       WRITE_ONCE(rnp->gp_tasks, np);
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp->exp_tasks = np;
                if (IS_ENABLED(CONFIG_RCU_BOOST)) {
@@ -509,7 +512,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
                 * so we must take a snapshot of the expedited state.
                 */
-               empty_exp_now = sync_rcu_preempt_exp_done(rnp);
+               empty_exp_now = sync_rcu_exp_done(rnp);
                if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
                                                         rnp->gp_seq,
@@ -551,7 +554,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
        return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
                READ_ONCE(t->rcu_read_unlock_special.s)) &&
-              t->rcu_read_lock_nesting <= 0;
+              rcu_preempt_depth() <= 0;
 }
 
 /*
@@ -564,16 +567,16 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 static void rcu_preempt_deferred_qs(struct task_struct *t)
 {
        unsigned long flags;
-       bool couldrecurse = t->rcu_read_lock_nesting >= 0;
+       bool couldrecurse = rcu_preempt_depth() >= 0;
 
        if (!rcu_preempt_need_deferred_qs(t))
                return;
        if (couldrecurse)
-               t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
+               rcu_preempt_depth_set(rcu_preempt_depth() - RCU_NEST_BIAS);
        local_irq_save(flags);
        rcu_preempt_deferred_qs_irqrestore(t, flags);
        if (couldrecurse)
-               t->rcu_read_lock_nesting += RCU_NEST_BIAS;
+               rcu_preempt_depth_set(rcu_preempt_depth() + RCU_NEST_BIAS);
 }
 
 /*
@@ -610,9 +613,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
                struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
                struct rcu_node *rnp = rdp->mynode;
 
-               t->rcu_read_unlock_special.b.exp_hint = false;
                exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) ||
-                     (rdp->grpmask & rnp->expmask) ||
+                     (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
                      tick_nohz_full_cpu(rdp->cpu);
                // Need to defer quiescent state until everything is enabled.
                if (irqs_were_disabled && use_softirq &&
@@ -640,7 +642,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
                local_irq_restore(flags);
                return;
        }
-       WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
        rcu_preempt_deferred_qs_irqrestore(t, flags);
 }
 
@@ -648,8 +649,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
  * period that still has RCU readers blocked!  This function must be
- * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
- * must be held by the caller.
+ * invoked -before- updating this rnp's ->gp_seq.
  *
  * Also, if there are blocked tasks on the list, they automatically
  * block the newly created grace period, so set up ->gp_tasks accordingly.
@@ -659,11 +659,12 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
        struct task_struct *t;
 
        RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
+       raw_lockdep_assert_held_rcu_node(rnp);
        if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
                dump_blkd_tasks(rnp, 10);
        if (rcu_preempt_has_tasks(rnp) &&
            (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
-               rnp->gp_tasks = rnp->blkd_tasks.next;
+               WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
                t = container_of(rnp->gp_tasks, struct task_struct,
                                 rcu_node_entry);
                trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
@@ -686,7 +687,7 @@ static void rcu_flavor_sched_clock_irq(int user)
        if (user || rcu_is_cpu_rrupt_from_idle()) {
                rcu_note_voluntary_context_switch(current);
        }
-       if (t->rcu_read_lock_nesting > 0 ||
+       if (rcu_preempt_depth() > 0 ||
            (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
                /* No QS, force context switch if deferred. */
                if (rcu_preempt_need_deferred_qs(t)) {
@@ -696,13 +697,13 @@ static void rcu_flavor_sched_clock_irq(int user)
        } else if (rcu_preempt_need_deferred_qs(t)) {
                rcu_preempt_deferred_qs(t); /* Report deferred QS. */
                return;
-       } else if (!t->rcu_read_lock_nesting) {
+       } else if (!rcu_preempt_depth()) {
                rcu_qs(); /* Report immediate QS. */
                return;
        }
 
        /* If GP is oldish, ask for help from rcu_read_unlock_special(). */
-       if (t->rcu_read_lock_nesting > 0 &&
+       if (rcu_preempt_depth() > 0 &&
            __this_cpu_read(rcu_data.core_needs_qs) &&
            __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
            !t->rcu_read_unlock_special.b.need_qs &&
@@ -723,11 +724,11 @@ void exit_rcu(void)
        struct task_struct *t = current;
 
        if (unlikely(!list_empty(&current->rcu_node_entry))) {
-               t->rcu_read_lock_nesting = 1;
+               rcu_preempt_depth_set(1);
                barrier();
                WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
-       } else if (unlikely(t->rcu_read_lock_nesting)) {
-               t->rcu_read_lock_nesting = 1;
+       } else if (unlikely(rcu_preempt_depth())) {
+               rcu_preempt_depth_set(1);
        } else {
                return;
        }
@@ -757,7 +758,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
                pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
                        __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
        pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
-               __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
+               __func__, READ_ONCE(rnp->gp_tasks), rnp->boost_tasks,
+               rnp->exp_tasks);
        pr_info("%s: ->blkd_tasks", __func__);
        i = 0;
        list_for_each(lhp, &rnp->blkd_tasks) {
@@ -788,7 +790,7 @@ static void __init rcu_bootup_announce(void)
 }
 
 /*
- * Note a quiescent state for PREEMPT=n.  Because we do not need to know
+ * Note a quiescent state for PREEMPTION=n.  Because we do not need to know
  * how many quiescent states passed, just if there was at least one since
  * the start of the grace period, this just sets a flag.  The caller must
  * have disabled preemption.
@@ -838,7 +840,7 @@ void rcu_all_qs(void)
 EXPORT_SYMBOL_GPL(rcu_all_qs);
 
 /*
- * Note a PREEMPT=n context switch.  The caller must have disabled interrupts.
+ * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
  */
 void rcu_note_context_switch(bool preempt)
 {
@@ -1262,10 +1264,9 @@ static void rcu_prepare_for_idle(void)
 /*
  * This code is invoked when a CPU goes idle, at which point we want
  * to have the CPU do everything required for RCU so that it can enter
- * the energy-efficient dyntick-idle mode.  This is handled by a
- * state machine implemented by rcu_prepare_for_idle() below.
+ * the energy-efficient dyntick-idle mode.
  *
- * The following three proprocessor symbols control this state machine:
+ * The following preprocessor symbol controls this:
  *
  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
  *     to sleep in dyntick-idle mode with RCU callbacks pending.  This
@@ -1274,21 +1275,15 @@ static void rcu_prepare_for_idle(void)
  *     number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
  *     system.  And if you are -that- concerned about energy efficiency,
  *     just power the system down and be done with it!
- * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
- *     permitted to sleep in dyntick-idle mode with only lazy RCU
- *     callbacks pending.  Setting this too high can OOM your system.
  *
- * The values below work well in practice.  If future workloads require
+ * The value below works well in practice.  If future workloads require
  * adjustment, it can be converted into a kernel config parameter, though
  * making the state machine smarter might be a better option.
  */
 #define RCU_IDLE_GP_DELAY 4            /* Roughly one grace period. */
-#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)        /* Roughly six seconds. */
 
 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
 module_param(rcu_idle_gp_delay, int, 0644);
-static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
-module_param(rcu_idle_lazy_gp_delay, int, 0644);
 
 /*
  * Try to advance callbacks on the current CPU, but only if it has been
@@ -1327,8 +1322,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 /*
  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
- * caller to set the timeout based on whether or not there are non-lazy
- * callbacks.
+ * caller what to set the timeout to.
  *
  * The caller must have disabled interrupts.
  */
@@ -1354,25 +1348,18 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
        }
        rdp->last_accelerate = jiffies;
 
-       /* Request timer delay depending on laziness, and round. */
-       rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist);
-       if (rdp->all_lazy) {
-               dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
-       } else {
-               dj = round_up(rcu_idle_gp_delay + jiffies,
-                              rcu_idle_gp_delay) - jiffies;
-       }
+       /* Request timer and round. */
+       dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies;
+
        *nextevt = basemono + dj * TICK_NSEC;
        return 0;
 }
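Because rcu_idle_gp_delay is a power of two (4 by default), round_up() lands every CPU's wakeup on the same 4-jiffy boundary, so nearby idle CPUs batch onto one timer event. A worked example of the dj computation, assuming jiffies == 1001:

        #include <stdio.h>

        /* Power-of-two round_up(), as in include/linux/kernel.h. */
        #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

        int main(void)
        {
                unsigned long jiffies = 1001, delay = 4;
                unsigned long dj = round_up(delay + jiffies, delay) - jiffies;

                printf("dj = %lu\n", dj);       /* 7: wakeup lands on jiffy 1008 */
                return 0;
        }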
 
 /*
- * Prepare a CPU for idle from an RCU perspective.  The first major task
- * is to sense whether nohz mode has been enabled or disabled via sysfs.
- * The second major task is to check to see if a non-lazy callback has
- * arrived at a CPU that previously had only lazy callbacks.  The third
- * major task is to accelerate (that is, assign grace-period numbers to)
- * any recently arrived callbacks.
+ * Prepare a CPU for idle from an RCU perspective.  The first major task is to
+ * sense whether nohz mode has been enabled or disabled via sysfs.  The second
+ * major task is to accelerate (that is, assign grace-period numbers to) any
+ * recently arrived callbacks.
  *
  * The caller must have disabled interrupts.
  */
@@ -1398,17 +1385,6 @@ static void rcu_prepare_for_idle(void)
        if (!tne)
                return;
 
-       /*
-        * If a non-lazy callback arrived at a CPU having only lazy
-        * callbacks, invoke RCU core for the side-effect of recalculating
-        * idle duration on re-entry to idle.
-        */
-       if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) {
-               rdp->all_lazy = false;
-               invoke_rcu_core();
-               return;
-       }
-
        /*
         * If we have not yet accelerated this jiffy, accelerate all
         * callbacks on this CPU.
@@ -2321,6 +2297,8 @@ static void __init rcu_organize_nocb_kthreads(void)
 {
        int cpu;
        bool firsttime = true;
+       bool gotnocbs = false;
+       bool gotnocbscbs = true;
        int ls = rcu_nocb_gp_stride;
        int nl = 0;  /* Next GP kthread. */
        struct rcu_data *rdp;
@@ -2343,21 +2321,31 @@ static void __init rcu_organize_nocb_kthreads(void)
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (rdp->cpu >= nl) {
                        /* New GP kthread, set up for CBs & next GP. */
+                       gotnocbs = true;
                        nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
                        rdp->nocb_gp_rdp = rdp;
                        rdp_gp = rdp;
-                       if (!firsttime && dump_tree)
-                               pr_cont("\n");
-                       firsttime = false;
-                       pr_alert("%s: No-CB GP kthread CPU %d:", __func__, cpu);
+                       if (dump_tree) {
+                               if (!firsttime)
+                                       pr_cont("%s\n", gotnocbscbs
+                                                       ? "" : " (self only)");
+                               gotnocbscbs = false;
+                               firsttime = false;
+                               pr_alert("%s: No-CB GP kthread CPU %d:",
+                                        __func__, cpu);
+                       }
                } else {
                        /* Another CB kthread, link to previous GP kthread. */
+                       gotnocbscbs = true;
                        rdp->nocb_gp_rdp = rdp_gp;
                        rdp_prev->nocb_next_cb_rdp = rdp;
-                       pr_alert(" %d", cpu);
+                       if (dump_tree)
+                               pr_cont(" %d", cpu);
                }
                rdp_prev = rdp;
        }
+       if (gotnocbs && dump_tree)
+               pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
 }
 
 /*
index c0b8c458d8a6ad267151f6cbffc791c217aeefdf..55f9b84790d3f110745759a715ff9391321b3286 100644 (file)
@@ -163,7 +163,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
 //
 // Printing RCU CPU stall warnings
 
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT_RCU
 
 /*
  * Dump detailed information for all tasks blocking the current RCU
@@ -215,7 +215,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
        return ndetected;
 }
 
-#else /* #ifdef CONFIG_PREEMPTION */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
 
 /*
  * Because preemptible RCU does not exist, we never have to check for
@@ -233,7 +233,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 {
        return 0;
 }
-#endif /* #else #ifdef CONFIG_PREEMPTION */
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /*
  * Dump stacks of all tasks running on stalled CPUs.  First try using
@@ -263,11 +263,9 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 
-       sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
+       sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
-               ".l"[rdp->all_lazy],
-               ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
-               ".D"[!!rdp->tick_nohz_enabled_snap]);
+               !!rdp->tick_nohz_enabled_snap);
 }
 
 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
@@ -279,6 +277,28 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 
 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
 
+static const char * const gp_state_names[] = {
+       [RCU_GP_IDLE] = "RCU_GP_IDLE",
+       [RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
+       [RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
+       [RCU_GP_ONOFF] = "RCU_GP_ONOFF",
+       [RCU_GP_INIT] = "RCU_GP_INIT",
+       [RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
+       [RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
+       [RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
+       [RCU_GP_CLEANED] = "RCU_GP_CLEANED",
+};
+
+/*
+ * Convert a ->gp_state value to a character string.
+ */
+static const char *gp_state_getname(short gs)
+{
+       if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
+               return "???";
+       return gp_state_names[gs];
+}
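The designated-initializer table plus bounds-checked lookup is a stock kernel idiom for enum-to-string conversion: unknown values degrade to "???" rather than indexing past the end of the array. The same shape in a standalone sketch:

        #include <stdio.h>

        enum state { S_IDLE, S_INIT, S_CLEANUP };

        static const char * const state_names[] = {
                [S_IDLE]    = "S_IDLE",
                [S_INIT]    = "S_INIT",
                [S_CLEANUP] = "S_CLEANUP",
        };

        static const char *state_name(int s)
        {
                if (s < 0 || s >= (int)(sizeof(state_names) / sizeof(state_names[0])))
                        return "???";
                return state_names[s];
        }

        int main(void)
        {
                printf("%s %s\n", state_name(S_INIT), state_name(42)); /* S_INIT ??? */
                return 0;
        }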
+
 /*
  * Print out diagnostic information for the specified stalled CPU.
  *
index 1861103662db3d8fe8deacda2fc1d8b72598d36a..6c4b862f57d6fc0620e6a24f857cb9c1a54ce2d4 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/rcupdate_wait.h>
 #include <linux/sched/isolation.h>
 #include <linux/kprobes.h>
+#include <linux/slab.h>
 
 #define CREATE_TRACE_POINTS
 
@@ -51,9 +52,7 @@
 #define MODULE_PARAM_PREFIX "rcupdate."
 
 #ifndef CONFIG_TINY_RCU
-extern int rcu_expedited; /* from sysctl */
 module_param(rcu_expedited, int, 0);
-extern int rcu_normal; /* from sysctl */
 module_param(rcu_normal, int, 0);
 static int rcu_normal_after_boot;
 module_param(rcu_normal_after_boot, int, 0);
@@ -218,6 +217,7 @@ static int __init rcu_set_runtime_mode(void)
 {
        rcu_test_sync_prims();
        rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+       kfree_rcu_scheduler_running();
        rcu_test_sync_prims();
        return 0;
 }
@@ -435,7 +435,7 @@ struct debug_obj_descr rcuhead_debug_descr = {
 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
@@ -853,14 +853,22 @@ static void test_callback(struct rcu_head *r)
 
 DEFINE_STATIC_SRCU(early_srcu);
 
+struct early_boot_kfree_rcu {
+       struct rcu_head rh;
+};
+
 static void early_boot_test_call_rcu(void)
 {
        static struct rcu_head head;
        static struct rcu_head shead;
+       struct early_boot_kfree_rcu *rhp;
 
        call_rcu(&head, test_callback);
        if (IS_ENABLED(CONFIG_SRCU))
                call_srcu(&early_srcu, &shead, test_callback);
+       rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
+       if (!WARN_ON_ONCE(!rhp))
+               kfree_rcu(rhp, rh);
 }
 
 void rcu_early_boot_tests(void)
index 27c48eb7de4025f94653c86878652ad57e660427..a4f86a9d6937cdfa2f13d1dcc9be863c1943d06f 100644 (file)
@@ -310,6 +310,8 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
        int ret;
 
        if (flags & RSEQ_FLAG_UNREGISTER) {
+               if (flags & ~RSEQ_FLAG_UNREGISTER)
+                       return -EINVAL;
                /* Unregister rseq for current thread. */
                if (current->rseq != rseq || !current->rseq)
                        return -EINVAL;
index 1152259a4ca0cd79379eba2c0c23bb3b8003780d..12bca64dff7319e38fce20f8d23f9bcdfd952b3a 100644 (file)
@@ -370,7 +370,7 @@ u64 sched_clock_cpu(int cpu)
        if (sched_clock_stable())
                return sched_clock() + __sched_clock_offset;
 
-       if (!static_branch_unlikely(&sched_clock_running))
+       if (!static_branch_likely(&sched_clock_running))
                return sched_clock();
 
        preempt_disable_notrace();
@@ -393,7 +393,7 @@ void sched_clock_tick(void)
        if (sched_clock_stable())
                return;
 
-       if (!static_branch_unlikely(&sched_clock_running))
+       if (!static_branch_likely(&sched_clock_running))
                return;
 
        lockdep_assert_irqs_disabled();
@@ -460,7 +460,7 @@ void __init sched_clock_init(void)
 
 u64 sched_clock_cpu(int cpu)
 {
-       if (!static_branch_unlikely(&sched_clock_running))
+       if (!static_branch_likely(&sched_clock_running))
                return 0;
 
        return sched_clock();
index 90e4b00ace892eecb12ce935a409715f19c42893..fc1dfc0076045dc2bfcb3f0bd9b60e1ec2e7ac09 100644 (file)
@@ -919,17 +919,17 @@ uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
        return uc_req;
 }
 
-unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
+unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
 {
        struct uclamp_se uc_eff;
 
        /* Task currently refcounted: use back-annotated (effective) value */
        if (p->uclamp[clamp_id].active)
-               return p->uclamp[clamp_id].value;
+               return (unsigned long)p->uclamp[clamp_id].value;
 
        uc_eff = uclamp_eff_get(p, clamp_id);
 
-       return uc_eff.value;
+       return (unsigned long)uc_eff.value;
 }
 
 /*
@@ -1253,7 +1253,8 @@ static void __init init_uclamp(void)
        mutex_init(&uclamp_mutex);
 
        for_each_possible_cpu(cpu) {
-               memset(&cpu_rq(cpu)->uclamp, 0, sizeof(struct uclamp_rq));
+               memset(&cpu_rq(cpu)->uclamp, 0,
+                               sizeof(struct uclamp_rq)*UCLAMP_CNT);
                cpu_rq(cpu)->uclamp_flags = 0;
        }
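The old call cleared only one struct uclamp_rq even though rq->uclamp is an array of UCLAMP_CNT of them, leaving the UCLAMP_MAX slot uninitialized. An alternative spelling (a sketch, not what the patch does) sizes the memset from the object itself so this class of bug cannot recur:

        /* The size tracks the array automatically if its length ever changes: */
        memset(&cpu_rq(cpu)->uclamp, 0, sizeof(cpu_rq(cpu)->uclamp));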
 
@@ -4504,7 +4505,7 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
 void set_user_nice(struct task_struct *p, long nice)
 {
        bool queued, running;
-       int old_prio, delta;
+       int old_prio;
        struct rq_flags rf;
        struct rq *rq;
 
@@ -4538,19 +4539,18 @@ void set_user_nice(struct task_struct *p, long nice)
        set_load_weight(p, true);
        old_prio = p->prio;
        p->prio = effective_prio(p);
-       delta = p->prio - old_prio;
 
-       if (queued) {
+       if (queued)
                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
-               /*
-                * If the task increased its priority or is running and
-                * lowered its priority, then reschedule its CPU:
-                */
-               if (delta < 0 || (delta > 0 && task_running(rq, p)))
-                       resched_curr(rq);
-       }
        if (running)
                set_next_task(rq, p);
+
+       /*
+        * If the task increased its priority or is running and
+        * lowered its priority, then reschedule its CPU:
+        */
+       p->sched_class->prio_changed(rq, p, old_prio);
+
 out_unlock:
        task_rq_unlock(rq, p, &rf);
 }
@@ -7100,6 +7100,12 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 
        if (parent)
                sched_online_group(tg, parent);
+
+#ifdef CONFIG_UCLAMP_TASK_GROUP
+       /* Propagate the effective uclamp value for the new group */
+       cpu_util_update_eff(css);
+#endif
+
        return 0;
 }
 
index b5dcd1d83c7fada652b79576738d9542f0638d67..7c2fe50fd76ddcec0fccd157f989def9a8daac9c 100644 (file)
@@ -5,6 +5,8 @@
  * Copyright (C) 2016, Intel Corporation
  * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  */
+#include <linux/cpufreq.h>
+
 #include "sched.h"
 
 DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
@@ -57,3 +59,19 @@ void cpufreq_remove_update_util_hook(int cpu)
        rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
+
+/**
+ * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated.
+ * @policy: cpufreq policy to check.
+ *
+ * Return 'true' if:
+ * - the local and remote CPUs share @policy, or
+ * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going
+ *   offline (in which case it is not expected to run cpufreq updates any more).
+ */
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
+{
+       return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
+               (policy->dvfs_possible_from_any_cpu &&
+                rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
+}
index 322ca8860f548cf8c0fa700e1a2755127659d2ce..7fbaee24c824f032ea34428ca0d50d3e49d51c0e 100644 (file)
@@ -82,12 +82,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
-        * For the slow switching platforms, the kthread is always scheduled on
-        * the right set of CPUs and any CPU can find the next frequency and
-        * schedule the kthread.
+        * This is needed on slow-switching platforms as well, to prevent
+        * CPUs going offline from leaving stale IRQ work items behind.
         */
-       if (sg_policy->policy->fast_switch_enabled &&
-           !cpufreq_this_cpu_can_update(sg_policy->policy))
+       if (!cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;
 
        if (unlikely(sg_policy->limits_changed)) {
@@ -240,7 +238,7 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
         */
        util = util_cfs + cpu_util_rt(rq);
        if (type == FREQUENCY_UTIL)
-               util = uclamp_util_with(rq, util, p);
+               util = uclamp_rq_util_with(rq, util, p);
 
        dl_util = cpu_util_dl(rq);
 
index b7abca987d945d659b8e50b1d292abe529112707..1a2719e1350a89563bfdf1960802943e9f70be7d 100644 (file)
@@ -46,6 +46,8 @@ static int convert_prio(int prio)
  * @cp: The cpupri context
  * @p: The task
  * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
+ * @fitness_fn: A pointer to a function to do custom checks whether the CPU
+ *              fits specific criteria, so that we only return those CPUs.
  *
  * Note: This function returns the recommended CPUs as calculated during the
  * current invocation.  By the time the call returns, the CPUs may have in
@@ -57,7 +59,8 @@ static int convert_prio(int prio)
  * Return: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
-               struct cpumask *lowest_mask)
+               struct cpumask *lowest_mask,
+               bool (*fitness_fn)(struct task_struct *p, int cpu))
 {
        int idx = 0;
        int task_pri = convert_prio(p->prio);
@@ -98,6 +101,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
                        continue;
 
                if (lowest_mask) {
+                       int cpu;
+
                        cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
 
                        /*
@@ -108,7 +113,23 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
                         * condition, simply act as though we never hit this
                         * priority level and continue on.
                         */
-                       if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+                       if (cpumask_empty(lowest_mask))
+                               continue;
+
+                       if (!fitness_fn)
+                               return 1;
+
+                       /* Ensure the capacity of the CPUs fit the task */
+                       for_each_cpu(cpu, lowest_mask) {
+                               if (!fitness_fn(p, cpu))
+                                       cpumask_clear_cpu(cpu, lowest_mask);
+                       }
+
+                       /*
+                        * If no CPU at the current priority can fit the task,
+                        * continue looking.
+                        */
+                       if (cpumask_empty(lowest_mask))
                                continue;
                }
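The predicate is invoked per candidate CPU and prunes the ones it rejects; the rt.c hunks later in this patch pass rt_task_fits_capacity() here. A hedged sketch of the caller side, with a hypothetical predicate built from helpers this patch already uses:

        /* Hypothetical fitness check: capacity must cover the clamped minimum. */
        static bool fits_min_cap(struct task_struct *p, int cpu)
        {
                return capacity_orig_of(cpu) >= uclamp_eff_value(p, UCLAMP_MIN);
        }

        /* ... */
        if (!cpupri_find(&rq->rd->cpupri, p, lowest_mask, fits_min_cap))
                return -1;      /* no suitable lower-priority CPU found */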
 
index 7dc20a3232e726b3b5f91389395f49d7525120a5..32dd520db11f9fe048e43dc9c5144dac23ea6c83 100644 (file)
@@ -18,7 +18,9 @@ struct cpupri {
 };
 
 #ifdef CONFIG_SMP
-int  cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask);
+int  cpupri_find(struct cpupri *cp, struct task_struct *p,
+                struct cpumask *lowest_mask,
+                bool (*fitness_fn)(struct task_struct *p, int cpu));
 void cpupri_set(struct cpupri *cp, int cpu, int pri);
 int  cpupri_init(struct cpupri *cp);
 void cpupri_cleanup(struct cpupri *cp);
index d43318a489f245d6356094b4b80f96c01e26ceb9..cff3e656566d6e3aede6414e40fa8763bb16bb64 100644 (file)
@@ -355,7 +355,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
  * softirq as those do not count in task exec_runtime any more.
  */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                        struct rq *rq, int ticks)
+                                        int ticks)
 {
        u64 other, cputime = TICK_NSEC * ticks;
 
@@ -381,7 +381,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime);
-       } else if (p == rq->idle) {
+       } else if (p == this_rq()->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime);
@@ -392,14 +392,12 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-       struct rq *rq = this_rq();
-
-       irqtime_account_process_tick(current, 0, rq, ticks);
+       irqtime_account_process_tick(current, 0, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) { }
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-                                               struct rq *rq, int nr_ticks) { }
+                                               int nr_ticks) { }
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -473,13 +471,12 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 void account_process_tick(struct task_struct *p, int user_tick)
 {
        u64 cputime, steal;
-       struct rq *rq = this_rq();
 
        if (vtime_accounting_enabled_this_cpu())
                return;
 
        if (sched_clock_irqtime) {
-               irqtime_account_process_tick(p, user_tick, rq, 1);
+               irqtime_account_process_tick(p, user_tick, 1);
                return;
        }
 
@@ -493,7 +490,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
        if (user_tick)
                account_user_time(p, cputime);
-       else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
+       else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime);
        else
                account_idle_time(cputime);
index f7e4579e746c5e240054faffc7d1025da5c44709..879d3ccf380640e454e8655814b828f334068212 100644 (file)
@@ -751,9 +751,16 @@ void sysrq_sched_debug_show(void)
        int cpu;
 
        sched_debug_header(NULL);
-       for_each_online_cpu(cpu)
+       for_each_online_cpu(cpu) {
+               /*
+                * Need to reset softlockup watchdogs on all CPUs, because
+                * another CPU might be blocked waiting for us to process
+                * an IPI or stop_machine.
+                */
+               touch_nmi_watchdog();
+               touch_all_softlockup_watchdogs();
                print_cpu(NULL, cpu);
-
+       }
 }
 
 /*
index 08a233e97a01974850ddeac7b7ccd6b782ea897b..fe4e0d775375680504d1bc2167996bb975f028a8 100644 (file)
@@ -801,7 +801,7 @@ void post_init_entity_util_avg(struct task_struct *p)
                 * For !fair tasks do:
                 *
                update_cfs_rq_load_avg(now, cfs_rq);
-               attach_entity_load_avg(cfs_rq, se, 0);
+               attach_entity_load_avg(cfs_rq, se);
                switched_from_fair(rq, p);
                 *
                 * such that the next switched_to_fair() has the
@@ -3114,7 +3114,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
        struct rq *rq = rq_of(cfs_rq);
 
-       if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
+       if (&rq->cfs == cfs_rq) {
                /*
                 * There are a few boundary cases this might miss but it should
                 * get called often enough that that should (hopefully) not be
@@ -3366,16 +3366,17 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 
        runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
        runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
-       delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
-       delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
-
-       se->avg.runnable_load_sum = runnable_sum;
-       se->avg.runnable_load_avg = runnable_load_avg;
 
        if (se->on_rq) {
+               delta_sum = runnable_load_sum -
+                               se_weight(se) * se->avg.runnable_load_sum;
+               delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
                add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
                add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
        }
+
+       se->avg.runnable_load_sum = runnable_sum;
+       se->avg.runnable_load_avg = runnable_load_avg;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3520,7 +3521,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
 
@@ -3556,7 +3557,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
        add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-       cfs_rq_util_change(cfs_rq, flags);
+       cfs_rq_util_change(cfs_rq, 0);
 
        trace_pelt_cfs_tp(cfs_rq);
 }
@@ -3614,7 +3615,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
                 *
                 * IOW we're enqueueing a task on a new CPU.
                 */
-               attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
+               attach_entity_load_avg(cfs_rq, se);
                update_tg_load_avg(cfs_rq, 0);
 
        } else if (decayed) {
@@ -3711,6 +3712,20 @@ static inline unsigned long task_util_est(struct task_struct *p)
        return max(task_util(p), _task_util_est(p));
 }
 
+#ifdef CONFIG_UCLAMP_TASK
+static inline unsigned long uclamp_task_util(struct task_struct *p)
+{
+       return clamp(task_util_est(p),
+                    uclamp_eff_value(p, UCLAMP_MIN),
+                    uclamp_eff_value(p, UCLAMP_MAX));
+}
+#else
+static inline unsigned long uclamp_task_util(struct task_struct *p)
+{
+       return task_util_est(p);
+}
+#endif
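clamp() pins the utilization estimate inside the task's effective clamps, which is what lets task_fits_capacity() below reject small CPUs for a lightly loaded but boosted task. With illustrative numbers:

        #include <stdio.h>

        #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

        int main(void)
        {
                unsigned long min = 512, max = 800;       /* effective uclamp values */

                printf("%lu\n", clamp(100UL, min, max));  /* 512: boosted up */
                printf("%lu\n", clamp(900UL, min, max));  /* 800: capped down */
                return 0;
        }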
+
 static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
                                    struct task_struct *p)
 {
@@ -3822,7 +3837,7 @@ done:
 
 static inline int task_fits_capacity(struct task_struct *p, long capacity)
 {
-       return fits_capacity(task_util_est(p), capacity);
+       return fits_capacity(uclamp_task_util(p), capacity);
 }
 
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
@@ -3857,7 +3872,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
@@ -5196,6 +5211,20 @@ static inline void update_overutilized_status(struct rq *rq)
 static inline void update_overutilized_status(struct rq *rq) { }
 #endif
 
+/* Runqueue only has SCHED_IDLE tasks enqueued */
+static int sched_idle_rq(struct rq *rq)
+{
+       return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
+                       rq->nr_running);
+}
+
+#ifdef CONFIG_SMP
+static int sched_idle_cpu(int cpu)
+{
+       return sched_idle_rq(cpu_rq(cpu));
+}
+#endif
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -5310,6 +5339,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        struct sched_entity *se = &p->se;
        int task_sleep = flags & DEQUEUE_SLEEP;
        int idle_h_nr_running = task_has_idle_policy(p);
+       bool was_sched_idle = sched_idle_rq(rq);
 
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
@@ -5356,6 +5386,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        if (!se)
                sub_nr_running(rq, 1);
 
+       /* balance early to pull high priority tasks */
+       if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
+               rq->next_balance = jiffies;
+
        util_est_dequeue(&rq->cfs, p, task_sleep);
        hrtick_update(rq);
 }
@@ -5378,15 +5412,6 @@ static struct {
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
-/* CPU only has SCHED_IDLE tasks enqueued */
-static int sched_idle_cpu(int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-
-       return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
-                       rq->nr_running);
-}
-
 static unsigned long cpu_load(struct rq *rq)
 {
        return cfs_rq_load_avg(&rq->cfs);
@@ -5588,7 +5613,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
        unsigned int min_exit_latency = UINT_MAX;
        u64 latest_idle_timestamp = 0;
        int least_loaded_cpu = this_cpu;
-       int shallowest_idle_cpu = -1, si_cpu = -1;
+       int shallowest_idle_cpu = -1;
        int i;
 
        /* Check if we have any choice: */
@@ -5597,6 +5622,9 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
 
        /* Traverse only the allowed CPUs */
        for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
+               if (sched_idle_cpu(i))
+                       return i;
+
                if (available_idle_cpu(i)) {
                        struct rq *rq = cpu_rq(i);
                        struct cpuidle_state *idle = idle_get_state(rq);
@@ -5619,12 +5647,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
                                latest_idle_timestamp = rq->idle_stamp;
                                shallowest_idle_cpu = i;
                        }
-               } else if (shallowest_idle_cpu == -1 && si_cpu == -1) {
-                       if (sched_idle_cpu(i)) {
-                               si_cpu = i;
-                               continue;
-                       }
-
+               } else if (shallowest_idle_cpu == -1) {
                        load = cpu_load(cpu_rq(i));
                        if (load < min_load) {
                                min_load = load;
@@ -5633,11 +5656,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
                }
        }
 
-       if (shallowest_idle_cpu != -1)
-               return shallowest_idle_cpu;
-       if (si_cpu != -1)
-               return si_cpu;
-       return least_loaded_cpu;
+       return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
@@ -5790,7 +5809,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
  */
 static int select_idle_smt(struct task_struct *p, int target)
 {
-       int cpu, si_cpu = -1;
+       int cpu;
 
        if (!static_branch_likely(&sched_smt_present))
                return -1;
@@ -5798,13 +5817,11 @@ static int select_idle_smt(struct task_struct *p, int target)
        for_each_cpu(cpu, cpu_smt_mask(target)) {
                if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                        continue;
-               if (available_idle_cpu(cpu))
+               if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
                        return cpu;
-               if (si_cpu == -1 && sched_idle_cpu(cpu))
-                       si_cpu = cpu;
        }
 
-       return si_cpu;
+       return -1;
 }
 
 #else /* CONFIG_SCHED_SMT */
@@ -5828,12 +5845,13 @@ static inline int select_idle_smt(struct task_struct *p, int target)
  */
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
 {
+       struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
        struct sched_domain *this_sd;
        u64 avg_cost, avg_idle;
        u64 time, cost;
        s64 delta;
        int this = smp_processor_id();
-       int cpu, nr = INT_MAX, si_cpu = -1;
+       int cpu, nr = INT_MAX;
 
        this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
        if (!this_sd)
@@ -5859,15 +5877,13 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 
        time = cpu_clock(this);
 
-       for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
+       cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+
+       for_each_cpu_wrap(cpu, cpus, target) {
                if (!--nr)
-                       return si_cpu;
-               if (!cpumask_test_cpu(cpu, p->cpus_ptr))
-                       continue;
-               if (available_idle_cpu(cpu))
+                       return -1;
+               if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
                        break;
-               if (si_cpu == -1 && sched_idle_cpu(cpu))
-                       si_cpu = cpu;
        }
 
        time = cpu_clock(this) - time;
@@ -6268,9 +6284,18 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                        if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                                continue;
 
-                       /* Skip CPUs that will be overutilized. */
                        util = cpu_util_next(cpu, p, cpu);
                        cpu_cap = capacity_of(cpu);
+                       spare_cap = cpu_cap - util;
+
+                       /*
+                        * Skip CPUs that cannot satisfy the capacity request.
+                        * IOW, placing the task there would make the CPU
+                        * overutilized. Take uclamp into account to see how
+                        * much capacity we can get out of the CPU; this is
+                        * aligned with schedutil_cpu_util().
+                        */
+                       util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
                        if (!fits_capacity(util, cpu_cap))
                                continue;
 
@@ -6285,7 +6310,6 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                         * Find the CPU with the maximum spare capacity in
                         * the performance domain
                         */
-                       spare_cap = cpu_cap - util;
                        if (spare_cap > max_spare_cap) {
                                max_spare_cap = spare_cap;
                                max_spare_cap_cpu = cpu;
@@ -7328,7 +7352,14 @@ static int detach_tasks(struct lb_env *env)
                            load < 16 && !env->sd->nr_balance_failed)
                                goto next;
 
-                       if (load/2 > env->imbalance)
+                       /*
+                        * Make sure that we don't migrate too much load.
+                        * Nevertheless, let's relax the constraint if the
+                        * scheduler fails to find a good waiting task to
+                        * migrate.
+                        */
+                       if (load/2 > env->imbalance &&
+                           env->sd->nr_balance_failed <= env->sd->cache_nice_tries)
                                goto next;
 
                        env->imbalance -= load;
@@ -7773,29 +7804,11 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                 */
 
                for_each_cpu(cpu, sched_group_span(sdg)) {
-                       struct sched_group_capacity *sgc;
-                       struct rq *rq = cpu_rq(cpu);
-
-                       /*
-                        * build_sched_domains() -> init_sched_groups_capacity()
-                        * gets here before we've attached the domains to the
-                        * runqueues.
-                        *
-                        * Use capacity_of(), which is set irrespective of domains
-                        * in update_cpu_capacity().
-                        *
-                        * This avoids capacity from being 0 and
-                        * causing divide-by-zero issues on boot.
-                        */
-                       if (unlikely(!rq->sd)) {
-                               capacity += capacity_of(cpu);
-                       } else {
-                               sgc = rq->sd->groups->sgc;
-                               capacity += sgc->capacity;
-                       }
+                       unsigned long cpu_cap = capacity_of(cpu);
 
-                       min_capacity = min(capacity, min_capacity);
-                       max_capacity = max(capacity, max_capacity);
+                       capacity += cpu_cap;
+                       min_capacity = min(cpu_cap, min_capacity);
+                       max_capacity = max(cpu_cap, max_capacity);
                }
        } else  {
                /*
@@ -8161,14 +8174,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 
        case group_has_spare:
                /*
-                * Select not overloaded group with lowest number of
-                * idle cpus. We could also compare the spare capacity
-                * which is more stable but it can end up that the
-                * group has less spare capacity but finally more idle
+                * Select the non-overloaded group with the lowest number of
+                * idle CPUs and the highest number of running tasks. We could
+                * also compare the spare capacity, which is more stable, but a
+                * group with less spare capacity can end up with more idle
                 * CPUs which means less opportunity to pull tasks.
                 */
-               if (sgs->idle_cpus >= busiest->idle_cpus)
+               if (sgs->idle_cpus > busiest->idle_cpus)
                        return false;
+               else if ((sgs->idle_cpus == busiest->idle_cpus) &&
+                        (sgs->sum_nr_running <= busiest->sum_nr_running))
+                       return false;
+
                break;
        }
 
@@ -8417,6 +8434,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
        if (!idlest)
                return NULL;
 
+       /* The local group has been skipped because of CPU affinity */
+       if (!local)
+               return idlest;
+
        /*
         * If the local group is idler than the selected idlest group
         * don't try and push the task.
@@ -9518,6 +9539,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 {
        int continue_balancing = 1;
        int cpu = rq->cpu;
+       int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
        unsigned long interval;
        struct sched_domain *sd;
        /* Earliest time when we have to do rebalance again */
@@ -9554,7 +9576,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
                        break;
                }
 
-               interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
+               interval = get_sd_balance_interval(sd, busy);
 
                need_serialize = sd->flags & SD_SERIALIZE;
                if (need_serialize) {
@@ -9570,9 +9592,10 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
                                 * state even if we migrated tasks. Update it.
                                 */
                                idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
+                               busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
                        }
                        sd->last_balance = jiffies;
-                       interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
+                       interval = get_sd_balance_interval(sd, busy);
                }
                if (need_serialize)
                        spin_unlock(&balancing);
@@ -10322,6 +10345,9 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
        if (!task_on_rq_queued(p))
                return;
 
+       if (rq->cfs.nr_running == 1)
+               return;
+
        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running on
@@ -10412,7 +10438,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 
        /* Synchronize entity with its cfs_rq */
        update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-       attach_entity_load_avg(cfs_rq, se, 0);
+       attach_entity_load_avg(cfs_rq, se);
        update_tg_load_avg(cfs_rq, false);
        propagate_entity_cfs_rq(se);
 }
index ffa959e912275fb93cfd1f0fdf15cdb48d269ca7..b743bf38f08fbd4c56aa4b82f138278b29794f3c 100644 (file)
@@ -158,7 +158,7 @@ static void cpuidle_idle_call(void)
        /*
         * Suspend-to-idle ("s2idle") is a system state in which all user space
         * has been frozen, all I/O devices have been suspended and the only
-        * activity happens here and in iterrupts (if any).  In that case bypass
+        * activity happens here and in interrupts (if any). In that case bypass
        * the cpuidle governor and go straight for the deepest idle state
         * available.  Possibly also suspend the local tick and the entire
         * timekeeping to prevent timer interrupts from kicking us out of idle
index 9fcb2a695a41289fe490c64a0656300a74fefb99..008d6ac2342b7536fff293b2ce7054cf9d9cfc98 100644 (file)
@@ -163,6 +163,12 @@ static int __init housekeeping_isolcpus_setup(char *str)
                        continue;
                }
 
+               if (!strncmp(str, "managed_irq,", 12)) {
+                       str += 12;
+                       flags |= HK_FLAG_MANAGED_IRQ;
+                       continue;
+               }
+
                pr_warn("isolcpus: Error, unknown flag\n");
                return 0;
        }
index a96db50d40e06339644b53d1314f6a899b9096ec..bd006b79b3608b9d5d371996dfe523b7f5bbfacd 100644 (file)
@@ -129,8 +129,20 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
                 * Step 2
                 */
                delta %= 1024;
-               contrib = __accumulate_pelt_segments(periods,
-                               1024 - sa->period_contrib, delta);
+               if (load) {
+                       /*
+                        * This relies on the:
+                        *
+                        * if (!load)
+                        *      runnable = running = 0;
+                        *
+                        * clause from ___update_load_sum(); this results in
+                        * the below usage of @contrib to disappear entirely,
+                        * so no point in calculating it.
+                        */
+                       contrib = __accumulate_pelt_segments(periods,
+                                       1024 - sa->period_contrib, delta);
+               }
        }
        sa->period_contrib = delta;
 
@@ -205,7 +217,9 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
         * This means that weight will be 0 but not running for a sched_entity
         * but also for a cfs_rq if the latter becomes idle. As an example,
         * this happens during idle_balance() which calls
-        * update_blocked_averages()
+        * update_blocked_averages().
+        *
+        * Also see the comment in accumulate_sum().
         */
        if (!load)
                runnable = running = 0;
index 517e3719027e619e5c7b565d1de9294dfffb5a3c..db7b50bba3f1f181e03e5911f365759ecf3acf99 100644 (file)
@@ -185,7 +185,8 @@ static void group_init(struct psi_group *group)
 
        for_each_possible_cpu(cpu)
                seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
-       group->avg_next_update = sched_clock() + psi_period;
+       group->avg_last_update = sched_clock();
+       group->avg_next_update = group->avg_last_update + psi_period;
        INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
        mutex_init(&group->avgs_lock);
        /* Init trigger-related members */
@@ -481,7 +482,7 @@ static u64 window_update(struct psi_window *win, u64 now, u64 value)
                u32 remaining;
 
                remaining = win->size - elapsed;
-               growth += div_u64(win->prev_growth * remaining, win->size);
+               growth += div64_u64(win->prev_growth * remaining, win->size);
        }
 
        return growth;
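div_u64() takes a 32-bit divisor, and a PSI window may be as long as 10 seconds, i.e. 10^10 ns, which does not fit in 32 bits; div64_u64() divides by the full 64-bit win->size. The failure mode of the truncated divisor, in miniature:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t prev_growth = 1000, remaining = 5000000000ULL;
                uint64_t size = 10000000000ULL;          /* 10 s window in ns */

                /* low 32 bits of size only, as div_u64() would use: */
                printf("%llu\n", (unsigned long long)
                       (prev_growth * remaining / (uint32_t)size)); /* ~3545, bogus */
                /* full-width divide, as div64_u64() does: */
                printf("%llu\n", (unsigned long long)
                       (prev_growth * remaining / size));           /* 500 */
                return 0;
        }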
@@ -1279,10 +1280,12 @@ static const struct file_operations psi_cpu_fops = {
 
 static int __init psi_proc_init(void)
 {
-       proc_mkdir("pressure", NULL);
-       proc_create("pressure/io", 0, NULL, &psi_io_fops);
-       proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
-       proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
+       if (psi_enable) {
+               proc_mkdir("pressure", NULL);
+               proc_create("pressure/io", 0, NULL, &psi_io_fops);
+               proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
+               proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
+       }
        return 0;
 }
 module_init(psi_proc_init);
index e591d40fd645e451e79946fc9801ee24e72ef68e..4043abe45459df664d96351030966fb7104384e2 100644 (file)
@@ -437,6 +437,45 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
        return rt_se->on_rq;
 }
 
+#ifdef CONFIG_UCLAMP_TASK
+/*
+ * Verify the fitness of task @p to run on @cpu taking into account the uclamp
+ * settings.
+ *
+ * This check is only important for heterogeneous systems where the uclamp_min
+ * value is higher than the capacity of a @cpu. For non-heterogeneous systems
+ * this function will always return true.
+ *
+ * The function will return true if the capacity of the @cpu is >= the
+ * uclamp_min and false otherwise.
+ *
+ * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
+ * > uclamp_max.
+ */
+static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
+{
+       unsigned int min_cap;
+       unsigned int max_cap;
+       unsigned int cpu_cap;
+
+       /* Only heterogeneous systems can benefit from this check */
+       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               return true;
+
+       min_cap = uclamp_eff_value(p, UCLAMP_MIN);
+       max_cap = uclamp_eff_value(p, UCLAMP_MAX);
+
+       cpu_cap = capacity_orig_of(cpu);
+
+       return cpu_cap >= min(min_cap, max_cap);
+}
+#else
+static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
+{
+       return true;
+}
+#endif
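A worked example on an asymmetric system makes the min(min_cap, max_cap) term concrete (capacities are illustrative):

        /*
         * little CPU: capacity_orig_of() == 446,  big CPU: == 1024
         *
         * task with uclamp_min == 512, uclamp_max == 1024:
         *   little: 446  >= min(512, 1024) -> false, does not fit
         *   big:    1024 >= min(512, 1024) -> true
         *
         * same task with uclamp_max == 300: the requirement collapses to
         * min(512, 300) == 300, so the little CPU fits again.
         */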
+
 #ifdef CONFIG_RT_GROUP_SCHED
 
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
@@ -1391,6 +1430,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
        struct task_struct *curr;
        struct rq *rq;
+       bool test;
 
        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
@@ -1422,10 +1462,16 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out.
+        *
+        * We take into account the capacity of the CPU to ensure it fits the
+        * requirement of the task - which is only important on heterogeneous
+        * systems like big.LITTLE.
         */
-       if (curr && unlikely(rt_task(curr)) &&
-           (curr->nr_cpus_allowed < 2 ||
-            curr->prio <= p->prio)) {
+       test = curr &&
+              unlikely(rt_task(curr)) &&
+              (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
+
+       if (test || !rt_task_fits_capacity(p, cpu)) {
                int target = find_lowest_rq(p);
 
                /*
@@ -1449,15 +1495,15 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         * let's hope p can move out.
         */
        if (rq->curr->nr_cpus_allowed == 1 ||
-           !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+           !cpupri_find(&rq->rd->cpupri, rq->curr, NULL, NULL))
                return;
 
        /*
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
-       if (p->nr_cpus_allowed != 1
-           && cpupri_find(&rq->rd->cpupri, p, NULL))
+       if (p->nr_cpus_allowed != 1 &&
+           cpupri_find(&rq->rd->cpupri, p, NULL, NULL))
                return;
 
        /*
@@ -1601,7 +1647,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
-           cpumask_test_cpu(cpu, p->cpus_ptr))
+           cpumask_test_cpu(cpu, p->cpus_ptr) &&
+           rt_task_fits_capacity(p, cpu))
                return 1;
 
        return 0;
@@ -1643,7 +1690,8 @@ static int find_lowest_rq(struct task_struct *task)
        if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
 
-       if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
+       if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask,
+                        rt_task_fits_capacity))
                return -1; /* No targets found */
 
        /*
@@ -2147,12 +2195,14 @@ skip:
  */
 static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
-       if (!task_running(rq, p) &&
-           !test_tsk_need_resched(rq->curr) &&
-           p->nr_cpus_allowed > 1 &&
-           (dl_task(rq->curr) || rt_task(rq->curr)) &&
-           (rq->curr->nr_cpus_allowed < 2 ||
-            rq->curr->prio <= p->prio))
+       bool need_to_push = !task_running(rq, p) &&
+                           !test_tsk_need_resched(rq->curr) &&
+                           p->nr_cpus_allowed > 1 &&
+                           (dl_task(rq->curr) || rt_task(rq->curr)) &&
+                           (rq->curr->nr_cpus_allowed < 2 ||
+                            rq->curr->prio <= p->prio);
+
+       if (need_to_push || !rt_task_fits_capacity(p, cpu_of(rq)))
                push_rt_tasks(rq);
 }
 
@@ -2224,7 +2274,10 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
         */
        if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-               if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+               bool need_to_push = rq->rt.overloaded ||
+                                   !rt_task_fits_capacity(p, cpu_of(rq));
+
+               if (p->nr_cpus_allowed > 1 && need_to_push)
                        rt_queue_push_tasks(rq);
 #endif /* CONFIG_SMP */
                if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
index 280a3c73593559d0d68fca27dc16f18dfafc8c5b..1a88dc8ad11b71266480a1ef260cc522fee099ee 100644 (file)
@@ -2300,14 +2300,14 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef CONFIG_UCLAMP_TASK
-unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
+unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
 static __always_inline
-unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
-                             struct task_struct *p)
+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
+                                 struct task_struct *p)
 {
-       unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
-       unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
+       unsigned long min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
+       unsigned long max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
 
        if (p) {
                min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
@@ -2324,18 +2324,10 @@ unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
 
        return clamp(util, min_util, max_util);
 }
-
-static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
-{
-       return uclamp_util_with(rq, util, NULL);
-}
 #else /* CONFIG_UCLAMP_TASK */
-static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
-                                           struct task_struct *p)
-{
-       return util;
-}
-static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
+static inline
+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
+                                 struct task_struct *p)
 {
        return util;
 }
index 6ec1e595b1d428445d5818871f5576334930c2fa..dfb64c08a407a8f558c65f47a3a1ecd602df571a 100644 (file)
@@ -1879,6 +1879,42 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
        return sd;
 }
 
+/*
+ * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
+ * any two given CPUs at this (non-NUMA) topology level.
+ */
+static bool topology_span_sane(struct sched_domain_topology_level *tl,
+                             const struct cpumask *cpu_map, int cpu)
+{
+       int i;
+
+       /* NUMA levels are allowed to overlap */
+       if (tl->flags & SDTL_OVERLAP)
+               return true;
+
+       /*
+        * Non-NUMA levels cannot partially overlap - they must be either
+        * completely equal or completely disjoint. Otherwise we can end up
+        * breaking the sched_group lists - i.e. a later get_group() pass
+        * breaks the linking done for an earlier span.
+        */
+       for_each_cpu(i, cpu_map) {
+               if (i == cpu)
+                       continue;
+               /*
+                * We should 'and' all those masks with 'cpu_map' to exactly
+                * match the topology we're about to build, but that can only
+                * remove CPUs, which only lessens our ability to detect
+                * overlaps.
+                */
+               if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
+                   cpumask_intersects(tl->mask(cpu), tl->mask(i)))
+                       return false;
+       }
+
+       return true;
+}
+
 /*
  * Find the sched_domain_topology_level where all CPU capacities are visible
  * for all CPUs.
@@ -1975,6 +2011,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
                                has_asym = true;
                        }
 
+                       if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
+                               goto error;
+
                        sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
 
                        if (tl == sched_domain_topology)
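
topology_span_sane() enforces the pairwise rule spelled out in its comment: at any non-NUMA level, the masks of two CPUs must be either identical or disjoint, and a violation now aborts the domain build behind WARN_ON(). A userspace model of the check, with 64-bit words standing in for struct cpumask:

    /* Model of the pairwise span check: partial overlap is the failure
     * mode that corrupts sched_group linking. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool span_sane(const uint64_t *mask, int nr_cpus)
    {
            for (int i = 0; i < nr_cpus; i++)
                    for (int j = i + 1; j < nr_cpus; j++)
                            if (mask[i] != mask[j] && (mask[i] & mask[j]))
                                    return false;   /* partial overlap */
            return true;
    }

    int main(void)
    {
            uint64_t ok[]  = { 0x3, 0x3, 0xc, 0xc }; /* {0,1}{0,1}{2,3}{2,3} */
            uint64_t bad[] = { 0x3, 0x6, 0xc, 0xc }; /* {0,1} overlaps {1,2} */

            printf("ok=%d bad=%d\n", span_sane(ok, 4), span_sane(bad, 4));
            return 0;
    }
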
index 45eba18a28984c0d7c36f29e54a900b531a3fbb7..02ce292b9bc09592552c630dd1cfb845c6dce737 100644 (file)
@@ -179,6 +179,7 @@ void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int
                        .bit_nr = -1,
                },
                .wq_entry = {
+                       .flags   = flags,
                        .private = current,
                        .func    = var_wake_function,
                        .entry   = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
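
The one-line fix above matters because fields omitted from a designated initializer are zero-initialized: the flags argument of init_wait_var_entry() was silently dropped, so exclusive waiters were treated as non-exclusive. A userspace reminder of that initializer rule:

    /* Fields left out of a designated initializer are zeroed, so a
     * parameter meant to land in the struct must be named explicitly. */
    #include <stdio.h>

    struct wq_entry { unsigned int flags; void *private; };

    static struct wq_entry make_entry(unsigned int flags, int pass_flags)
    {
            if (pass_flags)
                    return (struct wq_entry){ .flags = flags };
            return (struct wq_entry){ .private = NULL }; /* .flags == 0 */
    }

    int main(void)
    {
            printf("%u %u\n", make_entry(3, 1).flags,
                              make_entry(3, 0).flags);
            return 0;
    }
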
index 12d2227e5786794260a7018a9141f083b575f732..b6ea3dcb57bfefa47233ada64d74f822b6666ab6 100644 (file)
@@ -1026,6 +1026,13 @@ static long seccomp_notify_recv(struct seccomp_filter *filter,
        struct seccomp_notif unotif;
        ssize_t ret;
 
+       /* Verify that we're not given garbage, to keep the struct extensible. */
+       ret = check_zeroed_user(buf, sizeof(unotif));
+       if (ret < 0)
+               return ret;
+       if (!ret)
+               return -EINVAL;
+
        memset(&unotif, 0, sizeof(unotif));
 
        ret = down_interruptible(&filter->notif->request);
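
The added check_zeroed_user() call rejects any notification buffer containing non-zero bytes, which is what keeps struct seccomp_notif extensible: userspace that zeroes the whole struct keeps working when new fields are appended later. A userspace model of the rule (the real helper also returns a negative errno on a faulting access, which this sketch omits):

    /* Model of the "buffer must be all zeroes" acceptance test. */
    #include <stdio.h>
    #include <string.h>

    /* Returns 1 if every byte is zero, 0 otherwise. */
    static int check_zeroed(const unsigned char *buf, size_t len)
    {
            for (size_t i = 0; i < len; i++)
                    if (buf[i])
                            return 0;
            return 1;
    }

    int main(void)
    {
            unsigned char unotif[16];

            memset(unotif, 0, sizeof(unotif));
            printf("%d\n", check_zeroed(unotif, sizeof(unotif))); /* 1 */

            unotif[5] = 0xff;
            printf("%d\n", check_zeroed(unotif, sizeof(unotif))); /* 0 */
            return 0;
    }
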
index 7dbcb402c2fc0af441d75d13eeaef7a642f4fd57..3b7bedc97af38d004f74439b4e587d9666e42580 100644 (file)
@@ -395,22 +395,9 @@ call:
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-                           smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+                                       smp_call_func_t func, void *info,
+                                       bool wait, smp_cond_func_t cond_func)
 {
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
-               smp_call_function_single(cpu, func, info, wait);
+               if (!cond_func || (cond_func && cond_func(cpu, info)))
+                       smp_call_function_single(cpu, func, info, wait);
                return;
        }
 
@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
+               if (cond_func && !cond_func(cpu, info))
+                       continue;
+
                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
                }
        }
 }
+
+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+                           smp_call_func_t func, void *info, bool wait)
+{
+       smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);
 
 /**
@@ -668,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:      An arbitrary pointer to pass to both functions.
  * @wait:      If true, wait (atomically) until function has
  *             completed on other CPUs.
- * @gfp_flags: GFP flags to use when allocating the cpumask
- *             used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
@@ -680,46 +686,27 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-                       smp_call_func_t func, void *info, bool wait,
-                       gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+                          void *info, bool wait, const struct cpumask *mask)
 {
-       cpumask_var_t cpus;
-       int cpu, ret;
-
-       might_sleep_if(gfpflags_allow_blocking(gfp_flags));
-
-       if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-               preempt_disable();
-               for_each_cpu(cpu, mask)
-                       if (cond_func(cpu, info))
-                               __cpumask_set_cpu(cpu, cpus);
-               on_each_cpu_mask(cpus, func, info, wait);
-               preempt_enable();
-               free_cpumask_var(cpus);
-       } else {
-               /*
-                * No free cpumask, bother. No matter, we'll
-                * just have to IPI them one by one.
-                */
-               preempt_disable();
-               for_each_cpu(cpu, mask)
-                       if (cond_func(cpu, info)) {
-                               ret = smp_call_function_single(cpu, func,
-                                                               info, wait);
-                               WARN_ON_ONCE(ret);
-                       }
-               preempt_enable();
+       int cpu = get_cpu();
+
+       smp_call_function_many_cond(mask, func, info, wait, cond_func);
+       if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               func(info);
+               local_irq_restore(flags);
        }
+       put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-                       smp_call_func_t func, void *info, bool wait,
-                       gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+                     void *info, bool wait)
 {
-       on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-                               cpu_online_mask);
+       on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
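
With the filtering folded into smp_call_function_many_cond(), on_each_cpu_cond_mask() no longer allocates a scratch cpumask and therefore no longer takes GFP flags: the predicate is evaluated once per CPU while IPIs are queued, and the local CPU is handled inline with interrupts disabled. A userspace model of the single-pass structure:

    /* Model of the reworked conditional cross-call: one pass over the
     * mask, predicate evaluated per CPU, local CPU called directly. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static void call_many_cond(bool (*cond)(int, void *),
                               void (*func)(void *), void *info, int this_cpu)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    if (cpu == this_cpu || !cond(cpu, info))
                            continue;
                    printf("IPI cpu%d\n", cpu); /* would queue csd + IPI */
            }
            if (cond(this_cpu, info))
                    func(info);                 /* local call, IRQs off */
    }

    static bool odd_cpu(int cpu, void *info) { return cpu & 1; }
    static void work(void *info) { puts("local work"); }

    int main(void)
    {
            call_many_cond(odd_cpu, work, NULL, 1);
            return 0;
    }
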
 
index 1fe34a9fabc2b15052c7e9224e4b940036d94b04..865bb0228ab667d9407a889c45e47afd1e35d383 100644 (file)
@@ -442,7 +442,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
  * @cpumask were offline; otherwise, 0 if all executions of @fn
  * returned 0, any non zero return value if any returned non zero.
  */
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 {
        int ret;
 
@@ -453,36 +453,6 @@ int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
        return ret;
 }
 
-/**
- * try_stop_cpus - try to stop multiple cpus
- * @cpumask: cpus to stop
- * @fn: function to execute
- * @arg: argument to @fn
- *
- * Identical to stop_cpus() except that it fails with -EAGAIN if
- * someone else is already using the facility.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * -EAGAIN if someone else is already stopping cpus, -ENOENT if
- * @fn(@arg) was not executed at all because all cpus in @cpumask were
- * offline; otherwise, 0 if all executions of @fn returned 0, any non
- * zero return value if any returned non zero.
- */
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
-{
-       int ret;
-
-       /* static works are used, process one request at a time */
-       if (!mutex_trylock(&stop_cpus_mutex))
-               return -EAGAIN;
-       ret = __stop_cpus(cpumask, fn, arg);
-       mutex_unlock(&stop_cpus_mutex);
-       return ret;
-}
-
 static int cpu_stop_should_run(unsigned int cpu)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
index 70665934d53e2aa04caa7e0d77d1bc3f62c41e19..d396aaaf19a329203e4e03dfaa3f33fffa93e0f1 100644 (file)
@@ -1268,7 +1268,7 @@ static struct ctl_table kern_table[] = {
                .proc_handler   = proc_do_static_key,
        },
 #endif
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
        {
                .procname       = "panic_on_rcu_stall",
                .data           = &sysctl_panic_on_rcu_stall,
index 13a0f2e6ebc2c5de1d1a3c00a780ef9588d4cfb7..e2ac0e37c4ae7bac1b574e8b660a1203d01a75fd 100644 (file)
@@ -554,25 +554,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
 {
        struct signal_struct *sig = tsk->signal;
-       struct taskstats *stats;
+       struct taskstats *stats_new, *stats;
 
-       if (sig->stats || thread_group_empty(tsk))
-               goto ret;
+       /* Pairs with smp_store_release() below. */
+       stats = smp_load_acquire(&sig->stats);
+       if (stats || thread_group_empty(tsk))
+               return stats;
 
        /* No problem if kmem_cache_zalloc() fails */
-       stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
+       stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
 
        spin_lock_irq(&tsk->sighand->siglock);
-       if (!sig->stats) {
-               sig->stats = stats;
-               stats = NULL;
+       stats = sig->stats;
+       if (!stats) {
+               /*
+                * Pairs with smp_load_acquire() above and orders the
+                * kmem_cache_zalloc().
+                */
+               smp_store_release(&sig->stats, stats_new);
+               stats = stats_new;
+               stats_new = NULL;
        }
        spin_unlock_irq(&tsk->sighand->siglock);
 
-       if (stats)
-               kmem_cache_free(taskstats_cache, stats);
-ret:
-       return sig->stats;
+       if (stats_new)
+               kmem_cache_free(taskstats_cache, stats_new);
+
+       return stats;
 }
 
 /* Send pid data out on exit */
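
The taskstats rework above is the standard lock-free publication pattern: allocate outside the lock, publish the pointer with a release store, and let readers observe it with an acquire load so the zeroed contents are visible. A userspace model with C11 atomics, where a failed compare-and-swap plays the role of "sig->stats was already set under the lock":

    /* Model of acquire/release pointer publication. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct stats { long counter; };

    static _Atomic(struct stats *) shared;

    static struct stats *get_or_alloc(void)
    {
            struct stats *s = atomic_load_explicit(&shared,
                                                   memory_order_acquire);
            if (s)
                    return s;

            struct stats *fresh = calloc(1, sizeof(*fresh));
            struct stats *expected = NULL;

            if (!atomic_compare_exchange_strong_explicit(&shared, &expected,
                            fresh, memory_order_release,
                            memory_order_acquire)) {
                    free(fresh);    /* lost the race; use the winner's */
                    return expected;
            }
            return fresh;
    }

    int main(void)
    {
            printf("%p\n", (void *)get_or_alloc());
            return 0;
    }
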
index 1867044800bb43239856c57ce728aa8febedbd8a..c8f00168afe843f8041ddddf94ccb23c77a2a690 100644 (file)
@@ -19,3 +19,4 @@ obj-$(CONFIG_TICK_ONESHOT)                    += tick-oneshot.o tick-sched.o
 obj-$(CONFIG_HAVE_GENERIC_VDSO)                        += vsyscall.o
 obj-$(CONFIG_DEBUG_FS)                         += timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)                      += test_udelay.o
+obj-$(CONFIG_TIME_NS)                          += namespace.o
index 451f9d05ccfec5397eee2784fb92e695d2e9c86e..2ffb466af77ecc85ccb7e7c5f43ba0862404f428 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/freezer.h>
 #include <linux/compat.h>
 #include <linux/module.h>
+#include <linux/time_namespace.h>
 
 #include "posix-timers.h"
 
  * struct alarm_base - Alarm timer bases
  * @lock:              Lock for synchronized access to the base
  * @timerqueue:                Timerqueue head managing the list of events
- * @gettime:           Function to read the time correlating to the base
+ * @get_ktime:         Function to read the time correlating to the base
+ * @get_timespec:      Function to read the namespace time correlating to the base
  * @base_clockid:      clockid for the base
  */
 static struct alarm_base {
        spinlock_t              lock;
        struct timerqueue_head  timerqueue;
-       ktime_t                 (*gettime)(void);
+       ktime_t                 (*get_ktime)(void);
+       void                    (*get_timespec)(struct timespec64 *tp);
        clockid_t               base_clockid;
 } alarm_bases[ALARM_NUMTYPE];
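
Each alarm base now carries two readers: get_ktime feeds the timer machinery with the root-namespace (host) clock, while get_timespec hands userspace the value adjusted by the task's time namespace offset. A userspace model of why the two views differ (offsets illustrative):

    /* Model of the host vs. namespace view of a boottime-style clock. */
    #include <stdio.h>

    static long long host_clock_ns = 1000000000LL;   /* host reading */
    static long long timens_offset_ns = 500000000LL; /* per-namespace */

    static long long get_ktime(void)
    {
            return host_clock_ns;                    /* root-namespace view */
    }

    static long long get_timespec_ns(void)
    {
            return host_clock_ns + timens_offset_ns; /* task's view */
    }

    int main(void)
    {
            printf("host=%lld task=%lld\n", get_ktime(), get_timespec_ns());
            return 0;
    }
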
 
@@ -55,8 +58,6 @@ static DEFINE_SPINLOCK(freezer_delta_lock);
 #endif
 
 #ifdef CONFIG_RTC_CLASS
-static struct wakeup_source *ws;
-
 /* rtc timer and device for setting alarm wakeups at suspend */
 static struct rtc_timer                rtctimer;
 static struct rtc_device       *rtcdev;
@@ -66,8 +67,6 @@ static DEFINE_SPINLOCK(rtcdev_lock);
  * alarmtimer_get_rtcdev - Return selected rtcdevice
  *
  * This function returns the rtc device to use for wakealarms.
- * If one has not already been chosen, it checks to see if a
- * functional rtc device is available.
  */
 struct rtc_device *alarmtimer_get_rtcdev(void)
 {
@@ -87,7 +86,8 @@ static int alarmtimer_rtc_add_device(struct device *dev,
 {
        unsigned long flags;
        struct rtc_device *rtc = to_rtc_device(dev);
-       struct wakeup_source *__ws;
+       struct platform_device *pdev;
+       int ret = 0;
 
        if (rtcdev)
                return -EBUSY;
@@ -97,26 +97,31 @@ static int alarmtimer_rtc_add_device(struct device *dev,
        if (!device_may_wakeup(rtc->dev.parent))
                return -1;
 
-       __ws = wakeup_source_register(dev, "alarmtimer");
+       pdev = platform_device_register_data(dev, "alarmtimer",
+                                            PLATFORM_DEVID_AUTO, NULL, 0);
+       if (!IS_ERR(pdev))
+               device_init_wakeup(&pdev->dev, true);
 
        spin_lock_irqsave(&rtcdev_lock, flags);
-       if (!rtcdev) {
+       if (!IS_ERR(pdev) && !rtcdev) {
                if (!try_module_get(rtc->owner)) {
-                       spin_unlock_irqrestore(&rtcdev_lock, flags);
-                       return -1;
+                       ret = -1;
+                       goto unlock;
                }
 
                rtcdev = rtc;
                /* hold a reference so it doesn't go away */
                get_device(dev);
-               ws = __ws;
-               __ws = NULL;
+               pdev = NULL;
+       } else {
+               ret = -1;
        }
+unlock:
        spin_unlock_irqrestore(&rtcdev_lock, flags);
 
-       wakeup_source_unregister(__ws);
+       platform_device_unregister(pdev);
 
-       return 0;
+       return ret;
 }
 
 static inline void alarmtimer_rtc_timer_init(void)
@@ -138,11 +143,6 @@ static void alarmtimer_rtc_interface_remove(void)
        class_interface_unregister(&alarmtimer_rtc_interface);
 }
 #else
-struct rtc_device *alarmtimer_get_rtcdev(void)
-{
-       return NULL;
-}
-#define rtcdev (NULL)
 static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
 static inline void alarmtimer_rtc_interface_remove(void) { }
 static inline void alarmtimer_rtc_timer_init(void) { }
@@ -207,7 +207,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
        spin_unlock_irqrestore(&base->lock, flags);
 
        if (alarm->function)
-               restart = alarm->function(alarm, base->gettime());
+               restart = alarm->function(alarm, base->get_ktime());
 
        spin_lock_irqsave(&base->lock, flags);
        if (restart != ALARMTIMER_NORESTART) {
@@ -217,7 +217,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
        }
        spin_unlock_irqrestore(&base->lock, flags);
 
-       trace_alarmtimer_fired(alarm, base->gettime());
+       trace_alarmtimer_fired(alarm, base->get_ktime());
        return ret;
 
 }
@@ -225,7 +225,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
 ktime_t alarm_expires_remaining(const struct alarm *alarm)
 {
        struct alarm_base *base = &alarm_bases[alarm->type];
-       return ktime_sub(alarm->node.expires, base->gettime());
+       return ktime_sub(alarm->node.expires, base->get_ktime());
 }
 EXPORT_SYMBOL_GPL(alarm_expires_remaining);
 
@@ -270,7 +270,7 @@ static int alarmtimer_suspend(struct device *dev)
                spin_unlock_irqrestore(&base->lock, flags);
                if (!next)
                        continue;
-               delta = ktime_sub(next->expires, base->gettime());
+               delta = ktime_sub(next->expires, base->get_ktime());
                if (!min || (delta < min)) {
                        expires = next->expires;
                        min = delta;
@@ -281,7 +281,7 @@ static int alarmtimer_suspend(struct device *dev)
                return 0;
 
        if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
-               __pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
+               pm_wakeup_event(dev, 2 * MSEC_PER_SEC);
                return -EBUSY;
        }
 
@@ -296,7 +296,7 @@ static int alarmtimer_suspend(struct device *dev)
        /* Set alarm, if in the past reject suspend briefly to handle */
        ret = rtc_timer_start(rtc, &rtctimer, now, 0);
        if (ret < 0)
-               __pm_wakeup_event(ws, MSEC_PER_SEC);
+               pm_wakeup_event(dev, MSEC_PER_SEC);
        return ret;
 }
 
@@ -364,7 +364,7 @@ void alarm_start(struct alarm *alarm, ktime_t start)
        hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
        spin_unlock_irqrestore(&base->lock, flags);
 
-       trace_alarmtimer_start(alarm, base->gettime());
+       trace_alarmtimer_start(alarm, base->get_ktime());
 }
 EXPORT_SYMBOL_GPL(alarm_start);
 
@@ -377,7 +377,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start)
 {
        struct alarm_base *base = &alarm_bases[alarm->type];
 
-       start = ktime_add_safe(start, base->gettime());
+       start = ktime_add_safe(start, base->get_ktime());
        alarm_start(alarm, start);
 }
 EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -414,7 +414,7 @@ int alarm_try_to_cancel(struct alarm *alarm)
                alarmtimer_dequeue(base, alarm);
        spin_unlock_irqrestore(&base->lock, flags);
 
-       trace_alarmtimer_cancel(alarm, base->gettime());
+       trace_alarmtimer_cancel(alarm, base->get_ktime());
        return ret;
 }
 EXPORT_SYMBOL_GPL(alarm_try_to_cancel);
@@ -474,7 +474,7 @@ u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
 {
        struct alarm_base *base = &alarm_bases[alarm->type];
 
-       return alarm_forward(alarm, base->gettime(), interval);
+       return alarm_forward(alarm, base->get_ktime(), interval);
 }
 EXPORT_SYMBOL_GPL(alarm_forward_now);
 
@@ -500,7 +500,7 @@ static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
                return;
        }
 
-       delta = ktime_sub(absexp, base->gettime());
+       delta = ktime_sub(absexp, base->get_ktime());
 
        spin_lock_irqsave(&freezer_delta_lock, flags);
        if (!freezer_delta || (delta < freezer_delta)) {
@@ -632,7 +632,7 @@ static void alarm_timer_arm(struct k_itimer *timr, ktime_t expires,
        struct alarm_base *base = &alarm_bases[alarm->type];
 
        if (!absolute)
-               expires = ktime_add_safe(expires, base->gettime());
+               expires = ktime_add_safe(expires, base->get_ktime());
        if (sigev_none)
                alarm->node.expires = expires;
        else
@@ -657,23 +657,40 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec64 *tp
 }
 
 /**
- * alarm_clock_get - posix clock_get interface
+ * alarm_clock_get_timespec - posix clock_get_timespec interface
  * @which_clock: clockid
  * @tp: timespec to fill.
  *
- * Provides the underlying alarm base time.
+ * Provides the underlying alarm base time in a task's time namespace.
  */
-static int alarm_clock_get(clockid_t which_clock, struct timespec64 *tp)
+static int alarm_clock_get_timespec(clockid_t which_clock, struct timespec64 *tp)
 {
        struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
 
        if (!alarmtimer_get_rtcdev())
                return -EINVAL;
 
-       *tp = ktime_to_timespec64(base->gettime());
+       base->get_timespec(tp);
+
        return 0;
 }
 
+/**
+ * alarm_clock_get_ktime - posix clock_get_ktime interface
+ * @which_clock: clockid
+ *
+ * Provides the underlying alarm base time in the root namespace.
+ */
+static ktime_t alarm_clock_get_ktime(clockid_t which_clock)
+{
+       struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
+
+       if (!alarmtimer_get_rtcdev())
+               return -EINVAL;
+
+       return base->get_ktime();
+}
+
 /**
  * alarm_timer_create - posix timer_create interface
  * @new_timer: k_itimer pointer to manage
@@ -747,7 +764,7 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
                struct timespec64 rmt;
                ktime_t rem;
 
-               rem = ktime_sub(absexp, alarm_bases[type].gettime());
+               rem = ktime_sub(absexp, alarm_bases[type].get_ktime());
 
                if (rem <= 0)
                        return 0;
@@ -816,9 +833,11 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        exp = timespec64_to_ktime(*tsreq);
        /* Convert (if necessary) to absolute time */
        if (flags != TIMER_ABSTIME) {
-               ktime_t now = alarm_bases[type].gettime();
+               ktime_t now = alarm_bases[type].get_ktime();
 
                exp = ktime_add_safe(now, exp);
+       } else {
+               exp = timens_ktime_to_host(which_clock, exp);
        }
 
        ret = alarmtimer_do_nsleep(&alarm, exp, type);
@@ -837,7 +856,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
 
 const struct k_clock alarm_clock = {
        .clock_getres           = alarm_clock_getres,
-       .clock_get              = alarm_clock_get,
+       .clock_get_ktime        = alarm_clock_get_ktime,
+       .clock_get_timespec     = alarm_clock_get_timespec,
        .timer_create           = alarm_timer_create,
        .timer_set              = common_timer_set,
        .timer_del              = common_timer_del,
@@ -866,6 +886,12 @@ static struct platform_driver alarmtimer_driver = {
        }
 };
 
+static void get_boottime_timespec(struct timespec64 *tp)
+{
+       ktime_get_boottime_ts64(tp);
+       timens_add_boottime(tp);
+}
+
 /**
  * alarmtimer_init - Initialize alarm timer code
  *
@@ -874,17 +900,18 @@ static struct platform_driver alarmtimer_driver = {
  */
 static int __init alarmtimer_init(void)
 {
-       struct platform_device *pdev;
-       int error = 0;
+       int error;
        int i;
 
        alarmtimer_rtc_timer_init();
 
        /* Initialize alarm bases */
        alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
-       alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real;
+       alarm_bases[ALARM_REALTIME].get_ktime = &ktime_get_real;
+       alarm_bases[ALARM_REALTIME].get_timespec = ktime_get_real_ts64;
        alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME;
-       alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime;
+       alarm_bases[ALARM_BOOTTIME].get_ktime = &ktime_get_boottime;
+       alarm_bases[ALARM_BOOTTIME].get_timespec = get_boottime_timespec;
        for (i = 0; i < ALARM_NUMTYPE; i++) {
                timerqueue_init_head(&alarm_bases[i].timerqueue);
                spin_lock_init(&alarm_bases[i].lock);
@@ -898,15 +925,7 @@ static int __init alarmtimer_init(void)
        if (error)
                goto out_if;
 
-       pdev = platform_device_register_simple("alarmtimer", -1, NULL, 0);
-       if (IS_ERR(pdev)) {
-               error = PTR_ERR(pdev);
-               goto out_drv;
-       }
        return 0;
-
-out_drv:
-       platform_driver_unregister(&alarmtimer_driver);
 out_if:
        alarmtimer_rtc_interface_remove();
        return error;
index 8de90ea31280bfd4986534dc03dcf5bcfeee5a75..3a609e7344f3d9106d93b6eb53e1138fcf48581d 100644 (file)
@@ -1477,7 +1477,7 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
 static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
                          struct hrtimer_clock_base *base,
                          struct hrtimer *timer, ktime_t *now,
-                         unsigned long flags)
+                         unsigned long flags) __must_hold(&cpu_base->lock)
 {
        enum hrtimer_restart (*fn)(struct hrtimer *);
        int restart;
@@ -1910,8 +1910,8 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
        return ret;
 }
 
-long hrtimer_nanosleep(const struct timespec64 *rqtp,
-                      const enum hrtimer_mode mode, const clockid_t clockid)
+long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+                      const clockid_t clockid)
 {
        struct restart_block *restart;
        struct hrtimer_sleeper t;
@@ -1923,7 +1923,7 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp,
                slack = 0;
 
        hrtimer_init_sleeper_on_stack(&t, clockid, mode);
-       hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
+       hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
        ret = do_nanosleep(&t, mode);
        if (ret != -ERESTART_RESTARTBLOCK)
                goto out;
@@ -1958,7 +1958,8 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
 
        current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
        current->restart_block.nanosleep.rmtp = rmtp;
-       return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+       return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
+                                CLOCK_MONOTONIC);
 }
 
 #endif
@@ -1978,7 +1979,8 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
 
        current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
        current->restart_block.nanosleep.compat_rmtp = rmtp;
-       return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+       return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
+                                CLOCK_MONOTONIC);
 }
 #endif
 
diff --git a/kernel/time/namespace.c b/kernel/time/namespace.c
new file mode 100644 (file)
index 0000000..1285850
--- /dev/null
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Andrei Vagin <avagin@openvz.org>
+ * Author: Dmitry Safonov <dima@arista.com>
+ */
+
+#include <linux/time_namespace.h>
+#include <linux/user_namespace.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/seq_file.h>
+#include <linux/proc_ns.h>
+#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+
+#include <vdso/datapage.h>
+
+ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t tim,
+                               struct timens_offsets *ns_offsets)
+{
+       ktime_t offset;
+
+       switch (clockid) {
+       case CLOCK_MONOTONIC:
+               offset = timespec64_to_ktime(ns_offsets->monotonic);
+               break;
+       case CLOCK_BOOTTIME:
+       case CLOCK_BOOTTIME_ALARM:
+               offset = timespec64_to_ktime(ns_offsets->boottime);
+               break;
+       default:
+               return tim;
+       }
+
+       /*
+        * Check that @tim value is in [offset, KTIME_MAX + offset]
+        * and subtract offset.
+        */
+       if (tim < offset) {
+               /*
+                * The user can specify an *absolute* @tim value - if it's less
+                * than the time namespace's offset, it has already expired.
+                */
+               tim = 0;
+       } else {
+               tim = ktime_sub(tim, offset);
+               if (unlikely(tim > KTIME_MAX))
+                       tim = KTIME_MAX;
+       }
+
+       return tim;
+}
+
+static struct ucounts *inc_time_namespaces(struct user_namespace *ns)
+{
+       return inc_ucount(ns, current_euid(), UCOUNT_TIME_NAMESPACES);
+}
+
+static void dec_time_namespaces(struct ucounts *ucounts)
+{
+       dec_ucount(ucounts, UCOUNT_TIME_NAMESPACES);
+}
+
+/**
+ * clone_time_ns - Clone a time namespace
+ * @user_ns:   User namespace which owns a new namespace.
+ * @old_ns:    Namespace to clone
+ *
+ * Clone @old_ns and set the clone's refcount to 1.
+ *
+ * Return: The new namespace or ERR_PTR.
+ */
+static struct time_namespace *clone_time_ns(struct user_namespace *user_ns,
+                                         struct time_namespace *old_ns)
+{
+       struct time_namespace *ns;
+       struct ucounts *ucounts;
+       int err;
+
+       err = -ENOSPC;
+       ucounts = inc_time_namespaces(user_ns);
+       if (!ucounts)
+               goto fail;
+
+       err = -ENOMEM;
+       ns = kmalloc(sizeof(*ns), GFP_KERNEL);
+       if (!ns)
+               goto fail_dec;
+
+       kref_init(&ns->kref);
+
+       ns->vvar_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!ns->vvar_page)
+               goto fail_free;
+
+       err = ns_alloc_inum(&ns->ns);
+       if (err)
+               goto fail_free_page;
+
+       ns->ucounts = ucounts;
+       ns->ns.ops = &timens_operations;
+       ns->user_ns = get_user_ns(user_ns);
+       ns->offsets = old_ns->offsets;
+       ns->frozen_offsets = false;
+       return ns;
+
+fail_free_page:
+       __free_page(ns->vvar_page);
+fail_free:
+       kfree(ns);
+fail_dec:
+       dec_time_namespaces(ucounts);
+fail:
+       return ERR_PTR(err);
+}
+
+/**
+ * copy_time_ns - Create timens_for_children from @old_ns
+ * @flags:     Cloning flags
+ * @user_ns:   User namespace which owns a new namespace.
+ * @old_ns:    Namespace to clone
+ *
+ * If CLONE_NEWTIME is specified in @flags, creates a new timens_for_children;
+ * otherwise takes a reference on @old_ns.
+ *
+ * Return: timens_for_children namespace or ERR_PTR.
+ */
+struct time_namespace *copy_time_ns(unsigned long flags,
+       struct user_namespace *user_ns, struct time_namespace *old_ns)
+{
+       if (!(flags & CLONE_NEWTIME))
+               return get_time_ns(old_ns);
+
+       return clone_time_ns(user_ns, old_ns);
+}
+
+static struct timens_offset offset_from_ts(struct timespec64 off)
+{
+       struct timens_offset ret;
+
+       ret.sec = off.tv_sec;
+       ret.nsec = off.tv_nsec;
+
+       return ret;
+}
+
+/*
+ * A time namespace VVAR page has the same layout as the VVAR page that
+ * contains the system-wide VDSO data.
+ *
+ * For a normal task the VVAR pages are installed in the normal ordering:
+ *     VVAR
+ *     PVCLOCK
+ *     HVCLOCK
+ *     TIMENS   <- Not really required
+ *
+ * Now for a timens task the pages are installed in the following order:
+ *     TIMENS
+ *     PVCLOCK
+ *     HVCLOCK
+ *     VVAR
+ *
+ * The check for vdso_data->clock_mode is in the unlikely path of
+ * the seq begin magic. In the non-timens case 'seq' is even most of
+ * the time, so the branch is not taken.
+ *
+ * If 'seq' is odd, i.e. a concurrent update is in progress, the extra check
+ * for vdso_data->clock_mode is a non-issue. The task is spin waiting for the
+ * update to finish and for 'seq' to become even anyway.
+ *
+ * The timens page has vdso_data->clock_mode set to VCLOCK_TIMENS, which
+ * enforces the time namespace handling path.
+ */
+static void timens_setup_vdso_data(struct vdso_data *vdata,
+                                  struct time_namespace *ns)
+{
+       struct timens_offset *offset = vdata->offset;
+       struct timens_offset monotonic = offset_from_ts(ns->offsets.monotonic);
+       struct timens_offset boottime = offset_from_ts(ns->offsets.boottime);
+
+       vdata->seq                      = 1;
+       vdata->clock_mode               = VCLOCK_TIMENS;
+       offset[CLOCK_MONOTONIC]         = monotonic;
+       offset[CLOCK_MONOTONIC_RAW]     = monotonic;
+       offset[CLOCK_MONOTONIC_COARSE]  = monotonic;
+       offset[CLOCK_BOOTTIME]          = boottime;
+       offset[CLOCK_BOOTTIME_ALARM]    = boottime;
+}
+
+/*
+ * Protects against multiple offset writers racing each other and against
+ * tasks entering the namespace.
+ */
+static DEFINE_MUTEX(offset_lock);
+
+static void timens_set_vvar_page(struct task_struct *task,
+                               struct time_namespace *ns)
+{
+       struct vdso_data *vdata;
+       unsigned int i;
+
+       if (ns == &init_time_ns)
+               return;
+
+       /* Fast-path, taken by every task in namespace except the first. */
+       if (likely(ns->frozen_offsets))
+               return;
+
+       mutex_lock(&offset_lock);
+       /* Nothing to do: vvar_page has already been initialized. */
+       if (ns->frozen_offsets)
+               goto out;
+
+       ns->frozen_offsets = true;
+       vdata = arch_get_vdso_data(page_address(ns->vvar_page));
+
+       for (i = 0; i < CS_BASES; i++)
+               timens_setup_vdso_data(&vdata[i], ns);
+
+out:
+       mutex_unlock(&offset_lock);
+}
+
+void free_time_ns(struct kref *kref)
+{
+       struct time_namespace *ns;
+
+       ns = container_of(kref, struct time_namespace, kref);
+       dec_time_namespaces(ns->ucounts);
+       put_user_ns(ns->user_ns);
+       ns_free_inum(&ns->ns);
+       __free_page(ns->vvar_page);
+       kfree(ns);
+}
+
+static struct time_namespace *to_time_ns(struct ns_common *ns)
+{
+       return container_of(ns, struct time_namespace, ns);
+}
+
+static struct ns_common *timens_get(struct task_struct *task)
+{
+       struct time_namespace *ns = NULL;
+       struct nsproxy *nsproxy;
+
+       task_lock(task);
+       nsproxy = task->nsproxy;
+       if (nsproxy) {
+               ns = nsproxy->time_ns;
+               get_time_ns(ns);
+       }
+       task_unlock(task);
+
+       return ns ? &ns->ns : NULL;
+}
+
+static struct ns_common *timens_for_children_get(struct task_struct *task)
+{
+       struct time_namespace *ns = NULL;
+       struct nsproxy *nsproxy;
+
+       task_lock(task);
+       nsproxy = task->nsproxy;
+       if (nsproxy) {
+               ns = nsproxy->time_ns_for_children;
+               get_time_ns(ns);
+       }
+       task_unlock(task);
+
+       return ns ? &ns->ns : NULL;
+}
+
+static void timens_put(struct ns_common *ns)
+{
+       put_time_ns(to_time_ns(ns));
+}
+
+static int timens_install(struct nsproxy *nsproxy, struct ns_common *new)
+{
+       struct time_namespace *ns = to_time_ns(new);
+       int err;
+
+       if (!current_is_single_threaded())
+               return -EUSERS;
+
+       if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
+           !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
+               return -EPERM;
+
+       timens_set_vvar_page(current, ns);
+
+       err = vdso_join_timens(current, ns);
+       if (err)
+               return err;
+
+       get_time_ns(ns);
+       put_time_ns(nsproxy->time_ns);
+       nsproxy->time_ns = ns;
+
+       get_time_ns(ns);
+       put_time_ns(nsproxy->time_ns_for_children);
+       nsproxy->time_ns_for_children = ns;
+       return 0;
+}
+
+int timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk)
+{
+       struct ns_common *nsc = &nsproxy->time_ns_for_children->ns;
+       struct time_namespace *ns = to_time_ns(nsc);
+       int err;
+
+       /* create_new_namespaces() already incremented the ref counter */
+       if (nsproxy->time_ns == nsproxy->time_ns_for_children)
+               return 0;
+
+       timens_set_vvar_page(tsk, ns);
+
+       err = vdso_join_timens(tsk, ns);
+       if (err)
+               return err;
+
+       get_time_ns(ns);
+       put_time_ns(nsproxy->time_ns);
+       nsproxy->time_ns = ns;
+
+       return 0;
+}
+
+static struct user_namespace *timens_owner(struct ns_common *ns)
+{
+       return to_time_ns(ns)->user_ns;
+}
+
+static void show_offset(struct seq_file *m, int clockid, struct timespec64 *ts)
+{
+       seq_printf(m, "%d %lld %ld\n", clockid, ts->tv_sec, ts->tv_nsec);
+}
+
+void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m)
+{
+       struct ns_common *ns;
+       struct time_namespace *time_ns;
+
+       ns = timens_for_children_get(p);
+       if (!ns)
+               return;
+       time_ns = to_time_ns(ns);
+
+       show_offset(m, CLOCK_MONOTONIC, &time_ns->offsets.monotonic);
+       show_offset(m, CLOCK_BOOTTIME, &time_ns->offsets.boottime);
+       put_time_ns(time_ns);
+}
+
+int proc_timens_set_offset(struct file *file, struct task_struct *p,
+                          struct proc_timens_offset *offsets, int noffsets)
+{
+       struct ns_common *ns;
+       struct time_namespace *time_ns;
+       struct timespec64 tp;
+       int i, err;
+
+       ns = timens_for_children_get(p);
+       if (!ns)
+               return -ESRCH;
+       time_ns = to_time_ns(ns);
+
+       if (!file_ns_capable(file, time_ns->user_ns, CAP_SYS_TIME)) {
+               put_time_ns(time_ns);
+               return -EPERM;
+       }
+
+       for (i = 0; i < noffsets; i++) {
+               struct proc_timens_offset *off = &offsets[i];
+
+               switch (off->clockid) {
+               case CLOCK_MONOTONIC:
+                       ktime_get_ts64(&tp);
+                       break;
+               case CLOCK_BOOTTIME:
+                       ktime_get_boottime_ts64(&tp);
+                       break;
+               default:
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               err = -ERANGE;
+
+               if (off->val.tv_sec > KTIME_SEC_MAX ||
+                   off->val.tv_sec < -KTIME_SEC_MAX)
+                       goto out;
+
+               tp = timespec64_add(tp, off->val);
+               /*
+                * KTIME_SEC_MAX is divided by 2 to be sure that KTIME_MAX is
+                * still unreachable.
+                */
+               if (tp.tv_sec < 0 || tp.tv_sec > KTIME_SEC_MAX / 2)
+                       goto out;
+       }
+
+       mutex_lock(&offset_lock);
+       if (time_ns->frozen_offsets) {
+               err = -EACCES;
+               goto out_unlock;
+       }
+
+       err = 0;
+       /* Don't report errors after this line */
+       for (i = 0; i < noffsets; i++) {
+               struct proc_timens_offset *off = &offsets[i];
+               struct timespec64 *offset = NULL;
+
+               switch (off->clockid) {
+               case CLOCK_MONOTONIC:
+                       offset = &time_ns->offsets.monotonic;
+                       break;
+               case CLOCK_BOOTTIME:
+                       offset = &time_ns->offsets.boottime;
+                       break;
+               }
+
+               *offset = off->val;
+       }
+
+out_unlock:
+       mutex_unlock(&offset_lock);
+out:
+       put_time_ns(time_ns);
+
+       return err;
+}
+
+const struct proc_ns_operations timens_operations = {
+       .name           = "time",
+       .type           = CLONE_NEWTIME,
+       .get            = timens_get,
+       .put            = timens_put,
+       .install        = timens_install,
+       .owner          = timens_owner,
+};
+
+const struct proc_ns_operations timens_for_children_operations = {
+       .name           = "time_for_children",
+       .type           = CLONE_NEWTIME,
+       .get            = timens_for_children_get,
+       .put            = timens_put,
+       .install        = timens_install,
+       .owner          = timens_owner,
+};
+
+struct time_namespace init_time_ns = {
+       .kref           = KREF_INIT(3),
+       .user_ns        = &init_user_ns,
+       .ns.inum        = PROC_TIME_INIT_INO,
+       .ns.ops         = &timens_operations,
+       .frozen_offsets = true,
+};
+
+static int __init time_ns_init(void)
+{
+       return 0;
+}
+subsys_initcall(time_ns_init);
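
The arithmetic at the heart of the new file is do_timens_ktime_to_host(): an absolute timestamp expressed in a namespace clock is translated to the host clock by subtracting the namespace offset, with already-expired values clamped to zero and the result saturated at KTIME_MAX. A userspace model:

    /* Model of the namespace-to-host timestamp translation. */
    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_KTIME_MAX INT64_MAX

    static int64_t timens_to_host(int64_t tim, int64_t offset)
    {
            if (tim < offset)
                    return 0;       /* already expired on the host clock */
            tim -= offset;
            return tim > MODEL_KTIME_MAX ? MODEL_KTIME_MAX : tim;
    }

    int main(void)
    {
            printf("%lld\n", (long long)timens_to_host(100, 250)); /* 0 */
            printf("%lld\n", (long long)timens_to_host(400, 250)); /* 150 */
            return 0;
    }
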
index ec960bb939fdf9770b9f4b92ee87e0f1992a1329..77c0c2370b6d1d994e5752b05fe5e8aae395a75b 100644 (file)
@@ -14,8 +14,6 @@
 
 #include "posix-timers.h"
 
-static void delete_clock(struct kref *kref);
-
 /*
  * Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
  */
@@ -125,7 +123,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
                err = 0;
 
        if (!err) {
-               kref_get(&clk->kref);
+               get_device(clk->dev);
                fp->private_data = clk;
        }
 out:
@@ -141,7 +139,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
        if (clk->ops.release)
                err = clk->ops.release(clk);
 
-       kref_put(&clk->kref, delete_clock);
+       put_device(clk->dev);
 
        fp->private_data = NULL;
 
@@ -161,38 +159,35 @@ static const struct file_operations posix_clock_file_operations = {
 #endif
 };
 
-int posix_clock_register(struct posix_clock *clk, dev_t devid)
+int posix_clock_register(struct posix_clock *clk, struct device *dev)
 {
        int err;
 
-       kref_init(&clk->kref);
        init_rwsem(&clk->rwsem);
 
        cdev_init(&clk->cdev, &posix_clock_file_operations);
+       err = cdev_device_add(&clk->cdev, dev);
+       if (err) {
+               pr_err("%s unable to add device %d:%d\n",
+                       dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+               return err;
+       }
        clk->cdev.owner = clk->ops.owner;
-       err = cdev_add(&clk->cdev, devid, 1);
+       clk->dev = dev;
 
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(posix_clock_register);
 
-static void delete_clock(struct kref *kref)
-{
-       struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-
-       if (clk->release)
-               clk->release(clk);
-}
-
 void posix_clock_unregister(struct posix_clock *clk)
 {
-       cdev_del(&clk->cdev);
+       cdev_device_del(&clk->cdev, clk->dev);
 
        down_write(&clk->rwsem);
        clk->zombie = true;
        up_write(&clk->rwsem);
 
-       kref_put(&clk->kref, delete_clock);
+       put_device(clk->dev);
 }
 EXPORT_SYMBOL_GPL(posix_clock_unregister);
 
@@ -315,8 +310,8 @@ out:
 }
 
 const struct k_clock clock_posix_dynamic = {
-       .clock_getres   = pc_clock_getres,
-       .clock_set      = pc_clock_settime,
-       .clock_get      = pc_clock_gettime,
-       .clock_adj      = pc_clock_adjtime,
+       .clock_getres           = pc_clock_getres,
+       .clock_set              = pc_clock_settime,
+       .clock_get_timespec     = pc_clock_gettime,
+       .clock_adj              = pc_clock_adjtime,
 };
index 42d512fcfda2e6de092831f97bfc75c85b0b6137..8ff6da77a01fdef5b3aa6d9dc77075c492a0bb17 100644 (file)
@@ -1391,26 +1391,26 @@ static int thread_cpu_timer_create(struct k_itimer *timer)
 }
 
 const struct k_clock clock_posix_cpu = {
-       .clock_getres   = posix_cpu_clock_getres,
-       .clock_set      = posix_cpu_clock_set,
-       .clock_get      = posix_cpu_clock_get,
-       .timer_create   = posix_cpu_timer_create,
-       .nsleep         = posix_cpu_nsleep,
-       .timer_set      = posix_cpu_timer_set,
-       .timer_del      = posix_cpu_timer_del,
-       .timer_get      = posix_cpu_timer_get,
-       .timer_rearm    = posix_cpu_timer_rearm,
+       .clock_getres           = posix_cpu_clock_getres,
+       .clock_set              = posix_cpu_clock_set,
+       .clock_get_timespec     = posix_cpu_clock_get,
+       .timer_create           = posix_cpu_timer_create,
+       .nsleep                 = posix_cpu_nsleep,
+       .timer_set              = posix_cpu_timer_set,
+       .timer_del              = posix_cpu_timer_del,
+       .timer_get              = posix_cpu_timer_get,
+       .timer_rearm            = posix_cpu_timer_rearm,
 };
 
 const struct k_clock clock_process = {
-       .clock_getres   = process_cpu_clock_getres,
-       .clock_get      = process_cpu_clock_get,
-       .timer_create   = process_cpu_timer_create,
-       .nsleep         = process_cpu_nsleep,
+       .clock_getres           = process_cpu_clock_getres,
+       .clock_get_timespec     = process_cpu_clock_get,
+       .timer_create           = process_cpu_timer_create,
+       .nsleep                 = process_cpu_nsleep,
 };
 
 const struct k_clock clock_thread = {
-       .clock_getres   = thread_cpu_clock_getres,
-       .clock_get      = thread_cpu_clock_get,
-       .timer_create   = thread_cpu_timer_create,
+       .clock_getres           = thread_cpu_clock_getres,
+       .clock_get_timespec     = thread_cpu_clock_get,
+       .timer_create           = thread_cpu_timer_create,
 };
index 67df65f887ac872130b4038f1e4be8d799f097e0..fcb3b21d8bdcd444f32165f98dd9fe078b3d2446 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ktime.h>
 #include <linux/timekeeping.h>
 #include <linux/posix-timers.h>
+#include <linux/time_namespace.h>
 #include <linux/compat.h>
 
 #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
@@ -77,9 +78,11 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
                break;
        case CLOCK_MONOTONIC:
                ktime_get_ts64(tp);
+               timens_add_monotonic(tp);
                break;
        case CLOCK_BOOTTIME:
                ktime_get_boottime_ts64(tp);
+               timens_add_boottime(tp);
                break;
        default:
                return -EINVAL;
@@ -126,6 +129,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
                struct __kernel_timespec __user *, rmtp)
 {
        struct timespec64 t;
+       ktime_t texp;
 
        switch (which_clock) {
        case CLOCK_REALTIME:
@@ -144,13 +148,19 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
                rmtp = NULL;
        current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
        current->restart_block.nanosleep.rmtp = rmtp;
-       return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?
+       texp = timespec64_to_ktime(t);
+       if (flags & TIMER_ABSTIME)
+               texp = timens_ktime_to_host(which_clock, texp);
+       return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
                                 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
                                 which_clock);
 }
 
 #ifdef CONFIG_COMPAT
 COMPAT_SYS_NI(timer_create);
+#endif
+
+#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
 COMPAT_SYS_NI(getitimer);
 COMPAT_SYS_NI(setitimer);
 #endif
@@ -212,6 +222,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
                struct old_timespec32 __user *, rmtp)
 {
        struct timespec64 t;
+       ktime_t texp;
 
        switch (which_clock) {
        case CLOCK_REALTIME:
@@ -230,7 +241,10 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
                rmtp = NULL;
        current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
        current->restart_block.nanosleep.compat_rmtp = rmtp;
-       return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?
+       texp = timespec64_to_ktime(t);
+       if (flags & TIMER_ABSTIME)
+               texp = timens_ktime_to_host(which_clock, texp);
+       return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
                                 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
                                 which_clock);
 }
index 0ec5b7a1d769f12206f90876b203aa94cc02a7ce..ff0eb30de346d943911809d311ed2cfd9912d53d 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/hashtable.h>
 #include <linux/compat.h>
 #include <linux/nospec.h>
+#include <linux/time_namespace.h>
 
 #include "timekeeping.h"
 #include "posix-timers.h"
@@ -165,12 +166,17 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
 }
 
 /* Get clock_realtime */
-static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
+static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
 {
        ktime_get_real_ts64(tp);
        return 0;
 }
 
+static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
+{
+       return ktime_get_real();
+}
+
 /* Set clock_realtime */
 static int posix_clock_realtime_set(const clockid_t which_clock,
                                    const struct timespec64 *tp)
@@ -187,18 +193,25 @@ static int posix_clock_realtime_adj(const clockid_t which_clock,
 /*
  * Get monotonic time for posix timers
  */
-static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
+static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
 {
        ktime_get_ts64(tp);
+       timens_add_monotonic(tp);
        return 0;
 }
 
+static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
+{
+       return ktime_get();
+}
+
 /*
  * Get monotonic-raw time for posix timers
  */
 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
 {
        ktime_get_raw_ts64(tp);
+       timens_add_monotonic(tp);
        return 0;
 }
 
@@ -213,6 +226,7 @@ static int posix_get_monotonic_coarse(clockid_t which_clock,
                                                struct timespec64 *tp)
 {
        ktime_get_coarse_ts64(tp);
+       timens_add_monotonic(tp);
        return 0;
 }
 
@@ -222,18 +236,29 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
        return 0;
 }
 
-static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
+static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
 {
        ktime_get_boottime_ts64(tp);
+       timens_add_boottime(tp);
        return 0;
 }
 
-static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
+static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
+{
+       return ktime_get_boottime();
+}
+
+static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
 {
        ktime_get_clocktai_ts64(tp);
        return 0;
 }
 
+static ktime_t posix_get_tai_ktime(clockid_t which_clock)
+{
+       return ktime_get_clocktai();
+}
+
 static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
 {
        tp->tv_sec = 0;
@@ -645,7 +670,6 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
 {
        const struct k_clock *kc = timr->kclock;
        ktime_t now, remaining, iv;
-       struct timespec64 ts64;
        bool sig_none;
 
        sig_none = timr->it_sigev_notify == SIGEV_NONE;
@@ -663,12 +687,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
                        return;
        }
 
-       /*
-        * The timespec64 based conversion is suboptimal, but it's not
-        * worth to implement yet another callback.
-        */
-       kc->clock_get(timr->it_clock, &ts64);
-       now = timespec64_to_ktime(ts64);
+       now = kc->clock_get_ktime(timr->it_clock);
 
        /*
         * When a requeue is pending or this is a SIGEV_NONE timer move the
@@ -781,7 +800,7 @@ static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
         * Posix magic: Relative CLOCK_REALTIME timers are not affected by
         * clock modifications, so they become CLOCK_MONOTONIC based under the
         * hood. See hrtimer_init(). Update timr->kclock, so the generic
-        * functions which use timr->kclock->clock_get() work.
+        * functions which use timr->kclock->clock_get_*() work.
         *
         * Note: it_clock stays unmodified, because the next timer_set() might
         * use ABSTIME, so it needs to switch back.
@@ -866,6 +885,8 @@ int common_timer_set(struct k_itimer *timr, int flags,
 
        timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
        expires = timespec64_to_ktime(new_setting->it_value);
+       if (flags & TIMER_ABSTIME)
+               expires = timens_ktime_to_host(timr->it_clock, expires);
        sigev_none = timr->it_sigev_notify == SIGEV_NONE;
 
        kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
@@ -1067,7 +1088,7 @@ SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
        if (!kc)
                return -EINVAL;
 
-       error = kc->clock_get(which_clock, &kernel_tp);
+       error = kc->clock_get_timespec(which_clock, &kernel_tp);
 
        if (!error && put_timespec64(&kernel_tp, tp))
                error = -EFAULT;
@@ -1149,7 +1170,7 @@ SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
        if (!kc)
                return -EINVAL;
 
-       err = kc->clock_get(which_clock, &ts);
+       err = kc->clock_get_timespec(which_clock, &ts);
 
        if (!err && put_old_timespec32(&ts, tp))
                err = -EFAULT;
@@ -1200,7 +1221,22 @@ SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
 static int common_nsleep(const clockid_t which_clock, int flags,
                         const struct timespec64 *rqtp)
 {
-       return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ?
+       ktime_t texp = timespec64_to_ktime(*rqtp);
+
+       return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
+                                HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
+                                which_clock);
+}
+
+static int common_nsleep_timens(const clockid_t which_clock, int flags,
+                        const struct timespec64 *rqtp)
+{
+       ktime_t texp = timespec64_to_ktime(*rqtp);
+
+       if (flags & TIMER_ABSTIME)
+               texp = timens_ktime_to_host(which_clock, texp);
+
+       return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
                                 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
                                 which_clock);
 }
@@ -1261,7 +1297,8 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
 
 static const struct k_clock clock_realtime = {
        .clock_getres           = posix_get_hrtimer_res,
-       .clock_get              = posix_clock_realtime_get,
+       .clock_get_timespec     = posix_get_realtime_timespec,
+       .clock_get_ktime        = posix_get_realtime_ktime,
        .clock_set              = posix_clock_realtime_set,
        .clock_adj              = posix_clock_realtime_adj,
        .nsleep                 = common_nsleep,
@@ -1279,8 +1316,9 @@ static const struct k_clock clock_realtime = {
 
 static const struct k_clock clock_monotonic = {
        .clock_getres           = posix_get_hrtimer_res,
-       .clock_get              = posix_ktime_get_ts,
-       .nsleep                 = common_nsleep,
+       .clock_get_timespec     = posix_get_monotonic_timespec,
+       .clock_get_ktime        = posix_get_monotonic_ktime,
+       .nsleep                 = common_nsleep_timens,
        .timer_create           = common_timer_create,
        .timer_set              = common_timer_set,
        .timer_get              = common_timer_get,
@@ -1295,22 +1333,23 @@ static const struct k_clock clock_monotonic = {
 
 static const struct k_clock clock_monotonic_raw = {
        .clock_getres           = posix_get_hrtimer_res,
-       .clock_get              = posix_get_monotonic_raw,
+       .clock_get_timespec     = posix_get_monotonic_raw,
 };
 
 static const struct k_clock clock_realtime_coarse = {
        .clock_getres           = posix_get_coarse_res,
-       .clock_get              = posix_get_realtime_coarse,
+       .clock_get_timespec     = posix_get_realtime_coarse,
 };
 
 static const struct k_clock clock_monotonic_coarse = {
        .clock_getres           = posix_get_coarse_res,
-       .clock_get              = posix_get_monotonic_coarse,
+       .clock_get_timespec     = posix_get_monotonic_coarse,
 };
 
 static const struct k_clock clock_tai = {
        .clock_getres           = posix_get_hrtimer_res,
-       .clock_get              = posix_get_tai,
+       .clock_get_ktime        = posix_get_tai_ktime,
+       .clock_get_timespec     = posix_get_tai_timespec,
        .nsleep                 = common_nsleep,
        .timer_create           = common_timer_create,
        .timer_set              = common_timer_set,
@@ -1326,8 +1365,9 @@ static const struct k_clock clock_tai = {
 
 static const struct k_clock clock_boottime = {
        .clock_getres           = posix_get_hrtimer_res,
-       .clock_get              = posix_get_boottime,
-       .nsleep                 = common_nsleep,
+       .clock_get_ktime        = posix_get_boottime_ktime,
+       .clock_get_timespec     = posix_get_boottime_timespec,
+       .nsleep                 = common_nsleep_timens,
        .timer_create           = common_timer_create,
        .timer_set              = common_timer_set,
        .timer_get              = common_timer_get,
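
Usage sketch from userspace (hypothetical values): with CLOCK_MONOTONIC and CLOCK_BOOTTIME wired to common_nsleep_timens(), a task inside a time namespace can sleep until an absolute deadline expressed in its own namespace's clock:

#include <time.h>

/* Sleep until an absolute deadline on the caller's CLOCK_MONOTONIC. */
static int sleep_until(time_t sec, long nsec)
{
	struct timespec deadline = { .tv_sec = sec, .tv_nsec = nsec };

	return clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
}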
index 897c29e162b96b33273036074863cff9974fd731..f32a2ebba9b8b212e2b71ae0f34a129c367bd3cf 100644 (file)
@@ -6,8 +6,11 @@ struct k_clock {
                                struct timespec64 *tp);
        int     (*clock_set)(const clockid_t which_clock,
                             const struct timespec64 *tp);
-       int     (*clock_get)(const clockid_t which_clock,
-                            struct timespec64 *tp);
+       /* Returns the clock value in the current time namespace. */
+       int     (*clock_get_timespec)(const clockid_t which_clock,
+                                     struct timespec64 *tp);
+       /* Returns the clock value in the root time namespace. */
+       ktime_t (*clock_get_ktime)(const clockid_t which_clock);
        int     (*clock_adj)(const clockid_t which_clock, struct __kernel_timex *tx);
        int     (*timer_create)(struct k_itimer *timer);
        int     (*nsleep)(const clockid_t which_clock, int flags,
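
The split lets one clock expose two views: clock_get_timespec() feeds the clock_gettime() family with namespace-adjusted values, while clock_get_ktime() gives timer code raw host time. A sketch of what the CLOCK_MONOTONIC pair plausibly looks like, where timens_add_monotonic() is assumed to be the helper that shifts a timespec into the caller's namespace:

/* Namespace view, consumed by the clock_gettime() family. */
static int posix_get_monotonic_timespec(clockid_t which_clock,
					struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	timens_add_monotonic(tp);	/* assumed namespace-shift helper */
	return 0;
}

/* Host view, consumed when arming timers. */
static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
{
	return ktime_get();
}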
index dbd69052eaa6666854b47562c278336a87a14d89..e4332e3e2d5691430314ba70911a8610e7044d79 100644 (file)
@@ -169,14 +169,15 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
 {
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
-       unsigned long r;
+       unsigned long r, flags;
        char r_unit;
        struct clock_read_data rd;
 
        if (cd.rate > rate)
                return;
 
-       WARN_ON(!irqs_disabled());
+       /* Cannot register a sched_clock with interrupts on */
+       local_irq_save(flags);
 
        /* Calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
@@ -233,6 +234,8 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();
 
+       local_irq_restore(flags);
+
        pr_debug("Registered %pS as sched_clock source\n", read);
 }
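
Since the function now disables interrupts itself, a clocksource driver may call it from ordinary initcall context without wrapping the call. A hypothetical registration (driver name, base pointer, and rate invented for illustration):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *counter_base;	/* mapped during probe, not shown */

static u64 notrace example_clk_read(void)
{
	return readl_relaxed(counter_base);	/* free-running 32-bit counter */
}

static int __init example_clk_init(void)
{
	/* 32 usable bits, ticking at 24 MHz; IRQs may be on here now. */
	sched_clock_register(example_clk_read, 32, 24000000);
	return 0;
}
early_initcall(example_clk_init);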
 
index 59225b484e4ee00e12d61411a9120a4b68502da8..7e5d3524e924da2a39a0296311a890bc3a7ad3fd 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/err.h>
 #include <linux/hrtimer.h>
 #include <linux/interrupt.h>
+#include <linux/nmi.h>
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
@@ -558,6 +559,7 @@ void tick_unfreeze(void)
                trace_suspend_resume(TPS("timekeeping_freeze"),
                                     smp_processor_id(), false);
        } else {
+               touch_softlockup_watchdog();
                tick_resume_local();
        }
 
index 8b192e67aabc9d16d8a7b08c0356641b5462c783..a792d21cac645c63468f605207c0c5fd7b356242 100644 (file)
@@ -58,8 +58,9 @@ static void tick_do_update_jiffies64(ktime_t now)
 
        /*
         * Do a quick check without holding jiffies_lock:
+        * The READ_ONCE() pairs with two updates done later in this function.
         */
-       delta = ktime_sub(now, last_jiffies_update);
+       delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
        if (delta < tick_period)
                return;
 
@@ -70,8 +71,9 @@ static void tick_do_update_jiffies64(ktime_t now)
        if (delta >= tick_period) {
 
                delta = ktime_sub(delta, tick_period);
-               last_jiffies_update = ktime_add(last_jiffies_update,
-                                               tick_period);
+               /* Pairs with the lockless read in this function. */
+               WRITE_ONCE(last_jiffies_update,
+                          ktime_add(last_jiffies_update, tick_period));
 
                /* Slow path for long timeouts */
                if (unlikely(delta >= tick_period)) {
@@ -79,8 +81,10 @@ static void tick_do_update_jiffies64(ktime_t now)
 
                        ticks = ktime_divns(delta, incr);
 
-                       last_jiffies_update = ktime_add_ns(last_jiffies_update,
-                                                          incr * ticks);
+                       /* Pairs with the lockless read in this function. */
+                       WRITE_ONCE(last_jiffies_update,
+                                  ktime_add_ns(last_jiffies_update,
+                                               incr * ticks));
                }
                do_timer(++ticks);
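
This is the standard pairing for a variable with a lockless fast-path read and lock-protected writers: both sides go through the ONCE accessors so the compiler cannot refetch or split the access, and the intentional data race is documented. A generic sketch of the pattern:

static u64 shared_stamp;

/* Fast path: peek without the lock. */
static u64 peek_stamp(void)
{
	return READ_ONCE(shared_stamp);
}

/* Slow path: writers are serialized by the lock. */
static void bump_stamp(spinlock_t *lock, u64 val)
{
	spin_lock(lock);
	WRITE_ONCE(shared_stamp, val);
	spin_unlock(lock);
}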
 
index 5ee0f77094107486752338021a7e22210bdbec1d..9577c89179cd1309b99d8224bc21a6622407d210 100644 (file)
@@ -28,11 +28,6 @@ static inline void update_vdso_data(struct vdso_data *vdata,
        vdata[CS_RAW].mult                      = tk->tkr_raw.mult;
        vdata[CS_RAW].shift                     = tk->tkr_raw.shift;
 
-       /* CLOCK_REALTIME */
-       vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
-       vdso_ts->sec    = tk->xtime_sec;
-       vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
-
        /* CLOCK_MONOTONIC */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
        vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
@@ -70,12 +65,6 @@ static inline void update_vdso_data(struct vdso_data *vdata,
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
        vdso_ts->sec    = tk->xtime_sec + (s64)tk->tai_offset;
        vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
-
-       /*
-        * Read without the seqlock held by clock_getres().
-        * Note: No need to have a second copy.
-        */
-       WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
 }
 
 void update_vsyscall(struct timekeeper *tk)
@@ -84,20 +73,17 @@ void update_vsyscall(struct timekeeper *tk)
        struct vdso_timestamp *vdso_ts;
        u64 nsec;
 
-       if (__arch_update_vdso_data()) {
-               /*
-                * Some architectures might want to skip the update of the
-                * data page.
-                */
-               return;
-       }
-
        /* copy vsyscall data */
        vdso_write_begin(vdata);
 
        vdata[CS_HRES_COARSE].clock_mode        = __arch_get_clock_mode(tk);
        vdata[CS_RAW].clock_mode                = __arch_get_clock_mode(tk);
 
+       /* CLOCK_REALTIME also required for time() */
+       vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
+       vdso_ts->sec    = tk->xtime_sec;
+       vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
+
        /* CLOCK_REALTIME_COARSE */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
        vdso_ts->sec    = tk->xtime_sec;
@@ -110,7 +96,18 @@ void update_vsyscall(struct timekeeper *tk)
        nsec            = nsec + tk->wall_to_monotonic.tv_nsec;
        vdso_ts->sec    += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
 
-       update_vdso_data(vdata, tk);
+       /*
+        * Read without the seqlock held by clock_getres().
+        * Note: No need to have a second copy.
+        */
+       WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
+
+       /*
+        * Architectures can opt out of updating the high resolution part
+        * of the VDSO.
+        */
+       if (__arch_update_vdso_data())
+               update_vdso_data(vdata, tk);
 
        __arch_update_vsyscall(vdata, tk);
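
Note the sense of __arch_update_vdso_data() is inverted here: true now means "do update the high-resolution data" rather than "skip the update", and CLOCK_REALTIME plus hrtimer_res are filled in unconditionally so time() and clock_getres() keep working on architectures that opt out. Under that reading, the asm-generic fallback would be expected to look like this (a sketch, not shown in this diff):

#ifndef __arch_update_vdso_data
static __always_inline bool __arch_update_vdso_data(void)
{
	return true;	/* default: keep the high-res vDSO data current */
}
#endif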
 
index 67e0c462b059cb3fcf036ccf2d7962c2b61290e8..1af321dec0f19624f5a35a44eac0fe186dd9d776 100644 (file)
@@ -96,11 +96,34 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
        return 0;
 }
 
+/*
+ * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
+ * functions. But those archs currently don't support direct functions
+ * anyway, and ftrace_find_rec_direct() is just a stub for them.
+ * Define MCOUNT_INSN_SIZE to keep those archs compiling.
+ */
+#ifndef MCOUNT_INSN_SIZE
+/* Make sure this only works without direct calls */
+# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
+# endif
+# define MCOUNT_INSN_SIZE 0
+#endif
+
 int function_graph_enter(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
 {
        struct ftrace_graph_ent trace;
 
+       /*
+        * Skip graph tracing if the return location is served by a direct
+        * trampoline, since the call sequence and return addresses become
+        * unpredictable. For example, a BPF trampoline may call the original
+        * function and may skip frames depending on the type of BPF programs
+        * attached.
+        */
+       if (ftrace_direct_func_count &&
+           ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
+               return -EBUSY;
        trace.func = func;
        trace.depth = ++current->curr_ret_depth;
 
index 74439ab5c2b660cb05302176cd56ee8489579749..9bf1f2cd515ef39dd962196f473890da7b80d99e 100644 (file)
@@ -526,8 +526,7 @@ static int function_stat_show(struct seq_file *m, void *v)
        }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       avg = rec->time;
-       do_div(avg, rec->counter);
+       avg = div64_ul(rec->time, rec->counter);
        if (tracing_thresh && (avg < tracing_thresh))
                goto out;
 #endif
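
div64_ul() is the safer choice here for two reasons: do_div() is a macro that divides its first argument in place and evaluates to the remainder, which is easy to misuse, and it truncates the divisor to 32 bits, which the rec->counter * (rec->counter - 1) * 1000 divisor below can overflow. Side by side:

u64 avg;

/* do_div(): two statements; quotient left in avg, remainder returned. */
avg = rec->time;
do_div(avg, rec->counter);

/* div64_ul(): one expression; takes a full unsigned long divisor. */
avg = div64_ul(rec->time, rec->counter);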
@@ -553,7 +552,8 @@ static int function_stat_show(struct seq_file *m, void *v)
                 * Divide only 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
-               do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+               stddev = div64_ul(stddev,
+                                 rec->counter * (rec->counter - 1) * 1000);
        }
 
        trace_seq_init(&s);
@@ -2364,7 +2364,7 @@ int ftrace_direct_func_count;
  * Search the direct_functions hash to see if the given instruction pointer
  * has a direct caller attached to it.
  */
-static unsigned long find_rec_direct(unsigned long ip)
+unsigned long ftrace_find_rec_direct(unsigned long ip)
 {
        struct ftrace_func_entry *entry;
 
@@ -2380,7 +2380,7 @@ static void call_direct_funcs(unsigned long ip, unsigned long pip,
 {
        unsigned long addr;
 
-       addr = find_rec_direct(ip);
+       addr = ftrace_find_rec_direct(ip);
        if (!addr)
                return;
 
@@ -2393,11 +2393,6 @@ struct ftrace_ops direct_ops = {
                          | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
                          | FTRACE_OPS_FL_PERMANENT,
 };
-#else
-static inline unsigned long find_rec_direct(unsigned long ip)
-{
-       return 0;
-}
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
 
 /**
@@ -2417,7 +2412,7 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 
        if ((rec->flags & FTRACE_FL_DIRECT) &&
            (ftrace_rec_count(rec) == 1)) {
-               addr = find_rec_direct(rec->ip);
+               addr = ftrace_find_rec_direct(rec->ip);
                if (addr)
                        return addr;
                WARN_ON_ONCE(1);
@@ -2458,7 +2453,7 @@ unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
 
        /* Direct calls take precedence over trampolines */
        if (rec->flags & FTRACE_FL_DIRECT_EN) {
-               addr = find_rec_direct(rec->ip);
+               addr = ftrace_find_rec_direct(rec->ip);
                if (addr)
                        return addr;
                WARN_ON_ONCE(1);
@@ -3604,7 +3599,7 @@ static int t_show(struct seq_file *m, void *v)
                if (rec->flags & FTRACE_FL_DIRECT) {
                        unsigned long direct;
 
-                       direct = find_rec_direct(rec->ip);
+                       direct = ftrace_find_rec_direct(rec->ip);
                        if (direct)
                                seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
                }
@@ -5008,7 +5003,7 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
        mutex_lock(&direct_mutex);
 
        /* See if there's a direct function at @ip already */
-       if (find_rec_direct(ip))
+       if (ftrace_find_rec_direct(ip))
                goto out_unlock;
 
        ret = -ENODEV;
@@ -5027,7 +5022,7 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
        if (ip != rec->ip) {
                ip = rec->ip;
                /* Need to check this ip for a direct. */
-               if (find_rec_direct(ip))
+               if (ftrace_find_rec_direct(ip))
                        goto out_unlock;
        }
 
index 4bf050fcfe3be6deed57d1c7687d8985f4c71c56..3f655371eaf6ba4fe7d87b6cb10f39fb35192f9f 100644 (file)
@@ -5070,7 +5070,7 @@ static __init int test_ringbuffer(void)
        int ret = 0;
 
        if (security_locked_down(LOCKDOWN_TRACEFS)) {
-               pr_warning("Lockdown is enabled, skipping ring buffer tests\n");
+               pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
                return 0;
        }
 
index 23459d53d57698b150e206a66ebd49beb8b44beb..5b6ee4aadc268f425d0159596621d9721f5a5719 100644 (file)
@@ -1889,7 +1889,7 @@ int __init register_tracer(struct tracer *type)
        }
 
        if (security_locked_down(LOCKDOWN_TRACEFS)) {
-               pr_warning("Can not register tracer %s due to lockdown\n",
+               pr_warn("Can not register tracer %s due to lockdown\n",
                           type->name);
                return -EPERM;
        }
@@ -4685,6 +4685,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
 
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 {
+       if ((mask == TRACE_ITER_RECORD_TGID) ||
+           (mask == TRACE_ITER_RECORD_CMD))
+               lockdep_assert_held(&event_mutex);
+
        /* do nothing if flag is already set */
        if (!!(tr->trace_flags & mask) == !!enabled)
                return 0;
@@ -4752,6 +4756,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
 
        cmp += len;
 
+       mutex_lock(&event_mutex);
        mutex_lock(&trace_types_lock);
 
        ret = match_string(trace_options, -1, cmp);
@@ -4762,6 +4767,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
                ret = set_tracer_flag(tr, 1 << ret, !neg);
 
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        /*
         * If the first trailing whitespace is replaced with '\0' by strstrip,
@@ -8076,9 +8082,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
        if (val != 0 && val != 1)
                return -EINVAL;
 
+       mutex_lock(&event_mutex);
        mutex_lock(&trace_types_lock);
        ret = set_tracer_flag(tr, 1 << index, val);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        if (ret < 0)
                return ret;
@@ -8796,7 +8804,7 @@ struct dentry *tracing_init_dentry(void)
        struct trace_array *tr = &global_trace;
 
        if (security_locked_down(LOCKDOWN_TRACEFS)) {
-               pr_warning("Tracing disabled due to lockdown\n");
+               pr_warn("Tracing disabled due to lockdown\n");
                return ERR_PTR(-EPERM);
        }
 
@@ -9244,7 +9252,7 @@ __init static int tracer_alloc_buffers(void)
 
 
        if (security_locked_down(LOCKDOWN_TRACEFS)) {
-               pr_warning("Tracing disabled due to lockdown\n");
+               pr_warn("Tracing disabled due to lockdown\n");
                return -EPERM;
        }
 
@@ -9412,6 +9420,11 @@ __init static int tracing_set_default_clock(void)
 {
        /* sched_clock_stable() is determined in late_initcall */
        if (!trace_boot_clock && !sched_clock_stable()) {
+               if (security_locked_down(LOCKDOWN_TRACEFS)) {
+                       pr_warn("Can not set tracing clock due to lockdown\n");
+                       return -EPERM;
+               }
+
                printk(KERN_WARNING
                       "Unstable clock detected, switching default tracing clock to \"global\"\n"
                       "If you want to keep using the local clock, then add:\n"
index 63bf60f793987fbebd812fbe5252925879feaad6..a98dce1b3334c04805915461f6452a507c5a3b82 100644 (file)
@@ -52,6 +52,9 @@ enum trace_type {
 #undef __field
 #define __field(type, item)            type    item;
 
+#undef __field_fn
+#define __field_fn(type, item)         type    item;
+
 #undef __field_struct
 #define __field_struct(type, item)     __field(type, item)
 
@@ -71,26 +74,22 @@ enum trace_type {
 #define F_STRUCT(args...)              args
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)    \
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)            \
        struct struct_name {                                            \
                struct trace_entry      ent;                            \
                tstruct                                                 \
        }
 
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
+#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
 
 #undef FTRACE_ENTRY_REG
-#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,        \
-                        filter, regfn) \
-       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-                    filter)
+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,        regfn)  \
+       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 #undef FTRACE_ENTRY_PACKED
-#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,     \
-                           filter)                                     \
-       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-                    filter) __packed
+#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)     \
+       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
 
 #include "trace_entries.h"
 
@@ -1917,17 +1916,15 @@ extern void tracing_log_err(struct trace_array *tr,
 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)    \
+#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)    \
        extern struct trace_event_call                                  \
        __aligned(4) event_##call;
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)        \
-       FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-                    filter)
+#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)        \
+       FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #undef FTRACE_ENTRY_PACKED
-#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
-       FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-                    filter)
+#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
+       FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 #include "trace_entries.h"
 
index fc8e97328e540df40c3fdd5e534134431437a03f..3e9d816082843bcd02dac500933467a726f6a767 100644 (file)
@@ -61,15 +61,13 @@ FTRACE_ENTRY_REG(function, ftrace_entry,
        TRACE_FN,
 
        F_STRUCT(
-               __field(        unsigned long,  ip              )
-               __field(        unsigned long,  parent_ip       )
+               __field_fn(     unsigned long,  ip              )
+               __field_fn(     unsigned long,  parent_ip       )
        ),
 
        F_printk(" %ps <-- %ps",
                 (void *)__entry->ip, (void *)__entry->parent_ip),
 
-       FILTER_TRACE_FN,
-
        perf_ftrace_event_register
 );
 
@@ -84,9 +82,7 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry,
                __field_desc(   int,            graph_ent,      depth           )
        ),
 
-       F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth),
-
-       FILTER_OTHER
+       F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
 );
 
 /* Function return entry */
@@ -97,18 +93,16 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
        F_STRUCT(
                __field_struct( struct ftrace_graph_ret,        ret     )
                __field_desc(   unsigned long,  ret,            func    )
+               __field_desc(   unsigned long,  ret,            overrun )
                __field_desc(   unsigned long long, ret,        calltime)
                __field_desc(   unsigned long long, ret,        rettime )
-               __field_desc(   unsigned long,  ret,            overrun )
                __field_desc(   int,            ret,            depth   )
        ),
 
        F_printk("<-- %ps (%d) (start: %llx  end: %llx) over: %d",
                 (void *)__entry->func, __entry->depth,
                 __entry->calltime, __entry->rettime,
-                __entry->depth),
-
-       FILTER_OTHER
+                __entry->depth)
 );
 
 /*
@@ -137,9 +131,7 @@ FTRACE_ENTRY(context_switch, ctx_switch_entry,
        F_printk("%u:%u:%u  ==> %u:%u:%u [%03u]",
                 __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
                 __entry->next_pid, __entry->next_prio, __entry->next_state,
-                __entry->next_cpu),
-
-       FILTER_OTHER
+                __entry->next_cpu)
 );
 
 /*
@@ -157,9 +149,7 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
        F_printk("%u:%u:%u  ==+ %u:%u:%u [%03u]",
                 __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
                 __entry->next_pid, __entry->next_prio, __entry->next_state,
-                __entry->next_cpu),
-
-       FILTER_OTHER
+                __entry->next_cpu)
 );
 
 /*
@@ -183,9 +173,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
                 (void *)__entry->caller[0], (void *)__entry->caller[1],
                 (void *)__entry->caller[2], (void *)__entry->caller[3],
                 (void *)__entry->caller[4], (void *)__entry->caller[5],
-                (void *)__entry->caller[6], (void *)__entry->caller[7]),
-
-       FILTER_OTHER
+                (void *)__entry->caller[6], (void *)__entry->caller[7])
 );
 
 FTRACE_ENTRY(user_stack, userstack_entry,
@@ -203,9 +191,7 @@ FTRACE_ENTRY(user_stack, userstack_entry,
                 (void *)__entry->caller[0], (void *)__entry->caller[1],
                 (void *)__entry->caller[2], (void *)__entry->caller[3],
                 (void *)__entry->caller[4], (void *)__entry->caller[5],
-                (void *)__entry->caller[6], (void *)__entry->caller[7]),
-
-       FILTER_OTHER
+                (void *)__entry->caller[6], (void *)__entry->caller[7])
 );
 
 /*
@@ -222,9 +208,7 @@ FTRACE_ENTRY(bprint, bprint_entry,
        ),
 
        F_printk("%ps: %s",
-                (void *)__entry->ip, __entry->fmt),
-
-       FILTER_OTHER
+                (void *)__entry->ip, __entry->fmt)
 );
 
 FTRACE_ENTRY_REG(print, print_entry,
@@ -239,8 +223,6 @@ FTRACE_ENTRY_REG(print, print_entry,
        F_printk("%ps: %s",
                 (void *)__entry->ip, __entry->buf),
 
-       FILTER_OTHER,
-
        ftrace_event_register
 );
 
@@ -254,9 +236,7 @@ FTRACE_ENTRY(raw_data, raw_data_entry,
        ),
 
        F_printk("id:%04x %08x",
-                __entry->id, (int)__entry->buf[0]),
-
-       FILTER_OTHER
+                __entry->id, (int)__entry->buf[0])
 );
 
 FTRACE_ENTRY(bputs, bputs_entry,
@@ -269,9 +249,7 @@ FTRACE_ENTRY(bputs, bputs_entry,
        ),
 
        F_printk("%ps: %s",
-                (void *)__entry->ip, __entry->str),
-
-       FILTER_OTHER
+                (void *)__entry->ip, __entry->str)
 );
 
 FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
@@ -283,16 +261,14 @@ FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
                __field_desc(   resource_size_t, rw,    phys    )
                __field_desc(   unsigned long,  rw,     value   )
                __field_desc(   unsigned long,  rw,     pc      )
-               __field_desc(   int,            rw,     map_id  )
+               __field_desc(   int,            rw,     map_id  )
                __field_desc(   unsigned char,  rw,     opcode  )
                __field_desc(   unsigned char,  rw,     width   )
        ),
 
        F_printk("%lx %lx %lx %d %x %x",
                 (unsigned long)__entry->phys, __entry->value, __entry->pc,
-                __entry->map_id, __entry->opcode, __entry->width),
-
-       FILTER_OTHER
+                __entry->map_id, __entry->opcode, __entry->width)
 );
 
 FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
@@ -304,15 +280,13 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
                __field_desc(   resource_size_t, map,   phys    )
                __field_desc(   unsigned long,  map,    virt    )
                __field_desc(   unsigned long,  map,    len     )
-               __field_desc(   int,            map,    map_id  )
+               __field_desc(   int,            map,    map_id  )
                __field_desc(   unsigned char,  map,    opcode  )
        ),
 
        F_printk("%lx %lx %lx %d %x",
                 (unsigned long)__entry->phys, __entry->virt, __entry->len,
-                __entry->map_id, __entry->opcode),
-
-       FILTER_OTHER
+                __entry->map_id, __entry->opcode)
 );
 
 
@@ -334,9 +308,7 @@ FTRACE_ENTRY(branch, trace_branch,
        F_printk("%u:%s:%s (%u)%s",
                 __entry->line,
                 __entry->func, __entry->file, __entry->correct,
-                __entry->constant ? " CONSTANT" : ""),
-
-       FILTER_OTHER
+                __entry->constant ? " CONSTANT" : "")
 );
 
 
@@ -362,7 +334,5 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
                 __entry->duration,
                 __entry->outer_duration,
                 __entry->nmi_total_ts,
-                __entry->nmi_count),
-
-       FILTER_OTHER
+                __entry->nmi_count)
 );
index c6de3cebc127fe83842887a300cae6d44dcd8c98..c8622a44d300e24487eaf187aa037447ecaf2450 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 
 #include <trace/events/sched.h>
+#include <trace/syscall.h>
 
 #include <asm/setup.h>
 
@@ -320,7 +321,8 @@ void trace_event_enable_cmd_record(bool enable)
        struct trace_event_file *file;
        struct trace_array *tr;
 
-       mutex_lock(&event_mutex);
+       lockdep_assert_held(&event_mutex);
+
        do_for_each_event_file(tr, file) {
 
                if (!(file->flags & EVENT_FILE_FL_ENABLED))
@@ -334,7 +336,6 @@ void trace_event_enable_cmd_record(bool enable)
                        clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                }
        } while_for_each_event_file();
-       mutex_unlock(&event_mutex);
 }
 
 void trace_event_enable_tgid_record(bool enable)
@@ -342,7 +343,8 @@ void trace_event_enable_tgid_record(bool enable)
        struct trace_event_file *file;
        struct trace_array *tr;
 
-       mutex_lock(&event_mutex);
+       lockdep_assert_held(&event_mutex);
+
        do_for_each_event_file(tr, file) {
                if (!(file->flags & EVENT_FILE_FL_ENABLED))
                        continue;
@@ -356,7 +358,6 @@ void trace_event_enable_tgid_record(bool enable)
                                  &file->flags);
                }
        } while_for_each_event_file();
-       mutex_unlock(&event_mutex);
 }
 
 static int __ftrace_event_enable_disable(struct trace_event_file *file,
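
The locking moves up a level: instead of taking event_mutex themselves, these helpers now assert it, and set_tracer_flag() callers acquire it alongside trace_types_lock (see the trace.c hunks above). The resulting calling convention:

mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);

ret = set_tracer_flag(tr, 1 << index, val);	/* may toggle cmd/tgid recording */

mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);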
@@ -2017,7 +2018,24 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
         */
        head = trace_get_fields(call);
        if (list_empty(head)) {
-               ret = call->class->define_fields(call);
+               struct trace_event_fields *field = call->class->fields_array;
+               unsigned int offset = sizeof(struct trace_entry);
+
+               for (; field->type; field++) {
+                       if (field->type == TRACE_FUNCTION_TYPE) {
+                               ret = field->define_fields(call);
+                               break;
+                       }
+
+                       offset = ALIGN(offset, field->align);
+                       ret = trace_define_field(call, field->type, field->name,
+                                                offset, field->size,
+                                                field->is_signed, field->filter_type);
+                       if (ret)
+                               break;
+
+                       offset += field->size;
+               }
                if (ret < 0) {
                        pr_warn("Could not initialize trace point events/%s\n",
                                name);
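
The walker replaces per-event define_fields() callbacks with a data table: a NULL .type entry terminates the array, and TRACE_FUNCTION_TYPE is the escape hatch back to a callback for events that still need code. A hand-written table consumed by this loop would plausibly look like:

static struct trace_event_fields my_event_fields[] = {
	{ .type = "unsigned long", .name = "ip",
	  .size = sizeof(unsigned long), .align = __alignof__(unsigned long),
	  .is_signed = 0, .filter_type = FILTER_OTHER },
	{}	/* .type == NULL ends the walk */
};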
index c9a74f82b14a279c270268c43cad6287d691d438..bf44f6bbd0c36c896f7110840043f9bd2befc1fd 100644 (file)
@@ -1662,7 +1662,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
        parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0);
        return -EINVAL;
  fail_mem:
-       kfree(filter);
+       __free_filter(filter);
        /* If any call succeeded, we still need to sync */
        if (!fail)
                tracepoint_synchronize_unregister();
index f49d1a36d3ae3896672e0b1a65dad6648ac95f7d..f2896d13001b81301672379160a512b9d6022263 100644 (file)
@@ -116,6 +116,7 @@ struct hist_field {
        struct ftrace_event_field       *field;
        unsigned long                   flags;
        hist_field_fn_t                 fn;
+       unsigned int                    ref;
        unsigned int                    size;
        unsigned int                    offset;
        unsigned int                    is_signed;
@@ -911,7 +912,26 @@ static notrace void trace_event_raw_event_synth(void *__data,
                        strscpy(str_field, str_val, STR_VAR_LEN_MAX);
                        n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
                } else {
-                       entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
+                       struct synth_field *field = event->fields[i];
+                       u64 val = var_ref_vals[var_ref_idx + i];
+
+                       switch (field->size) {
+                       case 1:
+                               *(u8 *)&entry->fields[n_u64] = (u8)val;
+                               break;
+
+                       case 2:
+                               *(u16 *)&entry->fields[n_u64] = (u16)val;
+                               break;
+
+                       case 4:
+                               *(u32 *)&entry->fields[n_u64] = (u32)val;
+                               break;
+
+                       default:
+                               entry->fields[n_u64] = val;
+                               break;
+                       }
                        n_u64++;
                }
        }
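
Storing only field->size bytes matters beyond saving space: the field was defined with the narrow type at the start of its u64 slot, so on big-endian machines writing the whole u64 would leave the meaningful bytes at the wrong end. A tiny illustration:

u64 slot = 0;
u16 val = 0x1234;

*(u16 *)&slot = val;	/* a u16 reader of the slot sees 0x1234 everywhere */
slot = val;		/* on big-endian, a u16 read of &slot yields 0x0000 */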
@@ -1135,6 +1155,12 @@ static struct synth_event *find_synth_event(const char *name)
        return NULL;
 }
 
+static struct trace_event_fields synth_event_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = synth_event_define_fields },
+       {}
+};
+
 static int register_synth_event(struct synth_event *event)
 {
        struct trace_event_call *call = &event->call;
@@ -1156,7 +1182,7 @@ static int register_synth_event(struct synth_event *event)
 
        INIT_LIST_HEAD(&call->class->fields);
        call->event.funcs = &synth_event_funcs;
-       call->class->define_fields = synth_event_define_fields;
+       call->class->fields_array = synth_event_fields_array;
 
        ret = register_trace_event(&call->event);
        if (!ret) {
@@ -1747,11 +1773,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data,
        struct event_trigger_data *test;
        struct hist_field *hist_field;
 
+       lockdep_assert_held(&event_mutex);
+
        hist_field = find_var_field(hist_data, var_name);
        if (hist_field)
                return hist_field;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        test_data = test->private_data;
                        hist_field = find_var_field(test_data, var_name);
@@ -1801,7 +1829,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file,
        struct event_trigger_data *test;
        struct hist_field *hist_field;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        test_data = test->private_data;
                        hist_field = find_var_field(test_data, var_name);
@@ -2404,8 +2434,16 @@ static int contains_operator(char *str)
        return field_op;
 }
 
+static void get_hist_field(struct hist_field *hist_field)
+{
+       hist_field->ref++;
+}
+
 static void __destroy_hist_field(struct hist_field *hist_field)
 {
+       if (--hist_field->ref > 1)
+               return;
+
        kfree(hist_field->var.name);
        kfree(hist_field->name);
        kfree(hist_field->type);
@@ -2447,6 +2485,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
        if (!hist_field)
                return NULL;
 
+       hist_field->ref = 1;
+
        hist_field->hist_data = hist_data;
 
        if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
@@ -2642,6 +2682,17 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
 {
        unsigned long flags = HIST_FIELD_FL_VAR_REF;
        struct hist_field *ref_field;
+       int i;
+
+       /* Check if the variable already exists */
+       for (i = 0; i < hist_data->n_var_refs; i++) {
+               ref_field = hist_data->var_refs[i];
+               if (ref_field->var.idx == var_field->var.idx &&
+                   ref_field->var.hist_data == var_field->hist_data) {
+                       get_hist_field(ref_field);
+                       return ref_field;
+               }
+       }
 
        ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
        if (ref_field) {
@@ -3096,7 +3147,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
 {
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (test->private_data == hist_data)
                                return test->filter_str;
@@ -3147,9 +3200,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data,
        struct event_trigger_data *test;
        unsigned int n_keys;
 
+       lockdep_assert_held(&event_mutex);
+
        n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        hist_data = test->private_data;
 
@@ -5509,7 +5564,7 @@ static int hist_show(struct seq_file *m, void *v)
                goto out_unlock;
        }
 
-       list_for_each_entry_rcu(data, &event_file->triggers, list) {
+       list_for_each_entry(data, &event_file->triggers, list) {
                if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
                        hist_trigger_show(m, data, n++);
        }
@@ -5902,7 +5957,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
        if (hist_data->attrs->name && !named_data)
                goto new;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -5986,10 +6043,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data,
        struct event_trigger_data *test, *named_data = NULL;
        bool match = false;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (hist_trigger_match(data, test, named_data, false)) {
                                match = true;
@@ -6007,10 +6066,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
        struct hist_trigger_data *hist_data = data->private_data;
        struct event_trigger_data *test, *named_data = NULL;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -6032,10 +6093,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *test, *named_data = NULL;
        bool unregistered = false;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -6061,7 +6124,9 @@ static bool hist_file_check_refs(struct trace_event_file *file)
        struct hist_trigger_data *hist_data;
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        hist_data = test->private_data;
                        if (check_var_refs(hist_data))
@@ -6304,7 +6369,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec,
        struct enable_trigger_data *enable_data = data->private_data;
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
+       list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
+                               lockdep_is_held(&event_mutex)) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (enable_data->enable)
                                test->paused = false;
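
This one keeps the four-argument form of list_for_each_entry_rcu(): the trailing lockdep expression tells the RCU-lockdep machinery that holding event_mutex is as valid a protection as rcu_read_lock(), so neither caller type trips the checker. A generic sketch of the idiom:

/* Walkable under rcu_read_lock() or with *lock held; lockdep checks both. */
static int sum_items(struct list_head *head, struct mutex *lock)
{
	struct item *pos;	/* struct item { struct list_head node; int val; }; */
	int sum = 0;

	list_for_each_entry_rcu(pos, head, node, lockdep_is_held(lock))
		sum += pos->val;

	return sum;
}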
index d43710718ee592a660ef679f48a75c0ffe4ecd81..22bcf7c51d1ee5cf9407aed3a2f47531780335a0 100644 (file)
@@ -17,12 +17,10 @@ static int
 trace_inject_entry(struct trace_event_file *file, void *rec, int len)
 {
        struct trace_event_buffer fbuffer;
-       struct ring_buffer *buffer;
        int written = 0;
        void *entry;
 
        rcu_read_lock_sched();
-       buffer = file->tr->trace_buffer.buffer;
        entry = trace_event_buffer_reserve(&fbuffer, file, len);
        if (entry) {
                memcpy(entry, rec, len);
@@ -197,7 +195,7 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
        unsigned long irq_flags;
        void *entry = NULL;
        int entry_size;
-       u64 val;
+       u64 val = 0;
        int len;
 
        entry = trace_alloc_entry(call, &entry_size);
index 2cd53ca21b515f620873572845a8e591ae44a968..40106fff06a48a53ea9ed5f9e2d36114ed695bfe 100644 (file)
@@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event_file *file)
        struct event_trigger_data *data;
        bool set_cond = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                if (data->filter || event_command_post_trigger(data->cmd_ops) ||
                    event_command_needs_rec(data->cmd_ops)) {
                        set_cond = true;
@@ -536,7 +538,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *test;
        int ret = 0;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
                        ret = -EEXIST;
                        goto out;
@@ -581,7 +585,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *data;
        bool unregistered = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
                        unregistered = true;
                        list_del_rcu(&data->list);
@@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *glob,
        struct event_trigger_data *test;
        int ret = 0;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                test_enable_data = test->private_data;
                if (test_enable_data &&
                    (test->cmd_ops->trigger_type ==
@@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(char *glob,
        struct event_trigger_data *data;
        bool unregistered = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                enable_data = data->private_data;
                if (enable_data &&
                    (data->cmd_ops->trigger_type ==
index 2e6d2e9741cc7f0d165abc9ed314301aedd44a02..77ce5a3b6773e75ff2a117e0913b3588b1cfa529 100644 (file)
@@ -29,10 +29,8 @@ static int ftrace_event_register(struct trace_event_call *call,
  * function and thus become accessible via perf.
  */
 #undef FTRACE_ENTRY_REG
-#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
-                        filter, regfn) \
-       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-                    filter)
+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
+       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 /* not needed for this file */
 #undef __field_struct
@@ -41,6 +39,9 @@ static int ftrace_event_register(struct trace_event_call *call,
 #undef __field
 #define __field(type, item)                            type item;
 
+#undef __field_fn
+#define __field_fn(type, item)                         type item;
+
 #undef __field_desc
 #define __field_desc(type, container, item)            type item;
 
@@ -60,7 +61,7 @@ static int ftrace_event_register(struct trace_event_call *call,
 #define F_printk(fmt, args...) fmt, args
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)    \
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)            \
 struct ____ftrace_##name {                                             \
        tstruct                                                         \
 };                                                                     \
@@ -73,76 +74,46 @@ static void __always_unused ____ftrace_check_##name(void)           \
 }
 
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print, filter)        \
-       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-                    filter)
+#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print)                \
+       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 #include "trace_entries.h"
 
+#undef __field_ext
+#define __field_ext(_type, _item, _filter_type) {                      \
+       .type = #_type, .name = #_item,                                 \
+       .size = sizeof(_type), .align = __alignof__(_type),             \
+       is_signed_type(_type), .filter_type = _filter_type },
+
 #undef __field
-#define __field(type, item)                                            \
-       ret = trace_define_field(event_call, #type, #item,              \
-                                offsetof(typeof(field), item),         \
-                                sizeof(field.item),                    \
-                                is_signed_type(type), filter_type);    \
-       if (ret)                                                        \
-               return ret;
+#define __field(_type, _item) __field_ext(_type, _item, FILTER_OTHER)
+
+#undef __field_fn
+#define __field_fn(_type, _item) __field_ext(_type, _item, FILTER_TRACE_FN)
 
 #undef __field_desc
-#define __field_desc(type, container, item)    \
-       ret = trace_define_field(event_call, #type, #item,              \
-                                offsetof(typeof(field),                \
-                                         container.item),              \
-                                sizeof(field.container.item),          \
-                                is_signed_type(type), filter_type);    \
-       if (ret)                                                        \
-               return ret;
+#define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
 
 #undef __array
-#define __array(type, item, len)                                       \
-       do {                                                            \
-               char *type_str = #type"["__stringify(len)"]";           \
-               BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                 \
-               ret = trace_define_field(event_call, type_str, #item,   \
-                                offsetof(typeof(field), item),         \
-                                sizeof(field.item),                    \
-                                is_signed_type(type), filter_type);    \
-               if (ret)                                                \
-                       return ret;                                     \
-       } while (0);
+#define __array(_type, _item, _len) {                                  \
+       .type = #_type"["__stringify(_len)"]", .name = #_item,          \
+       .size = sizeof(_type[_len]), .align = __alignof__(_type),       \
+       is_signed_type(_type), .filter_type = FILTER_OTHER },
 
 #undef __array_desc
-#define __array_desc(type, container, item, len)                       \
-       BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                         \
-       ret = trace_define_field(event_call, #type "[" #len "]", #item, \
-                                offsetof(typeof(field),                \
-                                         container.item),              \
-                                sizeof(field.container.item),          \
-                                is_signed_type(type), filter_type);    \
-       if (ret)                                                        \
-               return ret;
+#define __array_desc(_type, _container, _item, _len) __array(_type, _item, _len)
 
 #undef __dynamic_array
-#define __dynamic_array(type, item)                                    \
-       ret = trace_define_field(event_call, #type "[]", #item,  \
-                                offsetof(typeof(field), item),         \
-                                0, is_signed_type(type), filter_type);\
-       if (ret)                                                        \
-               return ret;
+#define __dynamic_array(_type, _item) {                                        \
+       .type = #_type "[]", .name = #_item,                            \
+       .size = 0, .align = __alignof__(_type),                         \
+       is_signed_type(_type), .filter_type = FILTER_OTHER },
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)    \
-static int __init                                                      \
-ftrace_define_fields_##name(struct trace_event_call *event_call)       \
-{                                                                      \
-       struct struct_name field;                                       \
-       int ret;                                                        \
-       int filter_type = filter;                                       \
-                                                                       \
-       tstruct;                                                        \
-                                                                       \
-       return ret;                                                     \
-}
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)            \
+static struct trace_event_fields ftrace_event_fields_##name[] = {      \
+       tstruct                                                         \
+       {} };
 
 #include "trace_entries.h"
 
@@ -152,6 +123,9 @@ ftrace_define_fields_##name(struct trace_event_call *event_call)    \
 #undef __field
 #define __field(type, item)
 
+#undef __field_fn
+#define __field_fn(type, item)
+
 #undef __field_desc
 #define __field_desc(type, container, item)
 
@@ -168,12 +142,10 @@ ftrace_define_fields_##name(struct trace_event_call *event_call)  \
 #define F_printk(fmt, args...) __stringify(fmt) ", "  __stringify(args)
 
 #undef FTRACE_ENTRY_REG
-#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
-                        regfn)                                         \
-                                                                       \
+#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, regfn) \
 static struct trace_event_class __refdata event_class_ftrace_##call = {        \
        .system                 = __stringify(TRACE_SYSTEM),            \
-       .define_fields          = ftrace_define_fields_##call,          \
+       .fields_array           = ftrace_event_fields_##call,           \
        .fields                 = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
        .reg                    = regfn,                                \
 };                                                                     \
@@ -191,9 +163,9 @@ static struct trace_event_call __used                                               \
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print, filter) \
+#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print)         \
        FTRACE_ENTRY_REG(call, struct_name, etype,                      \
-                        PARAMS(tstruct), PARAMS(print), filter, NULL)
+                        PARAMS(tstruct), PARAMS(print), NULL)
 
 bool ftrace_event_is_function(struct trace_event_call *call)
 {
index 7f890262c8a3a4b71c01ac0a9c5f9146ed6a5223..aa515d578c5b19638f4122795ab1ee73c930cdb1 100644 (file)
@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
        INIT_HLIST_NODE(&tk->rp.kp.hlist);
        INIT_LIST_HEAD(&tk->rp.kp.list);
 
-       ret = trace_probe_init(&tk->tp, event, group);
+       ret = trace_probe_init(&tk->tp, event, group, false);
        if (ret < 0)
                goto error;
 
@@ -1555,16 +1555,28 @@ static struct trace_event_functions kprobe_funcs = {
        .trace          = print_kprobe_event
 };
 
+static struct trace_event_fields kretprobe_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = kretprobe_event_define_fields },
+       {}
+};
+
+static struct trace_event_fields kprobe_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = kprobe_event_define_fields },
+       {}
+};
+
 static inline void init_trace_event_call(struct trace_kprobe *tk)
 {
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
 
        if (trace_kprobe_is_return(tk)) {
                call->event.funcs = &kretprobe_funcs;
-               call->class->define_fields = kretprobe_event_define_fields;
+               call->class->fields_array = kretprobe_fields_array;
        } else {
                call->event.funcs = &kprobe_funcs;
-               call->class->define_fields = kprobe_event_define_fields;
+               call->class->fields_array = kprobe_fields_array;
        }
 
        call->flags = TRACE_EVENT_FL_KPROBE;
index 905b10af5d5c5470b0b317aadc03937eb76f48ab..9ae87be422f2abee0663128d9ee1f75c646c9b75 100644 (file)
@@ -984,15 +984,19 @@ void trace_probe_cleanup(struct trace_probe *tp)
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
-                    const char *group)
+                    const char *group, bool alloc_filter)
 {
        struct trace_event_call *call;
+       size_t size = sizeof(struct trace_probe_event);
        int ret = 0;
 
        if (!event || !group)
                return -EINVAL;
 
-       tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
+       if (alloc_filter)
+               size += sizeof(struct trace_uprobe_filter);
+
+       tp->event = kzalloc(size, GFP_KERNEL);
        if (!tp->event)
                return -ENOMEM;
 
index 4ee703728aeca5c46712fea9d139d78f2dfcf1ff..a0ff9e200ef6fb3f6617ec6f181e0d78d078135d 100644 (file)
@@ -223,6 +223,12 @@ struct probe_arg {
        const struct fetch_type *type;  /* Type of this argument */
 };
 
+struct trace_uprobe_filter {
+       rwlock_t                rwlock;
+       int                     nr_systemwide;
+       struct list_head        perf_events;
+};
+
 /* Event call and class holder */
 struct trace_probe_event {
        unsigned int                    flags;  /* For TP_FLAG_* */
@@ -230,6 +236,7 @@ struct trace_probe_event {
        struct trace_event_call         call;
        struct list_head                files;
        struct list_head                probes;
+       struct trace_uprobe_filter      filter[0];
 };
 
 struct trace_probe {
@@ -322,7 +329,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
-                    const char *group);
+                    const char *group, bool alloc_filter);
 void trace_probe_cleanup(struct trace_probe *tp);
 int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
 void trace_probe_unlink(struct trace_probe *tp);
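
The zero-length filter[] member lets trace_probe_init() cover both structures with a single kzalloc() when alloc_filter is set (uprobes need the filter, kprobes do not), tying the filter's lifetime to the event's. A hedged sketch of allocation and setup; the init helper named here is an assumption:

struct trace_probe_event *tpe;
size_t size = sizeof(*tpe);

if (alloc_filter)
	size += sizeof(struct trace_uprobe_filter);

tpe = kzalloc(size, GFP_KERNEL);
if (!tpe)
	return -ENOMEM;

if (alloc_filter)
	init_trace_uprobe_filter(tpe->filter);	/* assumed init helper */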
index 5e43b9664ecabb366dc8e81faa4d09d962084baa..617e297f46dcc78aa0c310c3e7d288159eaa0ab2 100644 (file)
@@ -630,7 +630,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_migrate_task\n");
-               return;
+               goto fail_deprobe_sched_switch;
        }
 
        wakeup_reset(tr);
@@ -648,6 +648,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
                printk(KERN_ERR "failed to start wakeup tracer\n");
 
        return;
+fail_deprobe_sched_switch:
+       unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 fail_deprobe:
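
The wakeup-tracer fix supplies the missing unwind step: when registering the migrate_task tracepoint fails, the sched_switch probe registered just before it must be torn down, so the error labels stack in reverse order of setup. A standalone sketch of the goto ladder (hypothetical step/undo helpers):

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }  /* pretend the last step fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int start(void)
    {
            if (step_a())
                    goto fail;
            if (step_b())
                    goto fail_undo_a;
            if (step_c())
                    goto fail_undo_b;       /* the label this patch adds */
            return 0;

    fail_undo_b:
            undo_b();                       /* falls through: undo in reverse */
    fail_undo_a:
            undo_a();
    fail:
            return -1;
    }

    int main(void) { return start() ? 1 : 0; }
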
index 344e4c1aa09ccf6bc69a17da761a45290681407c..87de6edafd147e1c68aa4e485719202926adc45f 100644 (file)
@@ -381,7 +381,7 @@ int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
                       int prefix_type, int rowsize, int groupsize,
                       const void *buf, size_t len, bool ascii)
 {
-               unsigned int save_len = s->seq.len;
+       unsigned int save_len = s->seq.len;
 
        if (s->full)
                return 0;
index 4df9a209f7caf2aaf213c7a45e317dd0c79338f2..c557f42a93971a96512fb90bb4e7be2f0e550b2a 100644 (file)
@@ -283,6 +283,11 @@ static void check_stack(unsigned long ip, unsigned long *stack)
        local_irq_restore(flags);
 }
 
+/* Some archs may not define MCOUNT_INSN_SIZE */
+#ifndef MCOUNT_INSN_SIZE
+# define MCOUNT_INSN_SIZE 0
+#endif
+
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
index 16fa218556faf1cfde3e25b24796a8847b75476b..2978c29d87d4784e4805732331fc1110ed3a4c94 100644 (file)
@@ -203,11 +203,10 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
 
 extern char *__bad_type_size(void);
 
-#define SYSCALL_FIELD(type, field, name)                               \
-       sizeof(type) != sizeof(trace.field) ?                           \
-               __bad_type_size() :                                     \
-               #type, #name, offsetof(typeof(trace), field),           \
-               sizeof(trace.field), is_signed_type(type)
+#define SYSCALL_FIELD(_type, _name) {                                  \
+       .type = #_type, .name = #_name,                                 \
+       .size = sizeof(_type), .align = __alignof__(_type),             \
+       .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }
 
 static int __init
 __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
@@ -274,42 +273,23 @@ static int __init syscall_enter_define_fields(struct trace_event_call *call)
 {
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta = call->data;
-       int ret;
-       int i;
        int offset = offsetof(typeof(trace), args);
-
-       ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
-                                FILTER_OTHER);
-       if (ret)
-               return ret;
+       int ret = 0;
+       int i;
 
        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
+               if (ret)
+                       break;
                offset += sizeof(unsigned long);
        }
 
        return ret;
 }
 
-static int __init syscall_exit_define_fields(struct trace_event_call *call)
-{
-       struct syscall_trace_exit trace;
-       int ret;
-
-       ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
-                                FILTER_OTHER);
-       if (ret)
-               return ret;
-
-       ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
-                                FILTER_OTHER);
-
-       return ret;
-}
-
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
        struct trace_array *tr = data;
@@ -507,6 +487,13 @@ static int __init init_syscall_trace(struct trace_event_call *call)
        return id;
 }
 
+static struct trace_event_fields __refdata syscall_enter_fields_array[] = {
+       SYSCALL_FIELD(int, __syscall_nr),
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = syscall_enter_define_fields },
+       {}
+};
+
 struct trace_event_functions enter_syscall_print_funcs = {
        .trace          = print_syscall_enter,
 };
@@ -518,7 +505,7 @@ struct trace_event_functions exit_syscall_print_funcs = {
 struct trace_event_class __refdata event_class_syscall_enter = {
        .system         = "syscalls",
        .reg            = syscall_enter_register,
-       .define_fields  = syscall_enter_define_fields,
+       .fields_array   = syscall_enter_fields_array,
        .get_fields     = syscall_get_enter_fields,
        .raw_init       = init_syscall_trace,
 };
@@ -526,7 +513,11 @@ struct trace_event_class __refdata event_class_syscall_enter = {
 struct trace_event_class __refdata event_class_syscall_exit = {
        .system         = "syscalls",
        .reg            = syscall_exit_register,
-       .define_fields  = syscall_exit_define_fields,
+       .fields_array   = (struct trace_event_fields[]){
+               SYSCALL_FIELD(int, __syscall_nr),
+               SYSCALL_FIELD(long, ret),
+               {}
+       },
        .fields         = LIST_HEAD_INIT(event_class_syscall_exit.fields),
        .raw_init       = init_syscall_trace,
 };
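
SYSCALL_FIELD turns from a comma-expression trick guarded by __bad_type_size() into a designated initializer, which lets the enter class build a static array and the exit class build its array in place as a compound literal. A standalone sketch of both forms (stub field struct; the is_signed test mirrors what is_signed_type() computes):

    #include <stdio.h>

    struct field_sketch {
            const char *type, *name;
            int size, is_signed;
    };

    /* Designated-initializer form of the kernel's SYSCALL_FIELD macro. */
    #define FIELD(_type, _name) {                           \
            .type = #_type, .name = #_name,                 \
            .size = sizeof(_type),                          \
            .is_signed = ((_type)-1 < (_type)1) }

    /* A compound literal builds the array at the point of use. */
    static struct field_sketch *exit_fields = (struct field_sketch[]){
            FIELD(int, __syscall_nr),
            FIELD(long, ret),
            {}
    };

    int main(void)
    {
            for (struct field_sketch *f = exit_fields; f->type; f++)
                    printf("%s %s: %d bytes, signed=%d\n",
                           f->type, f->name, f->size, f->is_signed);
            return 0;
    }
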
index 352073d36585ade088c3610725a1af273f4fb5a8..7885ebd23d0c10d974abaf6e1a305e4b05974479 100644 (file)
@@ -34,12 +34,6 @@ struct uprobe_trace_entry_head {
 #define DATAOF_TRACE_ENTRY(entry, is_return)           \
        ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
 
-struct trace_uprobe_filter {
-       rwlock_t                rwlock;
-       int                     nr_systemwide;
-       struct list_head        perf_events;
-};
-
 static int trace_uprobe_create(int argc, const char **argv);
 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
 static int trace_uprobe_release(struct dyn_event *ev);
@@ -60,7 +54,6 @@ static struct dyn_event_operations trace_uprobe_ops = {
  */
 struct trace_uprobe {
        struct dyn_event                devent;
-       struct trace_uprobe_filter      filter;
        struct uprobe_consumer          consumer;
        struct path                     path;
        struct inode                    *inode;
@@ -351,7 +344,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
        if (!tu)
                return ERR_PTR(-ENOMEM);
 
-       ret = trace_probe_init(&tu->tp, event, group);
+       ret = trace_probe_init(&tu->tp, event, group, true);
        if (ret < 0)
                goto error;
 
@@ -359,7 +352,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
        tu->consumer.handler = uprobe_dispatcher;
        if (is_ret)
                tu->consumer.ret_handler = uretprobe_dispatcher;
-       init_trace_uprobe_filter(&tu->filter);
+       init_trace_uprobe_filter(tu->tp.event->filter);
        return tu;
 
 error:
@@ -1067,13 +1060,14 @@ static void __probe_event_disable(struct trace_probe *tp)
        struct trace_probe *pos;
        struct trace_uprobe *tu;
 
+       tu = container_of(tp, struct trace_uprobe, tp);
+       WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
+
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tu = container_of(pos, struct trace_uprobe, tp);
                if (!tu->inode)
                        continue;
 
-               WARN_ON(!uprobe_filter_is_empty(&tu->filter));
-
                uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
                tu->inode = NULL;
        }
@@ -1108,7 +1102,7 @@ static int probe_event_enable(struct trace_event_call *call,
        }
 
        tu = container_of(tp, struct trace_uprobe, tp);
-       WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+       WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
 
        if (enabled)
                return 0;
@@ -1205,39 +1199,39 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 }
 
 static inline bool
-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
+                         struct perf_event *event)
 {
-       return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
+       return __uprobe_perf_filter(filter, event->hw.target->mm);
 }
 
-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
+static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
+                                      struct perf_event *event)
 {
        bool done;
 
-       write_lock(&tu->filter.rwlock);
+       write_lock(&filter->rwlock);
        if (event->hw.target) {
                list_del(&event->hw.tp_list);
-               done = tu->filter.nr_systemwide ||
+               done = filter->nr_systemwide ||
                        (event->hw.target->flags & PF_EXITING) ||
-                       uprobe_filter_event(tu, event);
+                       trace_uprobe_filter_event(filter, event);
        } else {
-               tu->filter.nr_systemwide--;
-               done = tu->filter.nr_systemwide;
+               filter->nr_systemwide--;
+               done = filter->nr_systemwide;
        }
-       write_unlock(&tu->filter.rwlock);
+       write_unlock(&filter->rwlock);
 
-       if (!done)
-               return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
-
-       return 0;
+       return done;
 }
 
-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
+/* This returns true if the filter always covers target mm */
+static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
+                                   struct perf_event *event)
 {
        bool done;
-       int err;
 
-       write_lock(&tu->filter.rwlock);
+       write_lock(&filter->rwlock);
        if (event->hw.target) {
                /*
                 * event->parent != NULL means copy_process(), we can avoid
@@ -1247,28 +1241,21 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
                 * attr.enable_on_exec means that exec/mmap will install the
                 * breakpoints we need.
                 */
-               done = tu->filter.nr_systemwide ||
+               done = filter->nr_systemwide ||
                        event->parent || event->attr.enable_on_exec ||
-                       uprobe_filter_event(tu, event);
-               list_add(&event->hw.tp_list, &tu->filter.perf_events);
+                       trace_uprobe_filter_event(filter, event);
+               list_add(&event->hw.tp_list, &filter->perf_events);
        } else {
-               done = tu->filter.nr_systemwide;
-               tu->filter.nr_systemwide++;
+               done = filter->nr_systemwide;
+               filter->nr_systemwide++;
        }
-       write_unlock(&tu->filter.rwlock);
+       write_unlock(&filter->rwlock);
 
-       err = 0;
-       if (!done) {
-               err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
-               if (err)
-                       uprobe_perf_close(tu, event);
-       }
-       return err;
+       return done;
 }
 
-static int uprobe_perf_multi_call(struct trace_event_call *call,
-                                 struct perf_event *event,
-               int (*op)(struct trace_uprobe *tu, struct perf_event *event))
+static int uprobe_perf_close(struct trace_event_call *call,
+                            struct perf_event *event)
 {
        struct trace_probe *pos, *tp;
        struct trace_uprobe *tu;
@@ -1278,25 +1265,59 @@ static int uprobe_perf_multi_call(struct trace_event_call *call,
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;
 
+       tu = container_of(tp, struct trace_uprobe, tp);
+       if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
+               return 0;
+
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tu = container_of(pos, struct trace_uprobe, tp);
-               ret = op(tu, event);
+               ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
                if (ret)
                        break;
        }
 
        return ret;
 }
+
+static int uprobe_perf_open(struct trace_event_call *call,
+                           struct perf_event *event)
+{
+       struct trace_probe *pos, *tp;
+       struct trace_uprobe *tu;
+       int err = 0;
+
+       tp = trace_probe_primary_from_call(call);
+       if (WARN_ON_ONCE(!tp))
+               return -ENODEV;
+
+       tu = container_of(tp, struct trace_uprobe, tp);
+       if (trace_uprobe_filter_add(tu->tp.event->filter, event))
+               return 0;
+
+       list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+               err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+               if (err) {
+                       uprobe_perf_close(call, event);
+                       break;
+               }
+       }
+
+       return err;
+}
+
 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
                                enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 {
+       struct trace_uprobe_filter *filter;
        struct trace_uprobe *tu;
        int ret;
 
        tu = container_of(uc, struct trace_uprobe, consumer);
-       read_lock(&tu->filter.rwlock);
-       ret = __uprobe_perf_filter(&tu->filter, mm);
-       read_unlock(&tu->filter.rwlock);
+       filter = tu->tp.event->filter;
+
+       read_lock(&filter->rwlock);
+       ret = __uprobe_perf_filter(filter, mm);
+       read_unlock(&filter->rwlock);
 
        return ret;
 }
@@ -1419,10 +1440,10 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
                return 0;
 
        case TRACE_REG_PERF_OPEN:
-               return uprobe_perf_multi_call(event, data, uprobe_perf_open);
+               return uprobe_perf_open(event, data);
 
        case TRACE_REG_PERF_CLOSE:
-               return uprobe_perf_multi_call(event, data, uprobe_perf_close);
+               return uprobe_perf_close(event, data);
 
 #endif
        default:
@@ -1507,12 +1528,17 @@ static struct trace_event_functions uprobe_funcs = {
        .trace          = print_uprobe_event
 };
 
+static struct trace_event_fields uprobe_fields_array[] = {
+       { .type = TRACE_FUNCTION_TYPE,
+         .define_fields = uprobe_event_define_fields },
+       {}
+};
+
 static inline void init_trace_event_call(struct trace_uprobe *tu)
 {
        struct trace_event_call *call = trace_probe_event_call(&tu->tp);
-
        call->event.funcs = &uprobe_funcs;
-       call->class->define_fields = uprobe_event_define_fields;
+       call->class->fields_array = uprobe_fields_array;
 
        call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
        call->class->reg = trace_uprobe_register;
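
Moving the filter from struct trace_uprobe into the shared trace_probe_event lets uprobe_perf_open()/close() consult one filter up front and walk the probe list only when the filter state actually changed; previously each probe carried its own copy and they could disagree. A standalone sketch of the short-circuit shape (locking and the per-task case elided):

    #include <stdbool.h>
    #include <stdio.h>

    struct filter_sketch { int nr_systemwide; };

    /* Returns true when the filter already covered the target, i.e. the
     * caller can skip re-applying breakpoints to every probe. */
    static bool filter_add(struct filter_sketch *f, bool systemwide)
    {
            bool done;

            if (systemwide) {
                    done = f->nr_systemwide;   /* already covered? */
                    f->nr_systemwide++;
            } else {
                    done = false;              /* per-task case elided */
            }
            return done;
    }

    int main(void)
    {
            struct filter_sketch f = { 0 };

            if (!filter_add(&f, true))
                    puts("first open: apply probes");
            if (filter_add(&f, true))
                    puts("second open: filter unchanged, skip apply");
            return 0;
    }
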
index 9a1c22310323d0a4bcf97f75a49d7cdfc859c7fe..9e31bfc818ff821dae0f82e4f5c1048a149a9f20 100644 (file)
@@ -148,8 +148,8 @@ static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
 #define DEFINE_TRACING_MAP_CMP_FN(type)                                        \
 static int tracing_map_cmp_##type(void *val_a, void *val_b)            \
 {                                                                      \
-       type a = *(type *)val_a;                                        \
-       type b = *(type *)val_b;                                        \
+       type a = (type)(*(u64 *)val_a);                                 \
+       type b = (type)(*(u64 *)val_b);                                 \
                                                                        \
        return (a > b) ? 1 : ((a < b) ? -1 : 0);                        \
 }
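
tracing_map widens every stored value to a u64 slot, so a comparison helper must load the whole u64 and then narrow it; reinterpreting the slot pointer as a narrower type reads the wrong end of the slot on big-endian machines. A standalone illustration (the "bad" load also violates strict aliasing, which is part of the point):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t slot = 42;     /* a value stored widened to 64 bits */

            /* Correct: read the whole slot, then narrow. */
            int16_t good = (int16_t)(*(uint64_t *)&slot);

            /* Buggy on big-endian: reads the slot's two *high* bytes. */
            int16_t bad = *(int16_t *)&slot;

            printf("good=%d bad=%d (equal only on little-endian)\n",
                   good, bad);
            return 0;
    }
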
index 862b460ab97a8e954c3ac292be94d88670a6fb0c..53144d0562522e6d00824073f65ffa9926ae6261 100644 (file)
@@ -68,9 +68,8 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * Preemption is disabled here to make sure the cond_func is called under the
  * same conditions in UP and SMP.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-                          smp_call_func_t func, void *info, bool wait,
-                          gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+                          void *info, bool wait, const struct cpumask *mask)
 {
        unsigned long flags;
 
@@ -84,11 +83,10 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-                     smp_call_func_t func, void *info, bool wait,
-                     gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+                     void *info, bool wait)
 {
-       on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+       on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
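
The gfp_flags argument disappears because on_each_cpu_cond_mask() no longer allocates a temporary cpumask, and the predicate/function pair collapses into the smp_cond_func_t/smp_call_func_t typedefs. A standalone sketch of the calling shape (a plain loop standing in for the cross-CPU machinery):

    #include <stdbool.h>
    #include <stdio.h>

    typedef bool (*cond_func_t)(int cpu, void *info);
    typedef void (*call_func_t)(void *info);

    /* Loop stand-in for on_each_cpu_cond(): no allocation, no gfp flags. */
    static void for_each_cpu_cond(cond_func_t cond, call_func_t func,
                                  void *info, int nr_cpus)
    {
            for (int cpu = 0; cpu < nr_cpus; cpu++)
                    if (cond(cpu, info))
                            func(info);
    }

    static bool even_cpu(int cpu, void *info) { (void)info; return !(cpu & 1); }
    static void work(void *info) { printf("ran for %s\n", (char *)info); }

    int main(void)
    {
            for_each_cpu_cond(even_cpu, work, "even cpus", 4);
            return 0;
    }
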
 
index f41334ef097130da12b365e61ae75533775205ad..b6b1f54a78376e6429ab3bc97e2c09c79f9428cc 100644 (file)
@@ -161,6 +161,8 @@ static void lockup_detector_update_enable(void)
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
 
+#define SOFTLOCKUP_RESET       ULONG_MAX
+
 /* Global variables, exported for sysctl */
 unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
@@ -173,8 +175,6 @@ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
 static DEFINE_PER_CPU(bool, soft_watchdog_warn);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
-static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
-static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static unsigned long soft_lockup_nmi_warn;
 
@@ -274,7 +274,7 @@ notrace void touch_softlockup_watchdog_sched(void)
         * Preemption can be enabled.  It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
-       raw_cpu_write(watchdog_touch_ts, 0);
+       raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
 }
 
 notrace void touch_softlockup_watchdog(void)
@@ -298,14 +298,14 @@ void touch_all_softlockup_watchdogs(void)
         * the softlockup check.
         */
        for_each_cpu(cpu, &watchdog_allowed_mask)
-               per_cpu(watchdog_touch_ts, cpu) = 0;
+               per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
        wq_watchdog_touch(-1);
 }
 
 void touch_softlockup_watchdog_sync(void)
 {
        __this_cpu_write(softlockup_touch_sync, true);
-       __this_cpu_write(watchdog_touch_ts, 0);
+       __this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
 }
 
 static int is_softlockup(unsigned long touch_ts)
@@ -350,8 +350,6 @@ static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
  */
 static int softlockup_fn(void *data)
 {
-       __this_cpu_write(soft_lockup_hrtimer_cnt,
-                        __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
        complete(this_cpu_ptr(&softlockup_completion));
 
@@ -383,7 +381,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
 
-       if (touch_ts == 0) {
+       if (touch_ts == SOFTLOCKUP_RESET) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
@@ -416,22 +414,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                        return HRTIMER_RESTART;
 
                /* only warn once */
-               if (__this_cpu_read(soft_watchdog_warn) == true) {
-                       /*
-                        * When multiple processes are causing softlockups the
-                        * softlockup detector only warns on the first one
-                        * because the code relies on a full quiet cycle to
-                        * re-arm.  The second process prevents the quiet cycle
-                        * and never gets reported.  Use task pointers to detect
-                        * this.
-                        */
-                       if (__this_cpu_read(softlockup_task_ptr_saved) !=
-                           current) {
-                               __this_cpu_write(soft_watchdog_warn, false);
-                               __touch_watchdog();
-                       }
+               if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
-               }
 
                if (softlockup_all_cpu_backtrace) {
                        /* Prevent multiple soft-lockup reports if one cpu is already
@@ -447,7 +431,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
-               __this_cpu_write(softlockup_task_ptr_saved, current);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
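
Replacing 0 with SOFTLOCKUP_RESET (ULONG_MAX) as the "watchdog was touched" marker removes the ambiguity with a genuine timestamp of zero, which is possible early in boot or on wraparound, and the removed task-pointer bookkeeping goes with it. A standalone sketch of the sentinel check:

    #include <limits.h>
    #include <stdio.h>

    #define TOUCH_RESET ULONG_MAX   /* sentinel: cannot be a real timestamp */

    static unsigned long touch_ts = TOUCH_RESET;

    static int is_stuck(unsigned long now, unsigned long threshold)
    {
            if (touch_ts == TOUCH_RESET)   /* freshly touched: re-arm */
                    return 0;
            return now - touch_ts > threshold;
    }

    int main(void)
    {
            printf("stuck=%d\n", is_stuck(100, 20));   /* 0: just touched */
            touch_ts = 10;
            printf("stuck=%d\n", is_stuck(100, 20));   /* 1: 90 > 20 */
            return 0;
    }
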
index bc88fd939f4e72c43de15ad96c42aa2fdce4a996..301db4406bc37ab1805806573124fe696c41518e 100644 (file)
@@ -2266,7 +2266,7 @@ __acquires(&pool->lock)
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
         */
-       trace_workqueue_execute_end(work);
+       trace_workqueue_execute_end(work, worker->current_func);
        lock_map_release(&lockdep_map);
        lock_map_release(&pwq->wq->lockdep_map);
 
@@ -2280,7 +2280,7 @@ __acquires(&pool->lock)
        }
 
        /*
-        * The following prevents a kworker from hogging CPU on !PREEMPT
+        * The following prevents a kworker from hogging CPU on !PREEMPTION
         * kernels, where a requeueing work item waiting for something to
         * happen could deadlock with stop_machine as such work item could
         * indefinitely requeue itself while all other CPUs are trapped in
@@ -4374,8 +4374,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
        for_each_pwq(pwq, wq) {
                spin_lock_irq(&pwq->pool->lock);
                if (WARN_ON(pwq_busy(pwq))) {
-                       pr_warning("%s: %s has the following busy pwq\n",
-                                  __func__, wq->name);
+                       pr_warn("%s: %s has the following busy pwq\n",
+                               __func__, wq->name);
                        show_pwq(pwq);
                        spin_unlock_irq(&pwq->pool->lock);
                        mutex_unlock(&wq->mutex);
index d1842fe756d554c6a6a74bf4727bcf31c16e7b7a..6859f523517b6dbe87cf00b6147959c2e2c182b0 100644 (file)
@@ -1025,7 +1025,7 @@ config DEBUG_TIMEKEEPING
 
 config DEBUG_PREEMPT
        bool "Debug preemptible kernel"
-       depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
+       depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
        default y
        help
          If you say Y here then the kernel will use a debug variant of the
@@ -1483,6 +1483,55 @@ config PROVIDE_OHCI1394_DMA_INIT
 
          See Documentation/debugging-via-ohci1394.txt for more information.
 
+source "samples/Kconfig"
+
+config ARCH_HAS_DEVMEM_IS_ALLOWED
+       bool
+
+config STRICT_DEVMEM
+       bool "Filter access to /dev/mem"
+       depends on MMU && DEVMEM
+       depends on ARCH_HAS_DEVMEM_IS_ALLOWED
+       default y if PPC || X86 || ARM64
+       help
+         If this option is disabled, you allow userspace (root) access to all
+         of memory, including kernel and userspace memory. Accidental
+         access to this is obviously disastrous, but specific access can
+         be used by people debugging the kernel. Note that with PAT support
+         enabled, even in this case there are restrictions on /dev/mem
+         use due to the cache aliasing requirements.
+
+         If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
+         file only allows userspace access to PCI space and the BIOS code and
+         data regions.  This is sufficient for dosemu and X and all common
+         users of /dev/mem.
+
+         If in doubt, say Y.
+
+config IO_STRICT_DEVMEM
+       bool "Filter I/O access to /dev/mem"
+       depends on STRICT_DEVMEM
+       help
+         If this option is disabled, you allow userspace (root) access to all
+         io-memory regardless of whether a driver is actively using that
+         range.  Accidental access to this is obviously disastrous, but
+         specific access can be used by people debugging kernel drivers.
+
+         If this option is switched on, the /dev/mem file only allows
+         userspace access to *idle* io-memory ranges (see /proc/iomem). This
+         may break traditional users of /dev/mem (dosemu, legacy X, etc...)
+         if the driver using a given range cannot be disabled.
+
+         If in doubt, say Y.
+
+menu "$(SRCARCH) Debugging"
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu
+
+menu "Kernel Testing and Coverage"
+
 source "lib/kunit/Kconfig"
 
 config NOTIFIER_ERROR_INJECTION
@@ -1643,10 +1692,6 @@ config FAULT_INJECTION_STACKTRACE_FILTER
        help
          Provide stacktrace filter for fault-injection capabilities
 
-endmenu # "Kernel Testing and Coverage"
-
-menu "Kernel Testing and Coverage"
-
 config ARCH_HAS_KCOV
        bool
        help
@@ -2130,52 +2175,7 @@ config MEMTEST
                memtest=17, means do 17 test patterns.
          If you are unsure how to answer this question, answer N.
 
-source "samples/Kconfig"
-
-config ARCH_HAS_DEVMEM_IS_ALLOWED
-       bool
-
-config STRICT_DEVMEM
-       bool "Filter access to /dev/mem"
-       depends on MMU && DEVMEM
-       depends on ARCH_HAS_DEVMEM_IS_ALLOWED
-       default y if PPC || X86 || ARM64
-       ---help---
-         If this option is disabled, you allow userspace (root) access to all
-         of memory, including kernel and userspace memory. Accidental
-         access to this is obviously disastrous, but specific access can
-         be used by people debugging the kernel. Note that with PAT support
-         enabled, even in this case there are restrictions on /dev/mem
-         use due to the cache aliasing requirements.
-
-         If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
-         file only allows userspace access to PCI space and the BIOS code and
-         data regions.  This is sufficient for dosemu and X and all common
-         users of /dev/mem.
-
-         If in doubt, say Y.
 
-config IO_STRICT_DEVMEM
-       bool "Filter I/O access to /dev/mem"
-       depends on STRICT_DEVMEM
-       ---help---
-         If this option is disabled, you allow userspace (root) access to all
-         io-memory regardless of whether a driver is actively using that
-         range.  Accidental access to this is obviously disastrous, but
-         specific access can be used by people debugging kernel drivers.
-
-         If this option is switched on, the /dev/mem file only allows
-         userspace access to *idle* io-memory ranges (see /proc/iomem) This
-         may break traditional users of /dev/mem (dosemu, legacy X, etc...)
-         if the driver using a given range cannot be disabled.
-
-         If in doubt, say Y.
-
-menu "$(SRCARCH) Debugging"
-
-source "arch/$(SRCARCH)/Kconfig.debug"
-
-endmenu
 
 config HYPERV_TESTING
        bool "Microsoft Hyper-V driver testing"
@@ -2184,4 +2184,6 @@ config HYPERV_TESTING
        help
          Select this option to enable Hyper-V vmbus testing.
 
+endmenu # "Kernel Testing and Coverage"
+
 endmenu # Kernel hacking
index 93217d44237f2893555f03992424756a1d83080e..c20b1debe9b4f984e4c375d5d42a0e2f1bee156c 100644 (file)
@@ -223,7 +223,7 @@ KASAN_SANITIZE_stackdepot.o := n
 KCOV_INSTRUMENT_stackdepot.o := n
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
-              fdt_empty_tree.o
+              fdt_empty_tree.o fdt_addresses.o
 $(foreach file, $(libfdt_files), \
        $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)
index 0ef8ae6ac04799b3ae6344566585aa8a25930e5d..f8928ce282808cc02179c8d155f3b33948f7cc71 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/crc64.h>
 #include "crc64table.h"
 
 MODULE_DESCRIPTION("CRC64 calculations");
index 61261195f5b60b4c143adbca5967f979372798c5..48054dbf1b51fbd1bb6486bcb8b5de1786bc2304 100644 (file)
@@ -132,14 +132,18 @@ static void fill_pool(void)
        struct debug_obj *obj;
        unsigned long flags;
 
-       if (likely(obj_pool_free >= debug_objects_pool_min_level))
+       if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
                return;
 
        /*
         * Reuse objs from the global free list; they will be reinitialized
         * when allocating.
+        *
+        * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
+        * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
+        * sections.
         */
-       while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+       while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
                raw_spin_lock_irqsave(&pool_lock, flags);
                /*
                 * Recheck with the lock held as the worker thread might have
@@ -148,9 +152,9 @@ static void fill_pool(void)
                while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
                        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                        hlist_del(&obj->node);
-                       obj_nr_tofree--;
+                       WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
                        hlist_add_head(&obj->node, &obj_pool);
-                       obj_pool_free++;
+                       WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
@@ -158,7 +162,7 @@ static void fill_pool(void)
        if (unlikely(!obj_cache))
                return;
 
-       while (obj_pool_free < debug_objects_pool_min_level) {
+       while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
                struct debug_obj *new[ODEBUG_BATCH_SIZE];
                int cnt;
 
@@ -174,7 +178,7 @@ static void fill_pool(void)
                while (cnt) {
                        hlist_add_head(&new[--cnt]->node, &obj_pool);
                        debug_objects_allocated++;
-                       obj_pool_free++;
+                       WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
@@ -236,7 +240,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
        obj = __alloc_object(&obj_pool);
        if (obj) {
                obj_pool_used++;
-               obj_pool_free--;
+               WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 
                /*
                 * Looking ahead, allocate one batch of debug objects and
@@ -255,7 +259,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
                                               &percpu_pool->free_objs);
                                percpu_pool->obj_free++;
                                obj_pool_used++;
-                               obj_pool_free--;
+                               WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                        }
                }
 
@@ -309,8 +313,8 @@ static void free_obj_work(struct work_struct *work)
                obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &obj_pool);
-               obj_pool_free++;
-               obj_nr_tofree--;
+               WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+               WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        return;
@@ -324,7 +328,7 @@ free_objs:
        if (obj_nr_tofree) {
                hlist_move_list(&obj_to_free, &tofree);
                debug_objects_freed += obj_nr_tofree;
-               obj_nr_tofree = 0;
+               WRITE_ONCE(obj_nr_tofree, 0);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
 
@@ -375,10 +379,10 @@ free_to_obj_pool:
        obj_pool_used--;
 
        if (work) {
-               obj_nr_tofree++;
+               WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                hlist_add_head(&obj->node, &obj_to_free);
                if (lookahead_count) {
-                       obj_nr_tofree += lookahead_count;
+                       WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
@@ -396,15 +400,15 @@ free_to_obj_pool:
                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                obj = __alloc_object(&obj_pool);
                                hlist_add_head(&obj->node, &obj_to_free);
-                               obj_pool_free--;
-                               obj_nr_tofree++;
+                               WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+                               WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                        }
                }
        } else {
-               obj_pool_free++;
+               WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                hlist_add_head(&obj->node, &obj_pool);
                if (lookahead_count) {
-                       obj_pool_free += lookahead_count;
+                       WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
@@ -423,7 +427,7 @@ free_to_obj_pool:
 static void free_object(struct debug_obj *obj)
 {
        __free_object(obj);
-       if (!obj_freeing && obj_nr_tofree) {
+       if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
@@ -982,7 +986,7 @@ repeat:
                debug_objects_maxchecked = objs_checked;
 
        /* Schedule work to actually kmem_cache_free() objects */
-       if (!obj_freeing && obj_nr_tofree) {
+       if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
@@ -1008,12 +1012,12 @@ static int debug_stats_show(struct seq_file *m, void *v)
        seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
        seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
-       seq_printf(m, "pool_free     :%d\n", obj_pool_free + obj_percpu_free);
+       seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
        seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
-       seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
+       seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
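
obj_pool_free and obj_nr_tofree are read without pool_lock in the fast paths, so the patch pairs every locked update with WRITE_ONCE() and every lockless read with READ_ONCE(), preventing the compiler from tearing, fusing, or re-reading them; the lock still serializes the actual mutations. A standalone sketch using volatile-based stand-ins for the kernel macros:

    #include <stdio.h>

    /* Minimal stand-ins for the kernel's READ_ONCE/WRITE_ONCE: force a
     * single, untorn access through a volatile-qualified lvalue. */
    #define READ_ONCE(x)      (*(volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    static int pool_free;

    static void refill_if_low(int min_level)
    {
            /* Lockless heuristic read: stale is fine, torn is not. */
            if (READ_ONCE(pool_free) >= min_level)
                    return;
            /* ... take the lock, then publish updates with WRITE_ONCE ... */
            WRITE_ONCE(pool_free, pool_free + 1);
    }

    int main(void)
    {
            refill_if_low(1);
            printf("pool_free=%d\n", READ_ONCE(pool_free));
            return 0;
    }
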
index f56070cf970b6a4ff9c760e598dceec489f2ec59..6ef51f159c54b97f41fffaa58660ed6d97726b1e 100644 (file)
@@ -8,7 +8,6 @@
 
 enum devm_ioremap_type {
        DEVM_IOREMAP = 0,
-       DEVM_IOREMAP_NC,
        DEVM_IOREMAP_UC,
        DEVM_IOREMAP_WC,
 };
@@ -37,9 +36,6 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
        case DEVM_IOREMAP:
                addr = ioremap(offset, size);
                break;
-       case DEVM_IOREMAP_NC:
-               addr = ioremap_nocache(offset, size);
-               break;
        case DEVM_IOREMAP_UC:
                addr = ioremap_uc(offset, size);
                break;
@@ -87,22 +83,6 @@ void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
 }
 EXPORT_SYMBOL_GPL(devm_ioremap_uc);
 
-/**
- * devm_ioremap_nocache - Managed ioremap_nocache()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed ioremap_nocache().  Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
-                                  resource_size_t size)
-{
-       return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NC);
-}
-EXPORT_SYMBOL(devm_ioremap_nocache);
-
 /**
  * devm_ioremap_wc - Managed ioremap_wc()
  * @dev: Generic device to remap IO address for
diff --git a/lib/fdt_addresses.c b/lib/fdt_addresses.c
new file mode 100644 (file)
index 0000000..23610bc
--- /dev/null
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_addresses.c"
index fb29c02c6a3c384cf597d0ec653dfc8c15470772..51595bf3af8505d4350f42bc323c7b73dc4232e6 100644 (file)
@@ -1222,11 +1222,12 @@ EXPORT_SYMBOL(iov_iter_discard);
 
 unsigned long iov_iter_alignment(const struct iov_iter *i)
 {
-       unsigned int p_mask = i->pipe->ring_size - 1;
        unsigned long res = 0;
        size_t size = i->count;
 
        if (unlikely(iov_iter_is_pipe(i))) {
+               unsigned int p_mask = i->pipe->ring_size - 1;
+
                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
                        return size | i->iov_offset;
                return size;
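
Hoisting p_mask into the pipe branch is more than style: in struct iov_iter the pipe pointer lives in a union with the iovec/kvec/bvec pointers, so computing i->pipe->ring_size before checking the iterator type dereferences whatever pointer happens to occupy that slot. A standalone sketch of the guarded-union-access rule (stub types):

    #include <stdio.h>

    struct pipe_sketch { unsigned ring_size; };

    struct iter_sketch {
            int is_pipe;
            union {
                    struct pipe_sketch *pipe;
                    const char *buf;       /* other iterator kinds */
            };
    };

    static unsigned long alignment(const struct iter_sketch *i)
    {
            if (i->is_pipe) {
                    /* Only touch the pipe member inside the guarded branch. */
                    unsigned p_mask = i->pipe->ring_size - 1;
                    return p_mask;
            }
            return 0;
    }

    int main(void)
    {
            struct iter_sketch it = { .is_pipe = 0, .buf = "x" };
            printf("%lu\n", alignment(&it));
            return 0;
    }
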
index fe5c413efe96586a7f21e01a1376743781f55ae5..f0b5a1d24e558109d913dba7f9b6b44372f43df1 100644 (file)
@@ -60,36 +60,43 @@ static int ptr_id(void *ptr)
  */
 static void *shadow_get(void *obj, unsigned long id)
 {
-       void *ret = klp_shadow_get(obj, id);
+       int **sv;
 
+       sv = klp_shadow_get(obj, id);
        pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
-               __func__, ptr_id(obj), id, ptr_id(ret));
+               __func__, ptr_id(obj), id, ptr_id(sv));
 
-       return ret;
+       return sv;
 }
 
 static void *shadow_alloc(void *obj, unsigned long id, size_t size,
                          gfp_t gfp_flags, klp_shadow_ctor_t ctor,
                          void *ctor_data)
 {
-       void *ret = klp_shadow_alloc(obj, id, size, gfp_flags, ctor,
-                                    ctor_data);
+       int **var = ctor_data;
+       int **sv;
+
+       sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
        pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
                __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
-               ptr_id(ctor_data), ptr_id(ret));
-       return ret;
+               ptr_id(*var), ptr_id(sv));
+
+       return sv;
 }
 
 static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
                                 gfp_t gfp_flags, klp_shadow_ctor_t ctor,
                                 void *ctor_data)
 {
-       void *ret = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor,
-                                           ctor_data);
+       int **var = ctor_data;
+       int **sv;
+
+       sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
        pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
                __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
-               ptr_id(ctor_data), ptr_id(ret));
-       return ret;
+               ptr_id(*var), ptr_id(sv));
+
+       return sv;
 }
 
 static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
@@ -110,58 +117,70 @@ static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
 /* Shadow variable constructor - remember simple pointer data */
 static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
 {
-       int **shadow_int = shadow_data;
-       *shadow_int = ctor_data;
+       int **sv = shadow_data;
+       int **var = ctor_data;
+
+       if (!var)
+               return -EINVAL;
+
+       *sv = *var;
        pr_info("%s: PTR%d -> PTR%d\n",
-               __func__, ptr_id(shadow_int), ptr_id(ctor_data));
+               __func__, ptr_id(sv), ptr_id(*var));
 
        return 0;
 }
 
 static void shadow_dtor(void *obj, void *shadow_data)
 {
+       int **sv = shadow_data;
+
        pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
-               __func__, ptr_id(obj), ptr_id(shadow_data));
+               __func__, ptr_id(obj), ptr_id(sv));
 }
 
 static int test_klp_shadow_vars_init(void)
 {
        void *obj                       = THIS_MODULE;
        int id                  = 0x1234;
-       size_t size             = sizeof(int *);
        gfp_t gfp_flags         = GFP_KERNEL;
 
        int var1, var2, var3, var4;
+       int *pv1, *pv2, *pv3, *pv4;
        int **sv1, **sv2, **sv3, **sv4;
 
-       void *ret;
+       int **sv;
+
+       pv1 = &var1;
+       pv2 = &var2;
+       pv3 = &var3;
+       pv4 = &var4;
 
        ptr_id(NULL);
-       ptr_id(&var1);
-       ptr_id(&var2);
-       ptr_id(&var3);
-       ptr_id(&var4);
+       ptr_id(pv1);
+       ptr_id(pv2);
+       ptr_id(pv3);
+       ptr_id(pv4);
 
        /*
         * With an empty shadow variable hash table, expect not to find
         * any matches.
         */
-       ret = shadow_get(obj, id);
-       if (!ret)
+       sv = shadow_get(obj, id);
+       if (!sv)
                pr_info("  got expected NULL result\n");
 
        /*
         * Allocate a few shadow variables with different <obj> and <id>.
         */
-       sv1 = shadow_alloc(obj, id, size, gfp_flags, shadow_ctor, &var1);
+       sv1 = shadow_alloc(obj, id, sizeof(pv1), gfp_flags, shadow_ctor, &pv1);
        if (!sv1)
                return -ENOMEM;
 
-       sv2 = shadow_alloc(obj + 1, id, size, gfp_flags, shadow_ctor, &var2);
+       sv2 = shadow_alloc(obj + 1, id, sizeof(pv2), gfp_flags, shadow_ctor, &pv2);
        if (!sv2)
                return -ENOMEM;
 
-       sv3 = shadow_alloc(obj, id + 1, size, gfp_flags, shadow_ctor, &var3);
+       sv3 = shadow_alloc(obj, id + 1, sizeof(pv3), gfp_flags, shadow_ctor, &pv3);
        if (!sv3)
                return -ENOMEM;
 
@@ -169,23 +188,23 @@ static int test_klp_shadow_vars_init(void)
         * Verify we can find our new shadow variables and that they point
         * to expected data.
         */
-       ret = shadow_get(obj, id);
-       if (!ret)
+       sv = shadow_get(obj, id);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv1 && *sv1 == &var1)
+       if (sv == sv1 && *sv1 == pv1)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv1), ptr_id(*sv1));
 
-       ret = shadow_get(obj + 1, id);
-       if (!ret)
+       sv = shadow_get(obj + 1, id);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv2 && *sv2 == &var2)
+       if (sv == sv2 && *sv2 == pv2)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv2), ptr_id(*sv2));
-       ret = shadow_get(obj, id + 1);
-       if (!ret)
+       sv = shadow_get(obj, id + 1);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv3 && *sv3 == &var3)
+       if (sv == sv3 && *sv3 == pv3)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv3), ptr_id(*sv3));
 
@@ -193,14 +212,14 @@ static int test_klp_shadow_vars_init(void)
         * Allocate or get a few more, this time with the same <obj>, <id>.
         * The second invocation should return the same shadow var.
         */
-       sv4 = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
+       sv4 = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
        if (!sv4)
                return -ENOMEM;
 
-       ret = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
-       if (!ret)
+       sv = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv4 && *sv4 == &var4)
+       if (sv == sv4 && *sv4 == pv4)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv4), ptr_id(*sv4));
 
@@ -209,27 +228,27 @@ static int test_klp_shadow_vars_init(void)
         * longer find them.
         */
        shadow_free(obj, id, shadow_dtor);                      /* sv1 */
-       ret = shadow_get(obj, id);
-       if (!ret)
+       sv = shadow_get(obj, id);
+       if (!sv)
                pr_info("  got expected NULL result\n");
 
        shadow_free(obj + 1, id, shadow_dtor);                  /* sv2 */
-       ret = shadow_get(obj + 1, id);
-       if (!ret)
+       sv = shadow_get(obj + 1, id);
+       if (!sv)
                pr_info("  got expected NULL result\n");
 
        shadow_free(obj + 2, id, shadow_dtor);                  /* sv4 */
-       ret = shadow_get(obj + 2, id);
-       if (!ret)
+       sv = shadow_get(obj + 2, id);
+       if (!sv)
                pr_info("  got expected NULL result\n");
 
        /*
         * We should still find an <id+1> variable.
         */
-       ret = shadow_get(obj, id + 1);
-       if (!ret)
+       sv = shadow_get(obj, id + 1);
+       if (!sv)
                return -EINVAL;
-       if (ret == sv3 && *sv3 == &var3)
+       if (sv == sv3 && *sv3 == pv3)
                pr_info("  got expected PTR%d -> PTR%d result\n",
                        ptr_id(sv3), ptr_id(*sv3));
 
@@ -237,8 +256,8 @@ static int test_klp_shadow_vars_init(void)
         * Free all the <id+1> variables, too.
         */
        shadow_free_all(id + 1, shadow_dtor);                   /* sv3 */
-       ret = shadow_get(obj, id);
-       if (!ret)
+       sv = shadow_get(obj, id);
+       if (!sv)
                pr_info("  shadow_get() got expected NULL result\n");
 
 
index 17417eee0866461fe0a0cd595c89c7778efd9809..bf1b4765c8f68eeef6cdb0c0f50105f7fe8c885f 100644 (file)
@@ -124,6 +124,9 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
 #define time_before(x, y) ((x) < (y))
 #endif
 
+#define RAID6_TEST_DISKS       8
+#define RAID6_TEST_DISKS_ORDER 3
+
 static inline const struct raid6_recov_calls *raid6_choose_recov(void)
 {
        const struct raid6_recov_calls *const *algo;
@@ -146,7 +149,7 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
 }
 
 static inline const struct raid6_calls *raid6_choose_gen(
-       void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks)
+       void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
 {
        unsigned long perf, bestgenperf, bestxorperf, j0, j1;
        int start = (disks>>1)-1, stop = disks-3;       /* work on the second half of the disks */
@@ -181,7 +184,8 @@ static inline const struct raid6_calls *raid6_choose_gen(
                                best = *algo;
                        }
                        pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
-                              (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
+                               (perf * HZ * (disks-2)) >>
+                               (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
 
                        if (!(*algo)->xor_syndrome)
                                continue;
@@ -204,17 +208,24 @@ static inline const struct raid6_calls *raid6_choose_gen(
                                bestxorperf = perf;
 
                        pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
-                               (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
+                               (perf * HZ * (disks-2)) >>
+                               (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
                }
        }
 
        if (best) {
-               pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
-                      best->name,
-                      (bestgenperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
-               if (best->xor_syndrome)
-                       pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
-                              (bestxorperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
+               if (IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+                       pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
+                               best->name,
+                               (bestgenperf * HZ * (disks-2)) >>
+                               (20 - PAGE_SHIFT+RAID6_TIME_JIFFIES_LG2));
+                       if (best->xor_syndrome)
+                               pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
+                                       (bestxorperf * HZ * (disks-2)) >>
+                                       (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
+               } else
+                       pr_info("raid6: skipping pq benchmark and using algorithm %s\n",
+                               best->name);
                raid6_call = *best;
        } else
                pr_err("raid6: Yikes!  No algorithm found!\n");
@@ -228,27 +239,33 @@ static inline const struct raid6_calls *raid6_choose_gen(
 
 int __init raid6_select_algo(void)
 {
-       const int disks = (65536/PAGE_SIZE)+2;
+       const int disks = RAID6_TEST_DISKS;
 
        const struct raid6_calls *gen_best;
        const struct raid6_recov_calls *rec_best;
-       char *syndromes;
-       void *dptrs[(65536/PAGE_SIZE)+2];
-       int i;
-
-       for (i = 0; i < disks-2; i++)
-               dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
-
-       /* Normal code - use a 2-page allocation to avoid D$ conflict */
-       syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
+       char *disk_ptr, *p;
+       void *dptrs[RAID6_TEST_DISKS];
+       int i, cycle;
 
-       if (!syndromes) {
+       /* prepare the buffer and fill it circularly with the gfmul table */
+       disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
+       if (!disk_ptr) {
                pr_err("raid6: Yikes!  No memory available.\n");
                return -ENOMEM;
        }
 
-       dptrs[disks-2] = syndromes;
-       dptrs[disks-1] = syndromes + PAGE_SIZE;
+       p = disk_ptr;
+       for (i = 0; i < disks; i++)
+               dptrs[i] = p + PAGE_SIZE * i;
+
+       cycle = ((disks - 2) * PAGE_SIZE) / 65536;
+       for (i = 0; i < cycle; i++) {
+               memcpy(p, raid6_gfmul, 65536);
+               p += 65536;
+       }
+
+       if ((disks - 2) * PAGE_SIZE % 65536)
+               memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);
 
        /* select raid gen_syndrome function */
        gen_best = raid6_choose_gen(&dptrs, disks);
@@ -256,7 +273,7 @@ int __init raid6_select_algo(void)
        /* select raid recover functions */
        rec_best = raid6_choose_recov();
 
-       free_pages((unsigned long)syndromes, 1);
+       free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);
 
        return gen_best && rec_best ? 0 : -EINVAL;
 }
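
The reworked benchmark also corrects the reported throughput for non-4K pages: each gen_syndrome() call now touches (disks - 2) data pages instead of a fixed 64 KiB, and perf counts calls over a window of 2^RAID6_TIME_JIFFIES_LG2 jiffies. With PAGE_SIZE = 2^PAGE_SHIFT, the printed MB/s figure works out to:

    \[
    \mathrm{MB/s}
      = \frac{\mathrm{perf}\cdot HZ\cdot(\mathrm{disks}-2)\cdot 2^{\mathrm{PAGE\_SHIFT}}}
             {2^{20}\cdot 2^{\mathrm{RAID6\_TIME\_JIFFIES\_LG2}}}
      = \bigl(\mathrm{perf}\cdot HZ\cdot(\mathrm{disks}-2)\bigr)
        \gg \bigl(20-\mathrm{PAGE\_SHIFT}+\mathrm{RAID6\_TIME\_JIFFIES\_LG2}\bigr)
    \]

which is exactly the shift expression used in the pr_info() calls above.
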
index 9c485df1308fbdba0dd79ae5f718f9f6bb4778bf..f02e10fa6238174c25662be782d1c6ccd1fea768 100644 (file)
@@ -56,8 +56,8 @@ int main(int argc, char *argv[])
        uint8_t v;
        uint8_t exptbl[256], invtbl[256];
 
-       printf("#include <linux/raid/pq.h>\n");
        printf("#include <linux/export.h>\n");
+       printf("#include <linux/raid/pq.h>\n");
 
        /* Compute multiplication table */
        printf("\nconst u8  __attribute__((aligned(256)))\n"
index c6aa03631df86d7837ea56d28d6059799803ef5b..0809805a7e231809387902b825fb2d89e7c43bd0 100644 (file)
@@ -13,7 +13,7 @@ BEGIN {
        for (i = 0; i < rep; ++i) {
                tmp = $0
                gsub(/\$\$/, i, tmp)
-               gsub(/\$\#/, n, tmp)
+               gsub(/\$#/, n, tmp)
                gsub(/\$\*/, "$", tmp)
                print tmp
        }
index 33feec8989f14054e0fc6d94762e75b871c88ad9..af88d1346dd74cf332b92a7e22442e7035f5d0a2 100644 (file)
@@ -650,8 +650,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
        if (!sbq_wait->sbq) {
                sbq_wait->sbq = sbq;
                atomic_inc(&sbq->ws_active);
+               add_wait_queue(&ws->wait, &sbq_wait->wait);
        }
-       add_wait_queue(&ws->wait, &sbq_wait->wait);
 }
 EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
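
The sbitmap fix moves add_wait_queue() under the !sbq_wait->sbq check so the wait entry is linked exactly once; calling it again for an already-queued entry would double-add the node and corrupt the wait list. A standalone sketch of guard-before-link:

    #include <stdbool.h>
    #include <stdio.h>

    struct waiter { bool registered; };

    static int adds;

    static void add_wait(struct waiter *w)
    {
            if (!w->registered) {
                    w->registered = true;
                    adds++;             /* link into the wait list once */
            }
            /* Linking here unconditionally would double-add the node. */
    }

    int main(void)
    {
            struct waiter w = { false };

            add_wait(&w);
            add_wait(&w);               /* second call must be a no-op */
            printf("adds=%d\n", adds);  /* 1 */
            return 0;
    }
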
 
index dccb95af600322cc6a3f7db1229d52b47a6b037b..706020b06617ce6fae8e0e9517259a6a0fcfcb78 100644 (file)
@@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        unsigned long res = 0;
 
-       /*
-        * Truncate 'max' to the user-specified limit, so that
-        * we only have one limit we need to check in the loop
-        */
-       if (max > count)
-               max = count;
-
        if (IS_UNALIGNED(src, dst))
                goto byte_at_a_time;
 
@@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
                unsigned long max = max_addr - src_addr;
                long retval;
 
+               /*
+                * Truncate 'max' to the user-specified limit, so that
+                * we only have one limit we need to check in the loop
+                */
+               if (max > count)
+                       max = count;
+
                kasan_check_write(dst, count);
                check_object_size(dst, count, false);
                if (user_access_begin(src, max)) {
index 6c0005d5dd5c43e7ed4097c24d5fc785a794110e..41670d4a5816530d7b948fe29f0fad20a44ec2ed 100644 (file)
@@ -26,13 +26,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
        unsigned long align, res = 0;
        unsigned long c;
 
-       /*
-        * Truncate 'max' to the user-specified limit, so that
-        * we only have one limit we need to check in the loop
-        */
-       if (max > count)
-               max = count;
-
        /*
         * Do everything aligned. But that means that we
         * need to also expand the maximum..
@@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count)
                unsigned long max = max_addr - src_addr;
                long retval;
 
+               /*
+                * Truncate 'max' to the user-specified limit, so that
+                * we only have one limit we need to check in the loop
+                */
+               if (max > count)
+                       max = count;
+
                if (user_access_begin(str, max)) {
                        retval = do_strnlen_user(str, count, max);
                        user_access_end();
index 7df4f7f395bf2810913725747d29f1717eb4e1ec..55c14e8c885916b2ee96ff00a600dc8ae2a19664 100644 (file)
@@ -2,6 +2,7 @@
 /*
  * test_xarray.c: Test the XArray API
  * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2019-2020 Oracle
  * Author: Matthew Wilcox <willy@infradead.org>
  */
 
@@ -902,28 +903,34 @@ static noinline void check_store_iter(struct xarray *xa)
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_multi_find(struct xarray *xa)
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
 {
 #ifdef CONFIG_XARRAY_MULTI
+       unsigned long multi = 3 << order;
+       unsigned long next = 4 << order;
        unsigned long index;
 
-       xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
-       XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+       xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+       XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+       XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
 
        index = 0;
        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(12));
-       XA_BUG_ON(xa, index != 12);
-       index = 13;
+                       xa_mk_value(multi));
+       XA_BUG_ON(xa, index != multi);
+       index = multi + 1;
        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(12));
-       XA_BUG_ON(xa, (index < 12) || (index >= 16));
+                       xa_mk_value(multi));
+       XA_BUG_ON(xa, (index < multi) || (index >= next));
        XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(16));
-       XA_BUG_ON(xa, index != 16);
-
-       xa_erase_index(xa, 12);
-       xa_erase_index(xa, 16);
+                       xa_mk_value(next));
+       XA_BUG_ON(xa, index != next);
+       XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
+       XA_BUG_ON(xa, index != next);
+
+       xa_erase_index(xa, multi);
+       xa_erase_index(xa, next);
+       xa_erase_index(xa, next + 1);
        XA_BUG_ON(xa, !xa_empty(xa));
 #endif
 }
@@ -1046,12 +1053,33 @@ static noinline void check_find_3(struct xarray *xa)
        xa_destroy(xa);
 }
 
+static noinline void check_find_4(struct xarray *xa)
+{
+       unsigned long index = 0;
+       void *entry;
+
+       xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+
+       entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+       XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
+
+       entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+       XA_BUG_ON(xa, entry);
+
+       xa_erase_index(xa, ULONG_MAX);
+}
+
 static noinline void check_find(struct xarray *xa)
 {
+       unsigned i;
+
        check_find_1(xa);
        check_find_2(xa);
        check_find_3(xa);
-       check_multi_find(xa);
+       check_find_4(xa);
+
+       for (i = 2; i < 10; i++)
+               check_multi_find_1(xa, i);
        check_multi_find_2(xa);
 }
 
@@ -1132,6 +1160,27 @@ static noinline void check_move_tiny(struct xarray *xa)
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_move_max(struct xarray *xa)
+{
+       XA_STATE(xas, xa, 0);
+
+       xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+       rcu_read_lock();
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+       rcu_read_unlock();
+
+       xas_set(&xas, 0);
+       rcu_read_lock();
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+       xas_pause(&xas);
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+       rcu_read_unlock();
+
+       xa_erase_index(xa, ULONG_MAX);
+       XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 {
        XA_STATE(xas, xa, 0);
@@ -1240,6 +1289,7 @@ static noinline void check_move(struct xarray *xa)
        xa_destroy(xa);
 
        check_move_tiny(xa);
+       check_move_max(xa);
 
        for (i = 0; i < 16; i++)
                check_move_small(xa, 1UL << i);
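check_multi_find_1() generalizes the old fixed-index test (a multi-index entry at 12 plus a single entry at 16) to an arbitrary order, and the new assertion also pins down xa_find_after() when 'max' equals the last reported index. A hedged restatement of the invariants for order = 2, i.e. multi = 12 and next = 16 (illustration only, assumes CONFIG_XARRAY_MULTI):

/* Entries present: xa_mk_value(12) covering 12-15, plus 16 and 17. */
static void multi_find_semantics(struct xarray *xa)
{
        unsigned long index = 13;
        void *entry;

        /* Any index inside the multi-index entry finds that entry */
        entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);
        /* entry == xa_mk_value(12) and 12 <= index < 16 */

        /* xa_find_after() must step past the whole multi-index entry */
        entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
        /* entry == xa_mk_value(16), index == 16 */

        /* With max == index, nothing further may be reported ... */
        entry = xa_find_after(xa, &index, 16, XA_PRESENT);
        /* ... so entry == NULL even though index 17 is occupied */
}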
index 9fe698ff62ec4ffcaaf6c9e838c358bc31a57bca..d883ac29950897801f3a597cc90d42ae6d696524 100644 (file)
@@ -24,4 +24,10 @@ config GENERIC_COMPAT_VDSO
        help
          This config option enables the compat VDSO layer.
 
+config GENERIC_VDSO_TIME_NS
+       bool
+       help
+         Selected by architectures which support time namespaces in the
+         VDSO.
+
 endif
index 9ecfd3b547bae2024fa54d9d5635177b9c36ae02..f8b8ec5e63aca5e5ffcbf2de5c902c0080cf84b0 100644 (file)
@@ -38,12 +38,22 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
 }
 #endif
 
-static int do_hres(const struct vdso_data *vd, clockid_t clk,
-                  struct __kernel_timespec *ts)
+#ifdef CONFIG_TIME_NS
+static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+                         struct __kernel_timespec *ts)
 {
-       const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
-       u64 cycles, last, sec, ns;
+       const struct vdso_data *vd = __arch_get_timens_vdso_data();
+       const struct timens_offset *offs = &vdns->offset[clk];
+       const struct vdso_timestamp *vdso_ts;
+       u64 cycles, last, ns;
        u32 seq;
+       s64 sec;
+
+       if (clk != CLOCK_MONOTONIC_RAW)
+               vd = &vd[CS_HRES_COARSE];
+       else
+               vd = &vd[CS_RAW];
+       vdso_ts = &vd->basetime[clk];
 
        do {
                seq = vdso_read_begin(vd);
@@ -58,6 +68,10 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
                sec = vdso_ts->sec;
        } while (unlikely(vdso_read_retry(vd, seq)));
 
+       /* Add the namespace offset */
+       sec += offs->sec;
+       ns += offs->nsec;
+
        /*
         * Do this outside the loop: a race inside the loop could result
         * in __iter_div_u64_rem() being extremely slow.
@@ -67,18 +81,128 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
 
        return 0;
 }
+#else
+static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+{
+       return NULL;
+}
+
+static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+                         struct __kernel_timespec *ts)
+{
+       return -EINVAL;
+}
+#endif
 
-static void do_coarse(const struct vdso_data *vd, clockid_t clk,
-                     struct __kernel_timespec *ts)
+static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
+                                  struct __kernel_timespec *ts)
 {
        const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+       u64 cycles, last, sec, ns;
        u32 seq;
 
+       do {
+               /*
+                * Open coded to handle VCLOCK_TIMENS. Tasks in a time
+                * namespace have a special VVAR page installed which
+                * has vd->seq set to 1 and vd->clock_mode set to
+                * VCLOCK_TIMENS. For tasks outside a time namespace
+                * this does not affect performance: if vd->seq is
+                * odd, i.e. a concurrent update is in progress, the
+                * extra check for vd->clock_mode adds only a few
+                * instructions while spin-waiting for vd->seq to
+                * become even again.
+                */
+               while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
+                       if (IS_ENABLED(CONFIG_TIME_NS) &&
+                           vd->clock_mode == VCLOCK_TIMENS)
+                               return do_hres_timens(vd, clk, ts);
+                       cpu_relax();
+               }
+               smp_rmb();
+
+               cycles = __arch_get_hw_counter(vd->clock_mode);
+               ns = vdso_ts->nsec;
+               last = vd->cycle_last;
+               if (unlikely((s64)cycles < 0))
+                       return -1;
+
+               ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
+               ns >>= vd->shift;
+               sec = vdso_ts->sec;
+       } while (unlikely(vdso_read_retry(vd, seq)));
+
+       /*
+        * Do this outside the loop: a race inside the loop could result
+        * in __iter_div_u64_rem() being extremely slow.
+        */
+       ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+       ts->tv_nsec = ns;
+
+       return 0;
+}
+
+#ifdef CONFIG_TIME_NS
+static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+                           struct __kernel_timespec *ts)
+{
+       const struct vdso_data *vd = __arch_get_timens_vdso_data();
+       const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+       const struct timens_offset *offs = &vdns->offset[clk];
+       u64 nsec;
+       s64 sec;
+       s32 seq;
+
        do {
                seq = vdso_read_begin(vd);
+               sec = vdso_ts->sec;
+               nsec = vdso_ts->nsec;
+       } while (unlikely(vdso_read_retry(vd, seq)));
+
+       /* Add the namespace offset */
+       sec += offs->sec;
+       nsec += offs->nsec;
+
+       /*
+        * Do this outside the loop: a race inside the loop could result
+        * in __iter_div_u64_rem() being extremely slow.
+        */
+       ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
+       ts->tv_nsec = nsec;
+       return 0;
+}
+#else
+static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+                           struct __kernel_timespec *ts)
+{
+       return -1;
+}
+#endif
+
+static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
+                                    struct __kernel_timespec *ts)
+{
+       const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+       u32 seq;
+
+       do {
+               /*
+                * Open coded to handle VCLOCK_TIMENS. See comment in
+                * do_hres().
+                */
+               while ((seq = READ_ONCE(vd->seq)) & 1) {
+                       if (IS_ENABLED(CONFIG_TIME_NS) &&
+                           vd->clock_mode == VCLOCK_TIMENS)
+                               return do_coarse_timens(vd, clk, ts);
+                       cpu_relax();
+               }
+               smp_rmb();
+
                ts->tv_sec = vdso_ts->sec;
                ts->tv_nsec = vdso_ts->nsec;
        } while (unlikely(vdso_read_retry(vd, seq)));
+
+       return 0;
 }
 
 static __maybe_unused int
@@ -96,15 +220,16 @@ __cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
         * clocks are handled in the VDSO directly.
         */
        msk = 1U << clock;
-       if (likely(msk & VDSO_HRES)) {
-               return do_hres(&vd[CS_HRES_COARSE], clock, ts);
-       } else if (msk & VDSO_COARSE) {
-               do_coarse(&vd[CS_HRES_COARSE], clock, ts);
-               return 0;
-       } else if (msk & VDSO_RAW) {
-               return do_hres(&vd[CS_RAW], clock, ts);
-       }
-       return -1;
+       if (likely(msk & VDSO_HRES))
+               vd = &vd[CS_HRES_COARSE];
+       else if (msk & VDSO_COARSE)
+               return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
+       else if (msk & VDSO_RAW)
+               vd = &vd[CS_RAW];
+       else
+               return -1;
+
+       return do_hres(vd, clock, ts);
 }
 
 static __maybe_unused int
@@ -117,6 +242,7 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
        return 0;
 }
 
+#ifdef BUILD_VDSO32
 static __maybe_unused int
 __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
 {
@@ -125,20 +251,16 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
 
        ret = __cvdso_clock_gettime_common(clock, &ts);
 
-#ifdef VDSO_HAS_32BIT_FALLBACK
        if (unlikely(ret))
                return clock_gettime32_fallback(clock, res);
-#else
-       if (unlikely(ret))
-               ret = clock_gettime_fallback(clock, &ts);
-#endif
 
-       if (likely(!ret)) {
-               res->tv_sec = ts.tv_sec;
-               res->tv_nsec = ts.tv_nsec;
-       }
+       /* For ret == 0 */
+       res->tv_sec = ts.tv_sec;
+       res->tv_nsec = ts.tv_nsec;
+
        return ret;
 }
+#endif /* BUILD_VDSO32 */
 
 static __maybe_unused int
 __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
@@ -156,6 +278,10 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
        }
 
        if (unlikely(tz != NULL)) {
+               if (IS_ENABLED(CONFIG_TIME_NS) &&
+                   vd->clock_mode == VCLOCK_TIMENS)
+                       vd = __arch_get_timens_vdso_data();
+
                tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
                tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
        }
@@ -167,7 +293,12 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
 {
        const struct vdso_data *vd = __arch_get_vdso_data();
-       __kernel_old_time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
+       __kernel_old_time_t t;
+
+       if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
+               vd = __arch_get_timens_vdso_data();
+
+       t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
 
        if (time)
                *time = t;
@@ -181,7 +312,6 @@ static __maybe_unused
 int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
 {
        const struct vdso_data *vd = __arch_get_vdso_data();
-       u64 hrtimer_res;
        u32 msk;
        u64 ns;
 
@@ -189,27 +319,24 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
        if (unlikely((u32) clock >= MAX_CLOCKS))
                return -1;
 
-       hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+       if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
+               vd = __arch_get_timens_vdso_data();
+
        /*
         * Convert the clockid to a bitmask and use it to check which
         * clocks are handled in the VDSO directly.
         */
        msk = 1U << clock;
-       if (msk & VDSO_HRES) {
+       if (msk & (VDSO_HRES | VDSO_RAW)) {
                /*
                 * Preserves the behaviour of posix_get_hrtimer_res().
                 */
-               ns = hrtimer_res;
+               ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
        } else if (msk & VDSO_COARSE) {
                /*
                 * Preserves the behaviour of posix_get_coarse_res().
                 */
                ns = LOW_RES_NSEC;
-       } else if (msk & VDSO_RAW) {
-               /*
-                * Preserves the behaviour of posix_get_hrtimer_res().
-                */
-               ns = hrtimer_res;
        } else {
                return -1;
        }
@@ -221,6 +348,7 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
        return 0;
 }
 
+static __maybe_unused
 int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
 {
        int ret = __cvdso_clock_getres_common(clock, res);
@@ -230,6 +358,7 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
        return 0;
 }
 
+#ifdef BUILD_VDSO32
 static __maybe_unused int
 __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
 {
@@ -238,18 +367,14 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
 
        ret = __cvdso_clock_getres_common(clock, &ts);
 
-#ifdef VDSO_HAS_32BIT_FALLBACK
        if (unlikely(ret))
                return clock_getres32_fallback(clock, res);
-#else
-       if (unlikely(ret))
-               ret = clock_getres_fallback(clock, &ts);
-#endif
 
-       if (likely(!ret && res)) {
+       if (likely(res)) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }
        return ret;
 }
+#endif /* BUILD_VDSO32 */
 #endif /* VDSO_HAS_CLOCK_GETRES */
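The open-coded sequence count loops in do_hres() and do_coarse() fold the time-namespace check into the retry path: the timens VVAR page keeps vd->seq pinned to the odd value 1, so a reader that would otherwise spin on an in-progress update instead notices VCLOCK_TIMENS and diverts, at no cost to tasks with a normal VVAR page. A stand-alone user-space model of that trick, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

#define MODE_TIMENS 1                   /* stand-in for VCLOCK_TIMENS */

struct data_page {
        _Atomic unsigned int seq;       /* pinned to 1 on the special page */
        unsigned int mode;
        unsigned long value;
};

static bool read_data(struct data_page *p, unsigned long *out,
                      bool (*divert)(unsigned long *out))
{
        unsigned int seq;

        do {
                /* A permanently odd seq never lets us out of this loop,
                 * so the loop doubles as the "special page" detector. */
                while ((seq = atomic_load_explicit(&p->seq,
                                memory_order_acquire)) & 1) {
                        if (p->mode == MODE_TIMENS)
                                return divert(out);     /* slow path */
                        /* else: a writer is mid-update, keep spinning */
                }
                *out = p->value;
        } while (atomic_load_explicit(&p->seq, memory_order_acquire) != seq);

        return true;
}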
index 1237c213f52bc08ad3f58387aa603979c7cafbcd..1d9fab7db8dad5f0cf693e4ee242db7306f980cf 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
  * XArray implementation
- * Copyright (c) 2017 Microsoft Corporation
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2018-2020 Oracle
  * Author: Matthew Wilcox <willy@infradead.org>
  */
 
@@ -967,6 +968,7 @@ void xas_pause(struct xa_state *xas)
        if (xas_invalid(xas))
                return;
 
+       xas->xa_node = XAS_RESTART;
        if (node) {
                unsigned int offset = xas->xa_offset;
                while (++offset < XA_CHUNK_SIZE) {
@@ -974,10 +976,11 @@ void xas_pause(struct xa_state *xas)
                                break;
                }
                xas->xa_index += (offset - xas->xa_offset) << node->shift;
+               if (xas->xa_index == 0)
+                       xas->xa_node = XAS_BOUNDS;
        } else {
                xas->xa_index++;
        }
-       xas->xa_node = XAS_RESTART;
 }
 EXPORT_SYMBOL_GPL(xas_pause);
 
@@ -1079,13 +1082,15 @@ void *xas_find(struct xa_state *xas, unsigned long max)
 {
        void *entry;
 
-       if (xas_error(xas))
+       if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
                return NULL;
+       if (xas->xa_index > max)
+               return set_bounds(xas);
 
        if (!xas->xa_node) {
                xas->xa_index = 1;
                return set_bounds(xas);
-       } else if (xas_top(xas->xa_node)) {
+       } else if (xas->xa_node == XAS_RESTART) {
                entry = xas_load(xas);
                if (entry || xas_not_node(xas->xa_node))
                        return entry;
@@ -1150,6 +1155,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
 
        if (xas_error(xas))
                return NULL;
+       if (xas->xa_index > max)
+               goto max;
 
        if (!xas->xa_node) {
                xas->xa_index = 1;
@@ -1824,6 +1831,17 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
 }
 EXPORT_SYMBOL(xa_find);
 
+static bool xas_sibling(struct xa_state *xas)
+{
+       struct xa_node *node = xas->xa_node;
+       unsigned long mask;
+
+       if (!node)
+               return false;
+       mask = (XA_CHUNK_SIZE << node->shift) - 1;
+       return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+}
+
 /**
  * xa_find_after() - Search the XArray for a present entry.
  * @xa: XArray.
@@ -1847,21 +1865,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
        XA_STATE(xas, xa, *indexp + 1);
        void *entry;
 
+       if (xas.xa_index == 0)
+               return NULL;
+
        rcu_read_lock();
        for (;;) {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
-               if (xas.xa_node == XAS_BOUNDS)
+
+               if (xas_invalid(&xas))
                        break;
-               if (xas.xa_shift) {
-                       if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
-                               continue;
-               } else {
-                       if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
-                               continue;
-               }
+               if (xas_sibling(&xas))
+                       continue;
                if (!xas_retry(&xas, entry))
                        break;
        }
index 7dd602d7f8db7be0f698021b01d8fc9810d25e5b..ad9d5b1c44739755146c17f8e5a5f6ff4659bc50 100644 (file)
@@ -26,6 +26,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
        unsigned long i, nr_pages, addr, next;
        int nr;
        struct page **pages;
+       int ret = 0;
 
        if (gup->size > ULONG_MAX)
                return -EINVAL;
@@ -63,7 +64,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
                                            NULL);
                        break;
                default:
-                       return -1;
+                       kvfree(pages);
+                       ret = -EINVAL;
+                       goto out;
                }
 
                if (nr <= 0)
@@ -85,7 +88,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
        gup->put_delta_usec = ktime_us_delta(end_time, start_time);
 
        kvfree(pages);
-       return 0;
+out:
+       return ret;
 }
 
 static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
index 107b10f9878ef73bb936721728d28b39612884bd..64d8dea47dd1a81e575d40b641622bf1548e07e2 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/highmem.h>
 #include <linux/kgdb.h>
 #include <asm/tlbflush.h>
-
+#include <linux/vmalloc.h>
 
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 DEFINE_PER_CPU(int, __kmap_atomic_idx);
index 41a0fbddc96bb27a0717e4033bbbaa511aac4899..a880932136740ddbc6bbf1b4b5c04bfd74f9c8ef 100644 (file)
@@ -527,13 +527,13 @@ void prep_transhuge_page(struct page *page)
        set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
 }
 
-static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
+static unsigned long __thp_get_unmapped_area(struct file *filp,
+               unsigned long addr, unsigned long len,
                loff_t off, unsigned long flags, unsigned long size)
 {
-       unsigned long addr;
        loff_t off_end = off + len;
        loff_t off_align = round_up(off, size);
-       unsigned long len_pad;
+       unsigned long len_pad, ret;
 
        if (off_end <= off_align || (off_end - off_align) < size)
                return 0;
@@ -542,30 +542,40 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long le
        if (len_pad < len || (off + len_pad) < off)
                return 0;
 
-       addr = current->mm->get_unmapped_area(filp, 0, len_pad,
+       ret = current->mm->get_unmapped_area(filp, addr, len_pad,
                                              off >> PAGE_SHIFT, flags);
-       if (IS_ERR_VALUE(addr))
+
+       /*
+        * The failure might be due to length padding. The caller will retry
+        * without the padding.
+        */
+       if (IS_ERR_VALUE(ret))
                return 0;
 
-       addr += (off - addr) & (size - 1);
-       return addr;
+       /*
+        * Do not try to align to THP boundary if allocation at the address
+        * hint succeeds.
+        */
+       if (ret == addr)
+               return addr;
+
+       ret += (off - ret) & (size - 1);
+       return ret;
 }
 
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
+       unsigned long ret;
        loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 
-       if (addr)
-               goto out;
        if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
                goto out;
 
-       addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
-       if (addr)
-               return addr;
-
- out:
+       ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
+       if (ret)
+               return ret;
+out:
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
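The rounding at the tail of __thp_get_unmapped_area() shifts the returned address so that (address - file offset) becomes a multiple of the huge page size, which is what allows the mapping to be backed by PMD-sized pages. A stand-alone model of that arithmetic with a concrete check (hypothetical helper; a 2 MiB PMD size is assumed):

#include <assert.h>

#define SZ_2M (2UL << 20)       /* assumed PMD_SIZE */

static unsigned long align_for_thp(unsigned long ret, unsigned long off)
{
        /* same expression as 'ret += (off - ret) & (size - 1)' above */
        return ret + ((off - ret) & (SZ_2M - 1));
}

int main(void)
{
        unsigned long off = 0x200000UL;                 /* file offset */
        unsigned long addr = align_for_thp(0x7f0000001000UL, off);

        /* addr == 0x7f0000200000; (addr - off) is 2 MiB aligned */
        assert(((addr - off) & (SZ_2M - 1)) == 0);
        return 0;
}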
index ac65bb5e38ac267dec41c3e09bdaa53501da4c9a..dd8737a94bec42c8458e8d7c6201cf24d163239c 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/swapops.h>
 #include <linux/jhash.h>
 #include <linux/numa.h>
+#include <linux/llist.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -1136,7 +1137,7 @@ static inline void ClearPageHugeTemporary(struct page *page)
        page[2].mapping = NULL;
 }
 
-void free_huge_page(struct page *page)
+static void __free_huge_page(struct page *page)
 {
        /*
         * Can't pass hstate in here because it is called from the
@@ -1199,6 +1200,54 @@ void free_huge_page(struct page *page)
        spin_unlock(&hugetlb_lock);
 }
 
+/*
+ * As free_huge_page() can be called from a non-task context, we have
+ * to defer the actual freeing to a workqueue to prevent a potential
+ * hugetlb_lock deadlock.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to
+ * be freed and frees them one-by-one. As the page->mapping pointer is
+ * going to be cleared in __free_huge_page() anyway, it is reused as the
+ * llist_node structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+       struct llist_node *node;
+       struct page *page;
+
+       node = llist_del_all(&hpage_freelist);
+
+       while (node) {
+               page = container_of((struct address_space **)node,
+                                    struct page, mapping);
+               node = node->next;
+               __free_huge_page(page);
+       }
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+void free_huge_page(struct page *page)
+{
+       /*
+        * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
+        */
+       if (!in_task()) {
+               /*
+                * Only call schedule_work() if hpage_freelist was previously
+                * empty. Otherwise, schedule_work() has already been called
+                * but the workfn hasn't retrieved the list yet.
+                */
+               if (llist_add((struct llist_node *)&page->mapping,
+                             &hpage_freelist))
+                       schedule_work(&free_hpage_work);
+               return;
+       }
+
+       __free_huge_page(page);
+}
+
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
        INIT_LIST_HEAD(&page->lru);
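free_huge_page() can now be reached from non-task (e.g. softirq) context, where taking hugetlb_lock may deadlock against a holder interrupted on the same CPU; the patch defers those frees through a lockless llist drained by a workqueue, reusing page->mapping as the llist_node. A generic, hedged sketch of the same pattern for an arbitrary object (a dedicated llist_node field instead of the pointer reuse; 'struct thing' is hypothetical):

#include <linux/llist.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct thing {
        struct llist_node lnode;
        /* ... payload ... */
};

static LLIST_HEAD(thing_freelist);

static void __free_thing(struct thing *t)
{
        /* stand-in for teardown that must not run in atomic context */
        kfree(t);
}

static void free_thing_workfn(struct work_struct *work)
{
        struct llist_node *node = llist_del_all(&thing_freelist);

        while (node) {
                struct thing *t = llist_entry(node, struct thing, lnode);

                node = node->next;
                __free_thing(t);
        }
}
static DECLARE_WORK(free_thing_work, free_thing_workfn);

static void free_thing(struct thing *t)
{
        if (!in_task()) {
                /* Kick the work only if the list was previously empty;
                 * otherwise a pending work item will pick this up. */
                if (llist_add(&t->lnode, &thing_freelist))
                        schedule_work(&free_thing_work);
                return;
        }
        __free_thing(t);
}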
index 2ac38bdc18a1f807a5cf103662eeb77ea651b1cc..e434b05416c680b09fdc67065026c9fe292b92f3 100644 (file)
@@ -3,6 +3,10 @@
  * Copyright IBM Corporation, 2012
  * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
  *
+ * Cgroup v2
+ * Copyright (C) 2019 Red Hat, Inc.
+ * Author: Giuseppe Scrivano <gscrivan@redhat.com>
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2.1 of the GNU Lesser General Public License
  * as published by the Free Software Foundation.
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
 
+enum hugetlb_memory_event {
+       HUGETLB_MAX,
+       HUGETLB_NR_MEMORY_EVENTS,
+};
+
 struct hugetlb_cgroup {
        struct cgroup_subsys_state css;
+
        /*
         * the counter to account for hugepages from hugetlb.
         */
        struct page_counter hugepage[HUGE_MAX_HSTATE];
+
+       atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+       atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+
+       /* Handle for "hugetlb.events" */
+       struct cgroup_file events_file[HUGE_MAX_HSTATE];
+
+       /* Handle for "hugetlb.events.local" */
+       struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
 };
 
 #define MEMFILE_PRIVATE(x, val)        (((x) << 16) | (val))
 #define MEMFILE_IDX(val)       (((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)      ((val) & 0xffff)
 
+#define hugetlb_cgroup_from_counter(counter, idx)                   \
+       container_of(counter, struct hugetlb_cgroup, hugepage[idx])
+
 static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
 
 static inline
@@ -178,6 +200,19 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
        } while (hugetlb_cgroup_have_usage(h_cg));
 }
 
+static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
+                                enum hugetlb_memory_event event)
+{
+       atomic_long_inc(&hugetlb->events_local[idx][event]);
+       cgroup_file_notify(&hugetlb->events_local_file[idx]);
+
+       do {
+               atomic_long_inc(&hugetlb->events[idx][event]);
+               cgroup_file_notify(&hugetlb->events_file[idx]);
+       } while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
+                !hugetlb_cgroup_is_root(hugetlb));
+}
+
 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                 struct hugetlb_cgroup **ptr)
 {
@@ -202,8 +237,12 @@ again:
        }
        rcu_read_unlock();
 
-       if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter))
+       if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages,
+                                    &counter)) {
                ret = -ENOMEM;
+               hugetlb_event(hugetlb_cgroup_from_counter(counter, idx), idx,
+                             HUGETLB_MAX);
+       }
        css_put(&h_cg->css);
 done:
        *ptr = h_cg;
@@ -283,10 +322,45 @@ static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
        }
 }
 
+static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
+{
+       int idx;
+       u64 val;
+       struct cftype *cft = seq_cft(seq);
+       unsigned long limit;
+       struct page_counter *counter;
+       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
+
+       idx = MEMFILE_IDX(cft->private);
+       counter = &h_cg->hugepage[idx];
+
+       limit = round_down(PAGE_COUNTER_MAX,
+                          1 << huge_page_order(&hstates[idx]));
+
+       switch (MEMFILE_ATTR(cft->private)) {
+       case RES_USAGE:
+               val = (u64)page_counter_read(counter);
+               seq_printf(seq, "%llu\n", val * PAGE_SIZE);
+               break;
+       case RES_LIMIT:
+               val = (u64)counter->max;
+               if (val == limit)
+                       seq_puts(seq, "max\n");
+               else
+                       seq_printf(seq, "%llu\n", val * PAGE_SIZE);
+               break;
+       default:
+               BUG();
+       }
+
+       return 0;
+}
+
 static DEFINE_MUTEX(hugetlb_limit_mutex);
 
 static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
-                                   char *buf, size_t nbytes, loff_t off)
+                                   char *buf, size_t nbytes, loff_t off,
+                                   const char *max)
 {
        int ret, idx;
        unsigned long nr_pages;
@@ -296,7 +370,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
                return -EINVAL;
 
        buf = strstrip(buf);
-       ret = page_counter_memparse(buf, "-1", &nr_pages);
+       ret = page_counter_memparse(buf, max, &nr_pages);
        if (ret)
                return ret;
 
@@ -316,6 +390,18 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
        return ret ?: nbytes;
 }
 
+static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
+                                          char *buf, size_t nbytes, loff_t off)
+{
+       return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
+}
+
+static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
+                                       char *buf, size_t nbytes, loff_t off)
+{
+       return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
+}
+
 static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
 {
@@ -350,7 +436,36 @@ static char *mem_fmt(char *buf, int size, unsigned long hsize)
        return buf;
 }
 
-static void __init __hugetlb_cgroup_file_init(int idx)
+static int __hugetlb_events_show(struct seq_file *seq, bool local)
+{
+       int idx;
+       long max;
+       struct cftype *cft = seq_cft(seq);
+       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
+
+       idx = MEMFILE_IDX(cft->private);
+
+       if (local)
+               max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
+       else
+               max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);
+
+       seq_printf(seq, "max %lu\n", max);
+
+       return 0;
+}
+
+static int hugetlb_events_show(struct seq_file *seq, void *v)
+{
+       return __hugetlb_events_show(seq, false);
+}
+
+static int hugetlb_events_local_show(struct seq_file *seq, void *v)
+{
+       return __hugetlb_events_show(seq, true);
+}
+
+static void __init __hugetlb_cgroup_file_dfl_init(int idx)
 {
        char buf[32];
        struct cftype *cft;
@@ -360,38 +475,93 @@ static void __init __hugetlb_cgroup_file_init(int idx)
        mem_fmt(buf, 32, huge_page_size(h));
 
        /* Add the limit file */
-       cft = &h->cgroup_files[0];
+       cft = &h->cgroup_files_dfl[0];
+       snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
+       cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
+       cft->seq_show = hugetlb_cgroup_read_u64_max;
+       cft->write = hugetlb_cgroup_write_dfl;
+       cft->flags = CFTYPE_NOT_ON_ROOT;
+
+       /* Add the current usage file */
+       cft = &h->cgroup_files_dfl[1];
+       snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
+       cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
+       cft->seq_show = hugetlb_cgroup_read_u64_max;
+       cft->flags = CFTYPE_NOT_ON_ROOT;
+
+       /* Add the events file */
+       cft = &h->cgroup_files_dfl[2];
+       snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
+       cft->private = MEMFILE_PRIVATE(idx, 0);
+       cft->seq_show = hugetlb_events_show;
+       cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
+       cft->flags = CFTYPE_NOT_ON_ROOT;
+
+       /* Add the events.local file */
+       cft = &h->cgroup_files_dfl[3];
+       snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
+       cft->private = MEMFILE_PRIVATE(idx, 0);
+       cft->seq_show = hugetlb_events_local_show;
+       cft->file_offset = offsetof(struct hugetlb_cgroup,
+                                   events_local_file[idx]);
+       cft->flags = CFTYPE_NOT_ON_ROOT;
+
+       /* NULL terminate the last cft */
+       cft = &h->cgroup_files_dfl[4];
+       memset(cft, 0, sizeof(*cft));
+
+       WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
+                                      h->cgroup_files_dfl));
+}
+
+static void __init __hugetlb_cgroup_file_legacy_init(int idx)
+{
+       char buf[32];
+       struct cftype *cft;
+       struct hstate *h = &hstates[idx];
+
+       /* format the size */
+       mem_fmt(buf, 32, huge_page_size(h));
+
+       /* Add the limit file */
+       cft = &h->cgroup_files_legacy[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->read_u64 = hugetlb_cgroup_read_u64;
-       cft->write = hugetlb_cgroup_write;
+       cft->write = hugetlb_cgroup_write_legacy;
 
        /* Add the usage file */
-       cft = &h->cgroup_files[1];
+       cft = &h->cgroup_files_legacy[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->read_u64 = hugetlb_cgroup_read_u64;
 
        /* Add the MAX usage file */
-       cft = &h->cgroup_files[2];
+       cft = &h->cgroup_files_legacy[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;
 
        /* Add the failcntfile */
-       cft = &h->cgroup_files[3];
+       cft = &h->cgroup_files_legacy[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
        cft->private  = MEMFILE_PRIVATE(idx, RES_FAILCNT);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;
 
        /* NULL terminate the last cft */
-       cft = &h->cgroup_files[4];
+       cft = &h->cgroup_files_legacy[4];
        memset(cft, 0, sizeof(*cft));
 
        WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
-                                         h->cgroup_files));
+                                         h->cgroup_files_legacy));
+}
+
+static void __init __hugetlb_cgroup_file_init(int idx)
+{
+       __hugetlb_cgroup_file_dfl_init(idx);
+       __hugetlb_cgroup_file_legacy_init(idx);
 }
 
 void __init hugetlb_cgroup_file_init(void)
@@ -433,8 +603,14 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
        return;
 }
 
+static struct cftype hugetlb_files[] = {
+       {} /* terminate */
+};
+
 struct cgroup_subsys hugetlb_cgrp_subsys = {
        .css_alloc      = hugetlb_cgroup_css_alloc,
        .css_offline    = hugetlb_cgroup_css_offline,
        .css_free       = hugetlb_cgroup_css_free,
+       .dfl_cftypes    = hugetlb_files,
+       .legacy_cftypes = hugetlb_files,
 };
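Both file sets key their handlers off the packed cft->private value: the hstate index lives in the high 16 bits and the resource attribute in the low 16, decoded by MEMFILE_IDX()/MEMFILE_ATTR(). A worked model of the packing (the attribute value 2 for the limit file is assumed purely for illustration):

#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_IDX(val)        (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)

/*
 * For hstate index 1 and an attribute value of 2:
 *   MEMFILE_PRIVATE(1, 2) == 0x10002
 *   MEMFILE_IDX(0x10002)  == 1      -> which hstate (page size)
 *   MEMFILE_ATTR(0x10002) == 2      -> which resource file
 */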
index 2fa710bb6358a7d8bf2ed483c3adf324b8386e55..c15d8ae68c9673b2fd62b0a7b7eab2aa387aec2e 100644 (file)
@@ -778,15 +778,17 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
        return 0;
 }
 
-int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 {
        unsigned long shadow_start, shadow_end;
        int ret;
 
-       shadow_start = (unsigned long)kasan_mem_to_shadow(area->addr);
+       if (!is_vmalloc_or_module_addr((void *)addr))
+               return 0;
+
+       shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
        shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
-       shadow_end = (unsigned long)kasan_mem_to_shadow(area->addr +
-                                                       area->size);
+       shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
        shadow_end = ALIGN(shadow_end, PAGE_SIZE);
 
        ret = apply_to_page_range(&init_mm, shadow_start,
@@ -797,10 +799,6 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
 
        flush_cache_vmap(shadow_start, shadow_end);
 
-       kasan_unpoison_shadow(area->addr, requested_size);
-
-       area->flags |= VM_KASAN;
-
        /*
         * We need to be careful about inter-cpu effects here. Consider:
         *
@@ -843,12 +841,23 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
  * Poison the shadow for a vmalloc region. Called as part of the
  * freeing process at the time the region is freed.
  */
-void kasan_poison_vmalloc(void *start, unsigned long size)
+void kasan_poison_vmalloc(const void *start, unsigned long size)
 {
+       if (!is_vmalloc_or_module_addr(start))
+               return;
+
        size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
 }
 
+void kasan_unpoison_vmalloc(const void *start, unsigned long size)
+{
+       if (!is_vmalloc_or_module_addr(start))
+               return;
+
+       kasan_unpoison_shadow(start, size);
+}
+
 static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                        void *unused)
 {
@@ -948,6 +957,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 {
        void *shadow_start, *shadow_end;
        unsigned long region_start, region_end;
+       unsigned long size;
 
        region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
        region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
@@ -970,9 +980,11 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
        shadow_end = kasan_mem_to_shadow((void *)region_end);
 
        if (shadow_end > shadow_start) {
-               apply_to_page_range(&init_mm, (unsigned long)shadow_start,
-                                   (unsigned long)(shadow_end - shadow_start),
-                                   kasan_depopulate_vmalloc_pte, NULL);
+               size = shadow_end - shadow_start;
+               apply_to_existing_page_range(&init_mm,
+                                            (unsigned long)shadow_start,
+                                            size, kasan_depopulate_vmalloc_pte,
+                                            NULL);
                flush_tlb_kernel_range((unsigned long)shadow_start,
                                       (unsigned long)shadow_end);
        }
index c5b5f74cfd4debb16651552d29a5dfd688c87093..6c83cf4ed970b90e46b29d56a75ba20de4d580b9 100644 (file)
@@ -3287,49 +3287,34 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
        }
 }
 
-static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
+static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
 {
-       unsigned long stat[MEMCG_NR_STAT];
+       unsigned long stat[MEMCG_NR_STAT] = {0};
        struct mem_cgroup *mi;
        int node, cpu, i;
-       int min_idx, max_idx;
-
-       if (slab_only) {
-               min_idx = NR_SLAB_RECLAIMABLE;
-               max_idx = NR_SLAB_UNRECLAIMABLE;
-       } else {
-               min_idx = 0;
-               max_idx = MEMCG_NR_STAT;
-       }
-
-       for (i = min_idx; i < max_idx; i++)
-               stat[i] = 0;
 
        for_each_online_cpu(cpu)
-               for (i = min_idx; i < max_idx; i++)
+               for (i = 0; i < MEMCG_NR_STAT; i++)
                        stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
 
        for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
-               for (i = min_idx; i < max_idx; i++)
+               for (i = 0; i < MEMCG_NR_STAT; i++)
                        atomic_long_add(stat[i], &mi->vmstats[i]);
 
-       if (!slab_only)
-               max_idx = NR_VM_NODE_STAT_ITEMS;
-
        for_each_node(node) {
                struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
                struct mem_cgroup_per_node *pi;
 
-               for (i = min_idx; i < max_idx; i++)
+               for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
                        stat[i] = 0;
 
                for_each_online_cpu(cpu)
-                       for (i = min_idx; i < max_idx; i++)
+                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
                                stat[i] += per_cpu(
                                        pn->lruvec_stat_cpu->count[i], cpu);
 
                for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
-                       for (i = min_idx; i < max_idx; i++)
+                       for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
                                atomic_long_add(stat[i], &pi->lruvec_stat[i]);
        }
 }
@@ -3403,13 +3388,9 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
                parent = root_mem_cgroup;
 
        /*
-        * Deactivate and reparent kmem_caches. Then flush percpu
-        * slab statistics to have precise values at the parent and
-        * all ancestor levels. It's required to keep slab stats
-        * accurate after the reparenting of kmem_caches.
+        * Deactivate and reparent kmem_caches.
         */
        memcg_deactivate_kmem_caches(memcg, parent);
-       memcg_flush_percpu_vmstats(memcg, true);
 
        kmemcg_id = memcg->kmemcg_id;
        BUG_ON(kmemcg_id < 0);
@@ -4913,7 +4894,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
         * Flush percpu vmstats and vmevents to guarantee the value correctness
         * on parent's and all ancestor levels.
         */
-       memcg_flush_percpu_vmstats(memcg, false);
+       memcg_flush_percpu_vmstats(memcg);
        memcg_flush_percpu_vmevents(memcg);
        __mem_cgroup_free(memcg);
 }
index 606da187d1de904c226d3d53f56d696ab3d8964a..1c4be871a23728e5638ac08f7770f348f786d07a 100644 (file)
@@ -2021,26 +2021,34 @@ EXPORT_SYMBOL(vm_iomap_memory);
 
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                     unsigned long addr, unsigned long end,
-                                    pte_fn_t fn, void *data)
+                                    pte_fn_t fn, void *data, bool create)
 {
        pte_t *pte;
-       int err;
+       int err = 0;
        spinlock_t *uninitialized_var(ptl);
 
-       pte = (mm == &init_mm) ?
-               pte_alloc_kernel(pmd, addr) :
-               pte_alloc_map_lock(mm, pmd, addr, &ptl);
-       if (!pte)
-               return -ENOMEM;
+       if (create) {
+               pte = (mm == &init_mm) ?
+                       pte_alloc_kernel(pmd, addr) :
+                       pte_alloc_map_lock(mm, pmd, addr, &ptl);
+               if (!pte)
+                       return -ENOMEM;
+       } else {
+               pte = (mm == &init_mm) ?
+                       pte_offset_kernel(pmd, addr) :
+                       pte_offset_map_lock(mm, pmd, addr, &ptl);
+       }
 
        BUG_ON(pmd_huge(*pmd));
 
        arch_enter_lazy_mmu_mode();
 
        do {
-               err = fn(pte++, addr, data);
-               if (err)
-                       break;
+               if (create || !pte_none(*pte)) {
+                       err = fn(pte++, addr, data);
+                       if (err)
+                               break;
+               }
        } while (addr += PAGE_SIZE, addr != end);
 
        arch_leave_lazy_mmu_mode();
@@ -2052,77 +2060,95 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
                                     unsigned long addr, unsigned long end,
-                                    pte_fn_t fn, void *data)
+                                    pte_fn_t fn, void *data, bool create)
 {
        pmd_t *pmd;
        unsigned long next;
-       int err;
+       int err = 0;
 
        BUG_ON(pud_huge(*pud));
 
-       pmd = pmd_alloc(mm, pud, addr);
-       if (!pmd)
-               return -ENOMEM;
+       if (create) {
+               pmd = pmd_alloc(mm, pud, addr);
+               if (!pmd)
+                       return -ENOMEM;
+       } else {
+               pmd = pmd_offset(pud, addr);
+       }
        do {
                next = pmd_addr_end(addr, end);
-               err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
-               if (err)
-                       break;
+               if (create || !pmd_none_or_clear_bad(pmd)) {
+                       err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
+                                                create);
+                       if (err)
+                               break;
+               }
        } while (pmd++, addr = next, addr != end);
        return err;
 }
 
 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
                                     unsigned long addr, unsigned long end,
-                                    pte_fn_t fn, void *data)
+                                    pte_fn_t fn, void *data, bool create)
 {
        pud_t *pud;
        unsigned long next;
-       int err;
+       int err = 0;
 
-       pud = pud_alloc(mm, p4d, addr);
-       if (!pud)
-               return -ENOMEM;
+       if (create) {
+               pud = pud_alloc(mm, p4d, addr);
+               if (!pud)
+                       return -ENOMEM;
+       } else {
+               pud = pud_offset(p4d, addr);
+       }
        do {
                next = pud_addr_end(addr, end);
-               err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
-               if (err)
-                       break;
+               if (create || !pud_none_or_clear_bad(pud)) {
+                       err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
+                                                create);
+                       if (err)
+                               break;
+               }
        } while (pud++, addr = next, addr != end);
        return err;
 }
 
 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
                                     unsigned long addr, unsigned long end,
-                                    pte_fn_t fn, void *data)
+                                    pte_fn_t fn, void *data, bool create)
 {
        p4d_t *p4d;
        unsigned long next;
-       int err;
+       int err = 0;
 
-       p4d = p4d_alloc(mm, pgd, addr);
-       if (!p4d)
-               return -ENOMEM;
+       if (create) {
+               p4d = p4d_alloc(mm, pgd, addr);
+               if (!p4d)
+                       return -ENOMEM;
+       } else {
+               p4d = p4d_offset(pgd, addr);
+       }
        do {
                next = p4d_addr_end(addr, end);
-               err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
-               if (err)
-                       break;
+               if (create || !p4d_none_or_clear_bad(p4d)) {
+                       err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
+                                                create);
+                       if (err)
+                               break;
+               }
        } while (p4d++, addr = next, addr != end);
        return err;
 }
 
-/*
- * Scan a region of virtual memory, filling in page tables as necessary
- * and calling a provided function on each leaf page table.
- */
-int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
-                       unsigned long size, pte_fn_t fn, void *data)
+static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+                                unsigned long size, pte_fn_t fn,
+                                void *data, bool create)
 {
        pgd_t *pgd;
        unsigned long next;
        unsigned long end = addr + size;
-       int err;
+       int err = 0;
 
        if (WARN_ON(addr >= end))
                return -EINVAL;
@@ -2130,15 +2156,41 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
-               err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
+               if (!create && pgd_none_or_clear_bad(pgd))
+                       continue;
+               err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
 
        return err;
 }
+
+/*
+ * Scan a region of virtual memory, filling in page tables as necessary
+ * and calling a provided function on each leaf page table.
+ */
+int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+                       unsigned long size, pte_fn_t fn, void *data)
+{
+       return __apply_to_page_range(mm, addr, size, fn, data, true);
+}
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
+/*
+ * Scan a region of virtual memory, calling a provided function on
+ * each leaf page table where it exists.
+ *
+ * Unlike apply_to_page_range, this does _not_ fill in page tables
+ * where they are absent.
+ */
+int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
+                                unsigned long size, pte_fn_t fn, void *data)
+{
+       return __apply_to_page_range(mm, addr, size, fn, data, false);
+}
+EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
+
 /*
  * handle_pte_fault chooses page fault handler according to an entry which was
  * read non-atomically.  Before making any commitment, on those architectures
@@ -2151,7 +2203,7 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
                                pte_t *page_table, pte_t orig_pte)
 {
        int same = 1;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
        if (sizeof(pte_t) > sizeof(unsigned long)) {
                spinlock_t *ptl = pte_lockptr(mm, pmd);
                spin_lock(ptl);
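apply_to_existing_page_range() gives callers such as the KASAN shadow teardown above a walk that only visits page tables that already exist: with create == false, every absent PGD/P4D/PUD/PMD level is skipped instead of allocated, and fn runs only on PTEs that are not pte_none(). A hedged usage sketch (count_mapped() and count_pte() are hypothetical, not kernel symbols):

#include <linux/mm.h>

static int count_pte(pte_t *pte, unsigned long addr, void *data)
{
        /* invoked only for PTEs that are not pte_none() */
        (*(unsigned long *)data)++;
        return 0;
}

static unsigned long count_mapped(unsigned long start, unsigned long size)
{
        unsigned long n = 0;

        /* absent intermediate tables are skipped, never allocated */
        apply_to_existing_page_range(&init_mm, start, size, count_pte, &n);
        return n;
}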
index 55ac23ef11c1cbf1e13f187b5c850d4075128ad5..a91a072f2b2ce6baf432f09c3cfe74dbaec0033b 100644 (file)
@@ -483,8 +483,9 @@ static void update_pgdat_span(struct pglist_data *pgdat)
        pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }
 
-static void __remove_zone(struct zone *zone, unsigned long start_pfn,
-               unsigned long nr_pages)
+void __ref remove_pfn_range_from_zone(struct zone *zone,
+                                     unsigned long start_pfn,
+                                     unsigned long nr_pages)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long flags;
@@ -499,28 +500,30 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
                return;
 #endif
 
+       clear_zone_contiguous(zone);
+
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
        update_pgdat_span(pgdat);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
+
+       set_zone_contiguous(zone);
 }
 
-static void __remove_section(struct zone *zone, unsigned long pfn,
-               unsigned long nr_pages, unsigned long map_offset,
-               struct vmem_altmap *altmap)
+static void __remove_section(unsigned long pfn, unsigned long nr_pages,
+                            unsigned long map_offset,
+                            struct vmem_altmap *altmap)
 {
        struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
 
        if (WARN_ON_ONCE(!valid_section(ms)))
                return;
 
-       __remove_zone(zone, pfn, nr_pages);
        sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
 }
 
 /**
- * __remove_pages() - remove sections of pages from a zone
- * @zone: zone from which pages need to be removed
+ * __remove_pages() - remove sections of pages
  * @pfn: starting pageframe (must be aligned to start of a section)
  * @nr_pages: number of pages to remove (must be multiple of section size)
  * @altmap: alternative device page map or %NULL if default memmap is used
@@ -530,16 +533,14 @@ static void __remove_section(struct zone *zone, unsigned long pfn,
  * sure that pages are marked reserved and zones are adjusted properly by
  * calling offline_pages().
  */
-void __remove_pages(struct zone *zone, unsigned long pfn,
-                   unsigned long nr_pages, struct vmem_altmap *altmap)
+void __remove_pages(unsigned long pfn, unsigned long nr_pages,
+                   struct vmem_altmap *altmap)
 {
        unsigned long map_offset = 0;
        unsigned long nr, start_sec, end_sec;
 
        map_offset = vmem_altmap_offset(altmap);
 
-       clear_zone_contiguous(zone);
-
        if (check_pfn_span(pfn, nr_pages, "remove"))
                return;
 
@@ -551,13 +552,11 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
                cond_resched();
                pfns = min(nr_pages, PAGES_PER_SECTION
                                - (pfn & ~PAGE_SECTION_MASK));
-               __remove_section(zone, pfn, pfns, map_offset, altmap);
+               __remove_section(pfn, pfns, map_offset, altmap);
                pfn += pfns;
                nr_pages -= pfns;
                map_offset = 0;
        }
-
-       set_zone_contiguous(zone);
 }
 
 int set_online_page_callback(online_page_callback_t callback)
@@ -869,6 +868,7 @@ failed_addition:
                 (unsigned long long) pfn << PAGE_SHIFT,
                 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
+       remove_pfn_range_from_zone(zone, pfn, nr_pages);
        mem_hotplug_done();
        return ret;
 }
@@ -1628,6 +1628,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        writeback_set_ratelimit();
 
        memory_notify(MEM_OFFLINE, &arg);
+       remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
        mem_hotplug_done();
        return 0;
 
index 067cf7d3daf5af6126f90e481a154392e724559c..b2920ae87a612fa550e224a91758b14f1c836b9d 100644 (file)
@@ -2148,18 +2148,22 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                nmask = policy_nodemask(gfp, pol);
                if (!nmask || node_isset(hpage_node, *nmask)) {
                        mpol_cond_put(pol);
+                       /*
+                        * First, try to allocate the THP only on the local
+                        * node, but don't reclaim unnecessarily; just compact.
+                        */
                        page = __alloc_pages_node(hpage_node,
-                                               gfp | __GFP_THISNODE, order);
+                               gfp | __GFP_THISNODE | __GFP_NORETRY, order);
 
                        /*
                         * If hugepage allocations are configured to always
                         * use synchronous compaction, or the vma has been
                         * madvised to prefer hugepage backing, retry allowing
-                        * remote memory as well.
+                        * remote memory with both reclaim and compaction.
                         */
                        if (!page && (gfp & __GFP_DIRECT_RECLAIM))
                                page = __alloc_pages_node(hpage_node,
-                                               gfp | __GFP_NORETRY, order);
+                                                               gfp, order);
 
                        goto out;
                }
index 03ccbdfeb6972018eea805d5848b4a9b3a3ba4cd..c51c6bd2fe3425f0fc00dae12f29dcec5d395a42 100644 (file)
@@ -120,7 +120,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 
        mem_hotplug_begin();
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-               __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+               __remove_pages(PHYS_PFN(res->start),
                               PHYS_PFN(resource_size(res)), NULL);
        } else {
                arch_remove_memory(nid, res->start, resource_size(res),
index eae1565285e3a0e1ecf2bc12a731d8081d17d31b..86873b6f38a7feba4593f0d9a209dde07e2ed385 100644 (file)
@@ -1512,9 +1512,11 @@ static int do_move_pages_to_node(struct mm_struct *mm,
 /*
  * Resolves the given address to a struct page, isolates it from the LRU and
  * puts it on the given pagelist.
- * Returns -errno if the page cannot be found/isolated or 0 when it has been
- * queued or the page doesn't need to be migrated because it is already on
- * the target node
+ * Returns:
+ *     -errno - if the page cannot be found/isolated
+ *     0 - when it doesn't have to be migrated because it is already on the
+ *         target node
+ *     1 - when it has been queued
  */
 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                int node, struct list_head *pagelist, bool migrate_all)
@@ -1553,7 +1555,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
        if (PageHuge(page)) {
                if (PageHead(page)) {
                        isolate_huge_page(page, pagelist);
-                       err = 0;
+                       err = 1;
                }
        } else {
                struct page *head;
@@ -1563,7 +1565,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                if (err)
                        goto out_putpage;
 
-               err = 0;
+               err = 1;
                list_add_tail(&head->lru, pagelist);
                mod_node_page_state(page_pgdat(head),
                        NR_ISOLATED_ANON + page_is_file_cache(head),
@@ -1640,8 +1642,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                 */
                err = add_page_for_migration(mm, addr, current_node,
                                &pagelist, flags & MPOL_MF_MOVE_ALL);
-               if (!err)
+
+               if (!err) {
+                       /* The page is already on the target node */
+                       err = store_status(status, i, current_node, 1);
+                       if (err)
+                               goto out_flush;
                        continue;
+               } else if (err > 0) {
+                       /* The page is successfully queued for migration */
+                       continue;
+               }
 
                err = store_status(status, i, err, 1);
                if (err)
index 9c648524e4dc7eda8fadb087566da216fc61417c..bc788548c4e588ba4017e51cf8179313ee737bfc 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_PRIVATE r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
  *             w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
  *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- *                                                             r: (no) no
- *                                                             w: (no) no
- *                                                             x: (yes) yes
  */
 pgprot_t protection_map[16] __ro_after_init = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
@@ -3342,6 +3336,8 @@ static const struct vm_operations_struct special_mapping_vmops = {
        .fault = special_mapping_fault,
        .mremap = special_mapping_mremap,
        .name = special_mapping_name,
+       /* vDSO code relies on VVAR not being accessible remotely */
+       .access = NULL,
 };
 
 static const struct vm_operations_struct legacy_special_mapping_vmops = {
index 71e3acea781764248161da9d7e80046fe823eb72..d58c481b3df833e7ab12ecceaee8ed0b09a7e884 100644 (file)
@@ -890,7 +890,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
                K(get_mm_counter(mm, MM_FILEPAGES)),
                K(get_mm_counter(mm, MM_SHMEMPAGES)),
                from_kuid(&init_user_ns, task_uid(victim)),
-               mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
+               mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
        task_unlock(victim);
 
        /*
index 50055d2e4ea85cf305e209f8a7dd3c84c8251721..2caf780a42e7ae6129aa10f9d2d5dd890a7716f9 100644 (file)
@@ -201,11 +201,11 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
        if (this_bw < tot_bw) {
                if (min) {
                        min *= this_bw;
-                       do_div(min, tot_bw);
+                       min = div64_ul(min, tot_bw);
                }
                if (max < 100) {
                        max *= this_bw;
-                       do_div(max, tot_bw);
+                       max = div64_ul(max, tot_bw);
                }
        }
 
@@ -766,7 +766,7 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
        struct wb_domain *dom = dtc_dom(dtc);
        unsigned long thresh = dtc->thresh;
        u64 wb_thresh;
-       long numerator, denominator;
+       unsigned long numerator, denominator;
        unsigned long wb_min_ratio, wb_max_ratio;
 
        /*
@@ -777,7 +777,7 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
 
        wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
        wb_thresh *= numerator;
-       do_div(wb_thresh, denominator);
+       wb_thresh = div64_ul(wb_thresh, denominator);
 
        wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
 
@@ -1102,7 +1102,7 @@ static void wb_update_write_bandwidth(struct bdi_writeback *wb,
        bw = written - min(written, wb->written_stamp);
        bw *= HZ;
        if (unlikely(elapsed > period)) {
-               do_div(bw, elapsed);
+               bw = div64_ul(bw, elapsed);
                avg = bw;
                goto out;
        }
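
do_div() divides a u64 by a 32-bit divisor, so once tot_bw (or elapsed, or denominator) can exceed 32 bits the divisor is silently truncated before the divide; div64_ul() accepts a full unsigned long divisor. A standalone sketch of the failure mode, in plain userspace C with the kernel macros replaced by ordinary division:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t min = 50;                      /* a min ratio, as above */
            uint64_t this_bw = 1 << 20;
            uint64_t tot_bw = (1ULL << 32) + 1000;  /* does not fit in 32 bits */

            min *= this_bw;
            /* do_div() keeps only the low 32 bits of the divisor (1000 here)... */
            uint64_t truncated = min / (uint32_t)tot_bw;
            /* ...while div64_ul() divides by the full value. */
            uint64_t full = min / tot_bw;

            printf("truncated=%llu full=%llu\n",
                   (unsigned long long)truncated, (unsigned long long)full);
            return 0;
    }
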
index 4785a8a2040eeccaa996e93597b30ddf4626e838..d047bf7d8fd406b9959969ab65b433d8954b2803 100644 (file)
@@ -694,34 +694,27 @@ void prep_compound_page(struct page *page, unsigned int order)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
-#else
+bool _debug_pagealloc_enabled_early __read_mostly
+                       = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-#endif
 EXPORT_SYMBOL(_debug_pagealloc_enabled);
 
 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
 
 static int __init early_debug_pagealloc(char *buf)
 {
-       bool enable = false;
-
-       if (kstrtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable)
-               static_branch_enable(&_debug_pagealloc_enabled);
-
-       return 0;
+       return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 }
 early_param("debug_pagealloc", early_debug_pagealloc);
 
-static void init_debug_guardpage(void)
+void init_debug_pagealloc(void)
 {
        if (!debug_pagealloc_enabled())
                return;
 
+       static_branch_enable(&_debug_pagealloc_enabled);
+
        if (!debug_guardpage_minorder())
                return;
 
@@ -1186,7 +1179,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         */
        arch_free_page(page, order);
 
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 0);
 
        kasan_free_nondeferred_pages(page, order);
@@ -1207,7 +1200,7 @@ static bool free_pcp_prepare(struct page *page)
 
 static bool bulkfree_pcp_prepare(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return free_pages_check(page);
        else
                return false;
@@ -1221,7 +1214,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
  */
 static bool free_pcp_prepare(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return free_pages_prepare(page, 0, true);
        else
                return free_pages_prepare(page, 0, false);
@@ -1973,10 +1966,6 @@ void __init page_alloc_init_late(void)
 
        for_each_populated_zone(zone)
                set_zone_contiguous(zone);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       init_debug_guardpage();
-#endif
 }
 
 #ifdef CONFIG_CMA
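
The debug_pagealloc rework above splits the boot parameter from the static key: early_debug_pagealloc() only fills the plain bool _debug_pagealloc_enabled_early, init_debug_pagealloc() flips the static key exactly once later in boot, and hot paths test it via debug_pagealloc_enabled_static(). A standalone model of that split, with an ordinary bool standing in for the jump label (real static keys need runtime code patching):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool enabled_early;      /* set while the command line is parsed */
    static bool static_key;         /* stand-in for _debug_pagealloc_enabled */

    static void early_param_debug_pagealloc(const char *buf)
    {
            enabled_early = (strcmp(buf, "on") == 0);   /* crude kstrtobool() */
    }

    static void init_debug_pagealloc(void)
    {
            if (enabled_early)
                    static_key = true;          /* static_branch_enable() */
    }

    int main(void)
    {
            early_param_debug_pagealloc("on");  /* "debug_pagealloc=on" */
            init_debug_pagealloc();
            if (static_key)                 /* debug_pagealloc_enabled_static() */
                    puts("page allocator debugging enabled");
            return 0;
    }
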
@@ -2106,7 +2095,7 @@ static inline bool free_pages_prezeroed(void)
  */
 static inline bool check_pcp_refill(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return check_new_page(page);
        else
                return false;
@@ -2128,7 +2117,7 @@ static inline bool check_pcp_refill(struct page *page)
 }
 static inline bool check_new_pcp(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return check_new_page(page);
        else
                return false;
@@ -2155,7 +2144,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        set_page_refcounted(page);
 
        arch_alloc_page(page, order);
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 1);
        kasan_alloc_pages(page, order);
        kernel_poison_pages(page, 1 << order, 1);
@@ -4476,8 +4465,11 @@ retry_cpuset:
                if (page)
                        goto got_pg;
 
-                if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
-                    !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
+               /*
+                * Checks for costly allocations with __GFP_NORETRY, which
+                * includes some THP page fault allocations
+                */
+               if (costly_order && (gfp_mask & __GFP_NORETRY)) {
                        /*
                         * If allocating entire pageblock(s) and compaction
                         * failed because all zones are below low watermarks
@@ -4498,23 +4490,6 @@ retry_cpuset:
                        if (compact_result == COMPACT_SKIPPED ||
                            compact_result == COMPACT_DEFERRED)
                                goto nopage;
-               }
-
-               /*
-                * Checks for costly allocations with __GFP_NORETRY, which
-                * includes THP page fault allocations
-                */
-               if (costly_order && (gfp_mask & __GFP_NORETRY)) {
-                       /*
-                        * If compaction is deferred for high-order allocations,
-                        * it is because sync compaction recently failed. If
-                        * this is the case and the caller requested a THP
-                        * allocation, we do not want to heavily disrupt the
-                        * system, so we fail the allocation instead of entering
-                        * direct reclaim.
-                        */
-                       if (compact_result == COMPACT_DEFERRED)
-                               goto nopage;
 
                        /*
                         * Looks like reclaim/compaction is worth trying, but
index 165fa633299348412084a81fb546bd74f1425436..8793e8cc1a48c94939b4f235a77ced1c2f3b2a4b 100644 (file)
@@ -2107,9 +2107,10 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        /*
         * Our priority is to support MAP_SHARED mapped hugely;
         * and support MAP_PRIVATE mapped hugely too, until it is COWed.
-        * But if caller specified an address hint, respect that as before.
+        * But if the caller specified an address hint and we allocated the area there
+        * successfully, respect that as before.
         */
-       if (uaddr)
+       if (uaddr == addr)
                return addr;
 
        if (shmem_huge != SHMEM_HUGE_FORCE) {
@@ -2143,7 +2144,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        if (inflated_len < len)
                return addr;
 
-       inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
+       inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
        if (IS_ERR_VALUE(inflated_addr))
                return addr;
        if (inflated_addr & ~PAGE_MASK)
index f1e1840af53309298b289c521e90ff6ac0a952b9..a89633603b2d7f775655680d8865be8c9eab6f6a 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1416,7 +1416,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 #if DEBUG
 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-       if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+       if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
                (cachep->size % PAGE_SIZE) == 0)
                return true;
 
@@ -2008,7 +2008,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
         * to check size >= 256. It guarantees that all necessary small
         * sized slab is initialized in current slab initialization sequence.
         */
-       if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
+       if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
                size >= 256 && cachep->object_size > cache_line_size()) {
                if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
                        size_t tmp_size = ALIGN(size, PAGE_SIZE);
index f0ab6d4ceb4c947207c0f567a6f31e564fa59b29..0d95ddea13b0d835007025e5fc2f487f3c85a76a 100644 (file)
@@ -903,7 +903,8 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
         * deactivates the memcg kmem_caches through workqueue. Make sure all
         * previous workitems on workqueue are processed.
         */
-       flush_workqueue(memcg_kmem_cache_wq);
+       if (likely(memcg_kmem_cache_wq))
+               flush_workqueue(memcg_kmem_cache_wq);
 
        /*
         * If we're racing with children kmem_cache deactivation, it might
index d11389710b12d6f35479aa5d029ac6b5b9205228..0ab92ec8c2a62e6efde1a687391fe77b6e636f64 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -288,7 +288,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
        unsigned long freepointer_addr;
        void *p;
 
-       if (!debug_pagealloc_enabled())
+       if (!debug_pagealloc_enabled_static())
                return get_freepointer(s, object);
 
        freepointer_addr = (unsigned long)object + s->offset;
@@ -1964,7 +1964,7 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
        return get_any_partial(s, flags, c);
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 /*
  * Calculate the next globally unique transaction for disambiguation
  * during cmpxchg. The transactions start with the cpu number and are then
@@ -2009,7 +2009,7 @@ static inline void note_cmpxchg_failure(const char *n,
 
        pr_info("%s %s: cmpxchg redo ", n, s->name);
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
                pr_warn("due to cpu change %d -> %d\n",
                        tid_to_cpu(tid), tid_to_cpu(actual_tid));
@@ -2341,7 +2341,7 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
-       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
 }
 
 /*
@@ -2637,7 +2637,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        unsigned long flags;
 
        local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
        /*
         * We may have been preempted and rescheduled on a different
         * cpu before disabling interrupts. Need to reload cpu area
@@ -2691,13 +2691,13 @@ redo:
         * as we end up on the original cpu again when doing the cmpxchg.
         *
         * We should guarantee that tid and kmem_cache are retrieved on
-        * the same cpu. It could be different if CONFIG_PREEMPT so we need
+        * the same cpu. It could be different if CONFIG_PREEMPTION so we need
         * to check if it is matched or not.
         */
        do {
                tid = this_cpu_read(s->cpu_slab->tid);
                c = raw_cpu_ptr(s->cpu_slab);
-       } while (IS_ENABLED(CONFIG_PREEMPT) &&
+       } while (IS_ENABLED(CONFIG_PREEMPTION) &&
                 unlikely(tid != READ_ONCE(c->tid)));
 
        /*
@@ -2971,7 +2971,7 @@ redo:
        do {
                tid = this_cpu_read(s->cpu_slab->tid);
                c = raw_cpu_ptr(s->cpu_slab);
-       } while (IS_ENABLED(CONFIG_PREEMPT) &&
+       } while (IS_ENABLED(CONFIG_PREEMPTION) &&
                 unlikely(tid != READ_ONCE(c->tid)));
 
        /* Same with comment on barrier() in slab_alloc_node() */
index b20ab7cdac867071b27c81e39cffad475851fad5..3822ecbd8a1f644e6e9e1d7b27655e5e5ae00ebe 100644 (file)
@@ -777,7 +777,14 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
        if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
 
-               if (!section_is_early) {
+               /*
+                * When removing an early section, the usage map is kept (as the
+                * usage maps of other sections fall into the same page). It
+                * will be re-used when re-adding the section - which is then no
+                * longer an early section. If the usage map is PageReserved, it
+                * was allocated during boot.
+                */
+               if (!PageReserved(virt_to_page(ms->usage))) {
                        kfree(ms->usage);
                        ms->usage = NULL;
                }
index 4d3b3d60d8939b65f65bfcaad9c4d4c497f83a7a..1f46c3b86f9f7ded21837f7a3e26e6072af46b98 100644 (file)
 
 #include "internal.h"
 
+bool is_vmalloc_addr(const void *x)
+{
+       unsigned long addr = (unsigned long)x;
+
+       return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+EXPORT_SYMBOL(is_vmalloc_addr);
+
 struct vfree_deferred {
        struct llist_head list;
        struct work_struct wq;
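
The newly exported is_vmalloc_addr() is a plain range test against [VMALLOC_START, VMALLOC_END); its typical consumer is code that must choose between vfree() and kfree() for a pointer it did not allocate itself, as kvfree() does. A standalone model, assuming a 64-bit host and with illustrative bounds (the real values are per-architecture):

    #include <stdbool.h>
    #include <stdio.h>

    #define VMALLOC_START 0xffffc90000000000UL      /* illustrative */
    #define VMALLOC_END   0xffffe90000000000UL      /* illustrative */

    static bool is_vmalloc_addr(const void *x)
    {
            unsigned long addr = (unsigned long)x;

            return addr >= VMALLOC_START && addr < VMALLOC_END;
    }

    int main(void)
    {
            printf("%d\n", is_vmalloc_addr((void *)0xffffc90000001000UL)); /* 1 */
            printf("%d\n", is_vmalloc_addr((void *)0x1000UL));             /* 0 */
            return 0;
    }
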
@@ -1061,6 +1069,26 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
        return nva_start_addr;
 }
 
+/*
+ * Free a region of KVA allocated by alloc_vmap_area
+ */
+static void free_vmap_area(struct vmap_area *va)
+{
+       /*
+        * Remove from the busy tree/list.
+        */
+       spin_lock(&vmap_area_lock);
+       unlink_va(va, &vmap_area_root);
+       spin_unlock(&vmap_area_lock);
+
+       /*
+        * Insert/Merge it back to the free tree/list.
+        */
+       spin_lock(&free_vmap_area_lock);
+       merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
+       spin_unlock(&free_vmap_area_lock);
+}
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -1073,6 +1101,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        struct vmap_area *va, *pva;
        unsigned long addr;
        int purged = 0;
+       int ret;
 
        BUG_ON(!size);
        BUG_ON(offset_in_page(size));
@@ -1139,6 +1168,7 @@ retry:
        va->va_end = addr + size;
        va->vm = NULL;
 
+
        spin_lock(&vmap_area_lock);
        insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
        spin_unlock(&vmap_area_lock);
@@ -1147,6 +1177,12 @@ retry:
        BUG_ON(va->va_start < vstart);
        BUG_ON(va->va_end > vend);
 
+       ret = kasan_populate_vmalloc(addr, size);
+       if (ret) {
+               free_vmap_area(va);
+               return ERR_PTR(ret);
+       }
+
        return va;
 
 overflow:
@@ -1185,26 +1221,6 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
 
-/*
- * Free a region of KVA allocated by alloc_vmap_area
- */
-static void free_vmap_area(struct vmap_area *va)
-{
-       /*
-        * Remove from the busy tree/list.
-        */
-       spin_lock(&vmap_area_lock);
-       unlink_va(va, &vmap_area_root);
-       spin_unlock(&vmap_area_lock);
-
-       /*
-        * Insert/Merge it back to the free tree/list.
-        */
-       spin_lock(&free_vmap_area_lock);
-       merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
-       spin_unlock(&free_vmap_area_lock);
-}
-
 /*
  * Clear the pagetable entries of a given vmap_area
  */
@@ -1375,7 +1391,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 {
        flush_cache_vunmap(va->va_start, va->va_end);
        unmap_vmap_area(va);
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                flush_tlb_kernel_range(va->va_start, va->va_end);
 
        free_vmap_area_noflush(va);
@@ -1673,7 +1689,7 @@ static void vb_free(const void *addr, unsigned long size)
 
        vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                flush_tlb_kernel_range((unsigned long)addr,
                                        (unsigned long)addr + size);
 
@@ -1771,6 +1787,8 @@ void vm_unmap_ram(const void *mem, unsigned int count)
        BUG_ON(addr > VMALLOC_END);
        BUG_ON(!PAGE_ALIGNED(addr));
 
+       kasan_poison_vmalloc(mem, size);
+
        if (likely(count <= VMAP_MAX_ALLOC)) {
                debug_check_no_locks_freed(mem, size);
                vb_free(mem, size);
@@ -1821,6 +1839,9 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
                addr = va->va_start;
                mem = (void *)addr;
        }
+
+       kasan_unpoison_vmalloc(mem, size);
+
        if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
                vm_unmap_ram(mem, count);
                return NULL;
@@ -2075,6 +2096,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 {
        struct vmap_area *va;
        struct vm_struct *area;
+       unsigned long requested_size = size;
 
        BUG_ON(in_interrupt());
        size = PAGE_ALIGN(size);
@@ -2098,23 +2120,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                return NULL;
        }
 
-       setup_vmalloc_vm(area, va, flags, caller);
+       kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
 
-       /*
-        * For KASAN, if we are in vmalloc space, we need to cover the shadow
-        * area with real memory. If we come here through VM_ALLOC, this is
-        * done by a higher level function that has access to the true size,
-        * which might not be a full page.
-        *
-        * We assume module space comes via VM_ALLOC path.
-        */
-       if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) {
-               if (kasan_populate_vmalloc(area->size, area)) {
-                       unmap_vmap_area(va);
-                       kfree(area);
-                       return NULL;
-               }
-       }
+       setup_vmalloc_vm(area, va, flags, caller);
 
        return area;
 }
@@ -2293,8 +2301,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
        debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
        debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-       if (area->flags & VM_KASAN)
-               kasan_poison_vmalloc(area->addr, area->size);
+       kasan_poison_vmalloc(area->addr, area->size);
 
        vm_remove_mappings(area, deallocate_pages);
 
@@ -2539,7 +2546,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        if (!size || (size >> PAGE_SHIFT) > totalram_pages())
                goto fail;
 
-       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+       area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
                                vm_flags, start, end, node, gfp_mask, caller);
        if (!area)
                goto fail;
@@ -2548,11 +2555,6 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        if (!addr)
                return NULL;
 
-       if (is_vmalloc_or_module_addr(area->addr)) {
-               if (kasan_populate_vmalloc(real_size, area))
-                       return NULL;
-       }
-
        /*
         * In this function, newly allocated vm_struct has VM_UNINITIALIZED
         * flag. It means that vm_struct is not fully initialized.
@@ -3294,7 +3296,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
        struct vmap_area **vas, *va;
        struct vm_struct **vms;
        int area, area2, last_area, term_area;
-       unsigned long base, start, size, end, last_end;
+       unsigned long base, start, size, end, last_end, orig_start, orig_end;
        bool purged = false;
        enum fit_type type;
 
@@ -3424,6 +3426,15 @@ retry:
 
        spin_unlock(&free_vmap_area_lock);
 
+       /* populate the kasan shadow space */
+       for (area = 0; area < nr_vms; area++) {
+               if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+                       goto err_free_shadow;
+
+               kasan_unpoison_vmalloc((void *)vas[area]->va_start,
+                                      sizes[area]);
+       }
+
        /* insert all vm's */
        spin_lock(&vmap_area_lock);
        for (area = 0; area < nr_vms; area++) {
@@ -3434,12 +3445,6 @@ retry:
        }
        spin_unlock(&vmap_area_lock);
 
-       /* populate the shadow space outside of the lock */
-       for (area = 0; area < nr_vms; area++) {
-               /* assume success here */
-               kasan_populate_vmalloc(sizes[area], vms[area]);
-       }
-
        kfree(vas);
        return vms;
 
@@ -3451,8 +3456,12 @@ recovery:
         * and when pcpu_get_vm_areas() is success.
         */
        while (area--) {
-               merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
-                                      &free_vmap_area_list);
+               orig_start = vas[area]->va_start;
+               orig_end = vas[area]->va_end;
+               va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+                                           &free_vmap_area_list);
+               kasan_release_vmalloc(orig_start, orig_end,
+                                     va->va_start, va->va_end);
                vas[area] = NULL;
        }
 
@@ -3487,6 +3496,28 @@ err_free2:
        kfree(vas);
        kfree(vms);
        return NULL;
+
+err_free_shadow:
+       spin_lock(&free_vmap_area_lock);
+       /*
+        * We release all the vmalloc shadows, even the ones for regions that
+        * hadn't been successfully added. This relies on kasan_release_vmalloc
+        * being able to tolerate this case.
+        */
+       for (area = 0; area < nr_vms; area++) {
+               orig_start = vas[area]->va_start;
+               orig_end = vas[area]->va_end;
+               va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+                                           &free_vmap_area_list);
+               kasan_release_vmalloc(orig_start, orig_end,
+                                     va->va_start, va->va_end);
+               vas[area] = NULL;
+               kfree(vms[area]);
+       }
+       spin_unlock(&free_vmap_area_lock);
+       kfree(vas);
+       kfree(vms);
+       return NULL;
 }
 
 /**
index 74e8edce83ca4f148142cbec62e1b0d4f3026a13..572fb17c62733b2cca817953f0b45212abb5bebb 100644 (file)
@@ -387,7 +387,7 @@ void register_shrinker_prepared(struct shrinker *shrinker)
 {
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
                idr_replace(&shrinker_idr, shrinker, shrinker->id);
 #endif
index 2b2b9aae8a3c63dfc3c8a1b0551ec8c415d69567..22d17ecfe7df464412426e2dc4079c541f28ae4d 100644 (file)
@@ -2069,6 +2069,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
                zs_pool_dec_isolated(pool);
        }
 
+       if (page_zone(newpage) != page_zone(page)) {
+               dec_zone_page_state(page, NR_ZSPAGES);
+               inc_zone_page_state(newpage, NR_ZSPAGES);
+       }
+
        reset_page(page);
        put_page(page);
        page = newpage;
index 2cfdfbfbb2edbddd4d441539e468c2ca0fd0d8f3..bea6e43d45a0ddb5356ab369acec71e4b93bc188 100644 (file)
@@ -523,7 +523,7 @@ int mrp_request_join(const struct net_device *dev,
        struct mrp_attr *attr;
 
        if (sizeof(struct mrp_skb_cb) + len >
-           FIELD_SIZEOF(struct sk_buff, cb))
+           sizeof_field(struct sk_buff, cb))
                return -ENOMEM;
 
        spin_lock_bh(&app->lock);
@@ -548,7 +548,7 @@ void mrp_request_leave(const struct net_device *dev,
        struct mrp_attr *attr;
 
        if (sizeof(struct mrp_skb_cb) + len >
-           FIELD_SIZEOF(struct sk_buff, cb))
+           sizeof_field(struct sk_buff, cb))
                return;
 
        spin_lock_bh(&app->lock);
@@ -692,7 +692,7 @@ static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
         * advance to the next event in its Vector.
         */
        if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
-           FIELD_SIZEOF(struct sk_buff, cb))
+           sizeof_field(struct sk_buff, cb))
                return -1;
        if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
                          mrp_cb(skb)->mh->attrlen) < 0)
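
This and the following hunks are a mechanical rename of FIELD_SIZEOF() to sizeof_field(). Both take the size of a struct member through a null pointer, a compile-time construct that never dereferences anything. A standalone sketch, with a stand-in struct and a macro body mirroring the <linux/stddef.h> definition:

    #include <stdio.h>

    #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

    struct sk_buff_like {   /* stand-in; the real sk_buff control buffer is 48 bytes */
            char cb[48];
    };

    int main(void)
    {
            printf("%zu\n", sizeof_field(struct sk_buff_like, cb));  /* prints 48 */
            return 0;
    }
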
index c46daf09a5011fe317d0a950ec2ea6735619a148..bb7ec1a3915ddbda397e920d9d281eddbd62527f 100644 (file)
@@ -126,6 +126,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+void vlan_dev_uninit(struct net_device *dev);
 bool vlan_dev_inherit_address(struct net_device *dev,
                              struct net_device *real_dev);
 
index e5bff5cc6f97562a9887195ced5c572c1951915e..2a78da4072de9824ef1074b7596697366b68c21c 100644 (file)
@@ -586,7 +586,8 @@ static int vlan_dev_init(struct net_device *dev)
        return 0;
 }
 
-static void vlan_dev_uninit(struct net_device *dev)
+/* Note: this function might be called multiple times for the same device. */
+void vlan_dev_uninit(struct net_device *dev)
 {
        struct vlan_priority_tci_mapping *pm;
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
index c482a6fe939398a82a6d112aa5f306e0765ab387..0db85aeb119b88fae1abaf7d0a10019ba4044915 100644 (file)
@@ -108,11 +108,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
        struct ifla_vlan_flags *flags;
        struct ifla_vlan_qos_mapping *m;
        struct nlattr *attr;
-       int rem;
+       int rem, err;
 
        if (data[IFLA_VLAN_FLAGS]) {
                flags = nla_data(data[IFLA_VLAN_FLAGS]);
-               vlan_dev_change_flags(dev, flags->flags, flags->mask);
+               err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
+               if (err)
+                       return err;
        }
        if (data[IFLA_VLAN_INGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
@@ -123,7 +125,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (data[IFLA_VLAN_EGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
                        m = nla_data(attr);
-                       vlan_dev_set_egress_priority(dev, m->from, m->to);
+                       err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+                       if (err)
+                               return err;
                }
        }
        return 0;
@@ -179,10 +183,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
                return -EINVAL;
 
        err = vlan_changelink(dev, tb, data, extack);
-       if (err < 0)
-               return err;
-
-       return register_vlan_dev(dev, extack);
+       if (!err)
+               err = register_vlan_dev(dev, extack);
+       if (err)
+               vlan_dev_uninit(dev);
+       return err;
 }
 
 static inline size_t vlan_qos_map_size(unsigned int n)
index d79221fd4dae2e8b13ac0725899a1b26a559db44..c318967073139dd4add6eb0e10591b6fada22800 100644 (file)
@@ -134,8 +134,7 @@ static void vcc_seq_stop(struct seq_file *seq, void *v)
 static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = vcc_walk(seq, 1);
-       if (v)
-               (*pos)++;
+       (*pos)++;
        return v;
 }
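
vcc_seq_next() now advances *pos unconditionally. A seq_file ->next() that bumps the position only when it found an element leaves *pos stuck once the walk runs dry, so the core can keep asking for the same position. A standalone model of the contract (all names are stand-ins, not the real seq_file API):

    #include <stdio.h>

    static const char *items[] = { "vcc0", "vcc1" };

    static const char *walk(long pos)
    {
            return pos < 2 ? items[pos] : NULL;     /* NULL past the end */
    }

    /* ->next(): advance the position first, whether or not more data exists. */
    static const char *seq_next(long *pos)
    {
            (*pos)++;
            return walk(*pos);
    }

    int main(void)
    {
            long pos = 0;

            for (const char *v = walk(pos); v; v = seq_next(&pos))
                    printf("%s\n", v);
            return 0;       /* pos is now 2, one past the end */
    }
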
 
index b0af3a11d4069cb7e44419d4494f20dd653dbca8..ec7bf5a4a9fc724b63da7bfd7ece79f32b104d99 100644 (file)
@@ -285,6 +285,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
        u32 hash = 0;
        const struct batadv_dat_entry *dat = data;
        const unsigned char *key;
+       __be16 vid;
        u32 i;
 
        key = (const unsigned char *)&dat->ip;
@@ -294,7 +295,8 @@ static u32 batadv_hash_dat(const void *data, u32 size)
                hash ^= (hash >> 6);
        }
 
-       key = (const unsigned char *)&dat->vid;
+       vid = htons(dat->vid);
+       key = (__force const unsigned char *)&vid;
        for (i = 0; i < sizeof(dat->vid); i++) {
                hash += key[i];
                hash += (hash << 10);
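
Hashing the raw bytes of a host-order dat->vid makes big- and little-endian nodes place the same VLAN id at different positions in the distributed ARP table; converting with htons() first fixes the byte sequence the hash sees. A standalone demonstration of the stable byte view:

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned short vid = 0x1234;
            unsigned short be = htons(vid);         /* fixed byte order */
            const unsigned char *key = (const unsigned char *)&be;

            /* prints "12 34" regardless of host endianness */
            printf("%02x %02x\n", key[0], key[1]);
            return 0;
    }
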
index 4a89177def647db602edb96634f45cf1cfbcfd5f..4811ec65bc434d012c76bf0e55fb1ec2fec574a1 100644 (file)
@@ -548,7 +548,7 @@ static void batadv_recv_handler_init(void)
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);
 
-       i = FIELD_SIZEOF(struct sk_buff, cb);
+       i = sizeof_field(struct sk_buff, cb);
        BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i);
 
        /* broadcast packet */
index 915c2d6f7fb9dad0790233d4c0268dc3cb07bf9f..f79205d4444f5fccf9244f4619c33709ee7cd464 100644 (file)
@@ -253,21 +253,21 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
        /* priority is allowed */
 
        if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
-                          FIELD_SIZEOF(struct __sk_buff, priority),
+                          sizeof_field(struct __sk_buff, priority),
                           offsetof(struct __sk_buff, cb)))
                return -EINVAL;
 
        /* cb is allowed */
 
        if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
-                          FIELD_SIZEOF(struct __sk_buff, cb),
+                          sizeof_field(struct __sk_buff, cb),
                           offsetof(struct __sk_buff, tstamp)))
                return -EINVAL;
 
        /* tstamp is allowed */
 
        if (!range_is_zero(__skb, offsetof(struct __sk_buff, tstamp) +
-                          FIELD_SIZEOF(struct __sk_buff, tstamp),
+                          sizeof_field(struct __sk_buff, tstamp),
                           sizeof(struct __sk_buff)))
                return -EINVAL;
 
@@ -438,7 +438,7 @@ static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
        /* flags is allowed */
 
        if (!range_is_zero(ctx, offsetof(struct bpf_flow_keys, flags) +
-                          FIELD_SIZEOF(struct bpf_flow_keys, flags),
+                          sizeof_field(struct bpf_flow_keys, flags),
                           sizeof(struct bpf_flow_keys)))
                return -EINVAL;
 
index 8a8f9e5f264f2a70b246094fa7c31f1be8c0deef..b6fe30e3768f8a6d212cd1a3e92f63cf582a47a7 100644 (file)
@@ -312,7 +312,7 @@ static int __init br_init(void)
 {
        int err;
 
-       BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+       BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > sizeof_field(struct sk_buff, cb));
 
        err = stp_proto_register(&br_stp_proto);
        if (err < 0) {
index af7800103e51e5aee724ff314697d17ec583aeba..59980ecfc9623fd0edcd8f3c4375dc5b68cb704b 100644 (file)
@@ -662,6 +662,9 @@ static unsigned int br_nf_forward_arp(void *priv,
                nf_bridge_pull_encap_header(skb);
        }
 
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
+               return NF_DROP;
+
        if (arp_hdr(skb)->ar_pln != 4) {
                if (is_vlan_arp(skb, state->net))
                        nf_bridge_push_encap_header(skb);
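
arp_hdr() simply casts into the skb's linear data, and nothing earlier on this path guarantees that a full ARP header is present there; pskb_may_pull() either makes at least that many bytes available in the linear area or fails. The general shape of the guard, with struct foo_hdr and hdr as illustrative stand-ins:

    /* Make sure the header bytes are in the linear area before casting. */
    if (!pskb_may_pull(skb, sizeof(struct foo_hdr)))
            return NF_DROP;         /* skb shorter than the header: drop */
    hdr = (struct foo_hdr *)skb_network_header(skb);
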
index 2cdfc5d6c25db6d9b565d33833360a9ab0c578c2..8c69f0c95a8ed4d92b6991d05e2c0d8133c9425c 100644 (file)
@@ -22,7 +22,8 @@
 #endif
 
 static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                            struct sk_buff *skb, u32 mtu)
+                            struct sk_buff *skb, u32 mtu,
+                            bool confirm_neigh)
 {
 }
 
index 4096d8a74a2bd7474da1e5ee980d840126f90c83..e1256e03a9a86b5b1249895a0627f7fbfa8e3386 100644 (file)
@@ -1867,7 +1867,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
 }
 
 static int ebt_buf_add(struct ebt_entries_buf_state *state,
-                      void *data, unsigned int sz)
+                      const void *data, unsigned int sz)
 {
        if (state->buf_kern_start == NULL)
                goto count_only;
@@ -1901,7 +1901,7 @@ enum compat_mwt {
        EBT_COMPAT_TARGET,
 };
 
-static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
                                enum compat_mwt compat_mwt,
                                struct ebt_entries_buf_state *state,
                                const unsigned char *base)
@@ -1979,22 +1979,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 /* return size of all matches, watchers or target, including necessary
  * alignment and padding.
  */
-static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
                        unsigned int size_left, enum compat_mwt type,
                        struct ebt_entries_buf_state *state, const void *base)
 {
+       const char *buf = (const char *)match32;
        int growth = 0;
-       char *buf;
 
        if (size_left == 0)
                return 0;
 
-       buf = (char *) match32;
-
-       while (size_left >= sizeof(*match32)) {
+       do {
                struct ebt_entry_match *match_kern;
                int ret;
 
+               if (size_left < sizeof(*match32))
+                       return -EINVAL;
+
                match_kern = (struct ebt_entry_match *) state->buf_kern_start;
                if (match_kern) {
                        char *tmp;
@@ -2031,22 +2032,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                if (match_kern)
                        match_kern->match_size = ret;
 
-               /* rule should have no remaining data after target */
-               if (type == EBT_COMPAT_TARGET && size_left)
-                       return -EINVAL;
-
                match32 = (struct compat_ebt_entry_mwt *) buf;
-       }
+       } while (size_left);
 
        return growth;
 }
 
 /* called for all ebt_entry structures. */
-static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
                          unsigned int *total,
                          struct ebt_entries_buf_state *state)
 {
-       unsigned int i, j, startoff, new_offset = 0;
+       unsigned int i, j, startoff, next_expected_off, new_offset = 0;
        /* stores match/watchers/targets & offset of next struct ebt_entry: */
        unsigned int offsets[4];
        unsigned int *offsets_update = NULL;
@@ -2132,11 +2129,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
                        return ret;
        }
 
-       startoff = state->buf_user_offset - startoff;
+       next_expected_off = state->buf_user_offset - startoff;
+       if (next_expected_off != entry->next_offset)
+               return -EINVAL;
 
-       if (WARN_ON(*total < startoff))
+       if (*total < entry->next_offset)
                return -EINVAL;
-       *total -= startoff;
+       *total -= entry->next_offset;
        return 0;
 }
 
index 76bd67891fb399621163f08d52096c335ba6dc48..a0116b9503d9da9a2733f0b7f8aae4f1b3ceeda9 100644 (file)
@@ -62,7 +62,7 @@ static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
        hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
 
        if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
-               pr_warn("Headroom to small\n");
+               pr_warn("Headroom too small\n");
                kfree_skb(skb);
                return -EIO;
        }
index de09b0a65791d9a2176ec66f5258d5695574fd68..f7587428febdd23cd703d1bf82ff3c8f18c4a9c1 100644 (file)
@@ -423,9 +423,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 {
        struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
        struct j1939_sock *jsk = j1939_sk(sock->sk);
-       struct j1939_priv *priv = jsk->priv;
-       struct sock *sk = sock->sk;
-       struct net *net = sock_net(sk);
+       struct j1939_priv *priv;
+       struct sock *sk;
+       struct net *net;
        int ret = 0;
 
        ret = j1939_sk_sanity_check(addr, len);
@@ -434,6 +434,10 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 
        lock_sock(sock->sk);
 
+       priv = jsk->priv;
+       sk = sock->sk;
+       net = sock_net(sk);
+
        /* Already bound to an interface? */
        if (jsk->state & J1939_SOCK_BOUND) {
                /* A re-bind() to a different interface is not
index 2c277b8aba38bf348093967a0fe7dd1d0aca4796..3d3ea1c30cf0990bfda60acef9489ee69abae81b 100644 (file)
@@ -928,7 +928,7 @@ EXPORT_SYMBOL(dev_get_by_napi_id);
  *
  *     The use of raw_seqcount_begin() and cond_resched() before
  *     retrying is required as we want to give the writers a chance
- *     to complete when CONFIG_PREEMPT is not set.
+ *     to complete when CONFIG_PREEMPTION is not set.
  */
 int netdev_get_name(struct net *net, char *name, int ifindex)
 {
@@ -5491,9 +5491,29 @@ static void flush_all_backlogs(void)
        put_online_cpus();
 }
 
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+       if (!napi->rx_count)
+               return;
+       netif_receive_skb_list_internal(&napi->rx_list);
+       INIT_LIST_HEAD(&napi->rx_list);
+       napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+       list_add_tail(&skb->list, &napi->rx_list);
+       if (++napi->rx_count >= gro_normal_batch)
+               gro_normal_list(napi);
+}
+
 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
-static int napi_gro_complete(struct sk_buff *skb)
+static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
@@ -5526,7 +5546,8 @@ static int napi_gro_complete(struct sk_buff *skb)
        }
 
 out:
-       return netif_receive_skb_internal(skb);
+       gro_normal_one(napi, skb);
+       return NET_RX_SUCCESS;
 }
 
 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@ -5539,7 +5560,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
                skb_list_del_init(skb);
-               napi_gro_complete(skb);
+               napi_gro_complete(napi, skb);
                napi->gro_hash[index].count--;
        }
 
@@ -5641,7 +5662,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
        }
 }
 
-static void gro_flush_oldest(struct list_head *head)
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
 {
        struct sk_buff *oldest;
 
@@ -5657,7 +5678,7 @@ static void gro_flush_oldest(struct list_head *head)
         * SKB to the chain.
         */
        skb_list_del_init(oldest);
-       napi_gro_complete(oldest);
+       napi_gro_complete(napi, oldest);
 }
 
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
@@ -5733,7 +5754,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
        if (pp) {
                skb_list_del_init(pp);
-               napi_gro_complete(pp);
+               napi_gro_complete(napi, pp);
                napi->gro_hash[hash].count--;
        }
 
@@ -5744,7 +5765,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                goto normal;
 
        if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
-               gro_flush_oldest(gro_head);
+               gro_flush_oldest(napi, gro_head);
        } else {
                napi->gro_hash[hash].count++;
        }
@@ -5802,26 +5823,6 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
-       if (!napi->rx_count)
-               return;
-       netif_receive_skb_list_internal(&napi->rx_list);
-       INIT_LIST_HEAD(&napi->rx_list);
-       napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
-       list_add_tail(&skb->list, &napi->rx_list);
-       if (++napi->rx_count >= gro_normal_batch)
-               gro_normal_list(napi);
-}
-
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
        skb_dst_drop(skb);
@@ -6200,8 +6201,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                                 NAPIF_STATE_IN_BUSY_POLL)))
                return false;
 
-       gro_normal_list(n);
-
        if (n->gro_bitmask) {
                unsigned long timeout = 0;
 
@@ -6217,6 +6216,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                        hrtimer_start(&n->timer, ns_to_ktime(timeout),
                                      HRTIMER_MODE_REL_PINNED);
        }
+
+       gro_normal_list(n);
+
        if (unlikely(!list_empty(&n->poll_list))) {
                /* If n->poll_list is not empty, we need to mask irqs */
                local_irq_save(flags);
@@ -6548,8 +6550,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                goto out_unlock;
        }
 
-       gro_normal_list(n);
-
        if (n->gro_bitmask) {
                /* flush too old packets
                 * If HZ < 1000, flush all packets.
@@ -6557,6 +6557,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                napi_gro_flush(n, HZ >= 1000);
        }
 
+       gro_normal_list(n);
+
        /* Some drivers may have called napi_schedule
         * prior to exhausting their budget.
         */
@@ -8194,6 +8196,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(__dev_set_mtu);
 
+int dev_validate_mtu(struct net_device *dev, int new_mtu,
+                    struct netlink_ext_ack *extack)
+{
+       /* MTU must be positive, and in range */
+       if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+               NL_SET_ERR_MSG(extack, "mtu less than device minimum");
+               return -EINVAL;
+       }
+
+       if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+               NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
+               return -EINVAL;
+       }
+       return 0;
+}
+
 /**
  *     dev_set_mtu_ext - Change maximum transfer unit
  *     @dev: device
@@ -8210,16 +8228,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
        if (new_mtu == dev->mtu)
                return 0;
 
-       /* MTU must be positive, and in range */
-       if (new_mtu < 0 || new_mtu < dev->min_mtu) {
-               NL_SET_ERR_MSG(extack, "mtu less than device minimum");
-               return -EINVAL;
-       }
-
-       if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
-               NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
-               return -EINVAL;
-       }
+       err = dev_validate_mtu(dev, new_mtu, extack);
+       if (err)
+               return err;
 
        if (!netif_device_present(dev))
                return -ENODEV;
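
Factoring the range check out of dev_set_mtu_ext() makes it usable before a device is fully registered. A hypothetical caller sketch, with tb, dev, extack and err assumed from an rtnetlink-style context:

    /* Hypothetical context: validate a netlink-supplied MTU up front. */
    if (tb[IFLA_MTU]) {
            u32 mtu = nla_get_u32(tb[IFLA_MTU]);

            err = dev_validate_mtu(dev, mtu, extack);
            if (err)
                    return err;
            dev->mtu = mtu;
    }
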
@@ -9177,22 +9188,10 @@ static void netdev_unregister_lockdep_key(struct net_device *dev)
 
 void netdev_update_lockdep_key(struct net_device *dev)
 {
-       struct netdev_queue *queue;
-       int i;
-
-       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
        lockdep_unregister_key(&dev->addr_list_lock_key);
-
-       lockdep_register_key(&dev->qdisc_xmit_lock_key);
        lockdep_register_key(&dev->addr_list_lock_key);
 
        lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
-       for (i = 0; i < dev->num_tx_queues; i++) {
-               queue = netdev_get_tx_queue(dev, i);
-
-               lockdep_set_class(&queue->_xmit_lock,
-                                 &dev->qdisc_xmit_lock_key);
-       }
 }
 EXPORT_SYMBOL(netdev_update_lockdep_key);
 
@@ -9314,8 +9313,10 @@ int register_netdevice(struct net_device *dev)
                goto err_uninit;
 
        ret = netdev_register_kobject(dev);
-       if (ret)
+       if (ret) {
+               dev->reg_state = NETREG_UNREGISTERED;
                goto err_uninit;
+       }
        dev->reg_state = NETREG_REGISTERED;
 
        __netdev_update_features(dev);
@@ -10165,7 +10166,7 @@ static struct hlist_head * __net_init netdev_create_hash(void)
 static int __net_init netdev_init(struct net *net)
 {
        BUILD_BUG_ON(GRO_HASH_BUCKETS >
-                    8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
+                    8 * sizeof_field(struct napi_struct, gro_bitmask));
 
        if (net != &init_net)
                INIT_LIST_HEAD(&net->dev_base_head);
index 4c63c9a4c09e62fdc8b428ae5a990feb329c47f6..f76219bf0c21d778fe0a5f64d7a1b739bca922ef 100644 (file)
@@ -6406,7 +6406,7 @@ static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
               devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA;
 }
 
-#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 30)
+#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
 
 static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
 {
@@ -7563,7 +7563,7 @@ void devlink_region_destroy(struct devlink_region *region)
 EXPORT_SYMBOL_GPL(devlink_region_destroy);
 
 /**
- *     devlink_region_shapshot_id_get - get snapshot ID
+ *     devlink_region_snapshot_id_get - get snapshot ID
  *
  *     This callback should be called when adding a new snapshot.
  *     The driver should use the same id for multiple snapshots taken
@@ -7571,7 +7571,7 @@ EXPORT_SYMBOL_GPL(devlink_region_destroy);
  *
  *     @devlink: devlink
  */
-u32 devlink_region_shapshot_id_get(struct devlink *devlink)
+u32 devlink_region_snapshot_id_get(struct devlink *devlink)
 {
        u32 id;
 
@@ -7581,7 +7581,7 @@ u32 devlink_region_shapshot_id_get(struct devlink *devlink)
 
        return id;
 }
-EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get);
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
 
 /**
  *     devlink_region_snapshot_create - create a new snapshot
index f1e703eed3d22d226550c153b6636e6a9b13174b..538f6a735a19f017df8e10149cb578107ddc8cbb 100644 (file)
@@ -274,7 +274,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 
        switch (skb_field) {
        case SKF_AD_MARK:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+               BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
                                      offsetof(struct sk_buff, mark));
@@ -289,14 +289,14 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
                break;
 
        case SKF_AD_QUEUE:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, queue_mapping));
                break;
 
        case SKF_AD_VLAN_TAG:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);
 
                /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
@@ -322,7 +322,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 
        switch (fp->k) {
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);
 
                /* A = *(u16 *) (CTX + offsetof(protocol)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
@@ -338,8 +338,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 
        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
-               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
+               BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
+               BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
                                      BPF_REG_TMP, BPF_REG_CTX,
@@ -361,7 +361,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                break;
 
        case SKF_AD_OFF + SKF_AD_RXHASH:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+               BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
 
                *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
                                    offsetof(struct sk_buff, hash));
@@ -385,7 +385,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                break;
 
        case SKF_AD_OFF + SKF_AD_VLAN_TPID:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);
 
                /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
@@ -2055,6 +2055,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
        }
 
        skb->dev = dev;
+       skb->tstamp = 0;
 
        dev_xmit_recursion_inc();
        ret = dev_queue_xmit(skb);
@@ -2230,10 +2231,10 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
+               offset += len;
                len = sk_msg_elem(msg, i)->length;
                if (start < offset + len)
                        break;
-               offset += len;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
 
@@ -2345,7 +2346,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
           u32, len, u64, flags)
 {
        struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
-       u32 new, i = 0, l, space, copy = 0, offset = 0;
+       u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
        u8 *raw, *to, *from;
        struct page *page;
 
@@ -2355,11 +2356,11 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
+               offset += l;
                l = sk_msg_elem(msg, i)->length;
 
                if (start < offset + l)
                        break;
-               offset += l;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
 
@@ -2414,6 +2415,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
 
                sk_msg_iter_var_next(i);
                sg_unmark_end(psge);
+               sg_unmark_end(&rsge);
                sk_msg_iter_next(msg, end);
        }
 
@@ -2505,7 +2507,7 @@ static void sk_msg_shift_right(struct sk_msg *msg, int i)
 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
           u32, len, u64, flags)
 {
-       u32 i = 0, l, space, offset = 0;
+       u32 i = 0, l = 0, space, offset = 0;
        u64 last = start + len;
        int pop;
 
@@ -2515,11 +2517,11 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
+               offset += l;
                l = sk_msg_elem(msg, i)->length;
 
                if (start < offset + l)
                        break;
-               offset += l;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
 
@@ -5317,8 +5319,7 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        if (sk) {
                sk = sk_to_full_sk(sk);
                if (!sk_fullsock(sk)) {
-                       if (!sock_flag(sk, SOCK_RCU_FREE))
-                               sock_gen_put(sk);
+                       sock_gen_put(sk);
                        return NULL;
                }
        }
@@ -5355,8 +5356,7 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        if (sk) {
                sk = sk_to_full_sk(sk);
                if (!sk_fullsock(sk)) {
-                       if (!sock_flag(sk, SOCK_RCU_FREE))
-                               sock_gen_put(sk);
+                       sock_gen_put(sk);
                        return NULL;
                }
        }
@@ -5423,7 +5423,8 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
 
 BPF_CALL_1(bpf_sk_release, struct sock *, sk)
 {
-       if (!sock_flag(sk, SOCK_RCU_FREE))
+       /* Only full sockets have sk->sk_flags. */
+       if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
                sock_gen_put(sk);
        return 0;
 }
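
The two __bpf_sk_lookup() hunks above and this one move the SOCK_RCU_FREE test into bpf_sk_release(): request and time-wait minisocks are not full sockets, so sock_flag() must not be applied to them, as the added comment notes. A user-space model of the corrected check order (names are illustrative, not the kernel's types):

    #include <stdbool.h>

    /* The flags word is only meaningful on a full socket, so && must
     * short-circuit on the fullness test before the flags are read.
     */
    struct mini_sock {
            bool full;              /* models sk_fullsock(sk) */
            unsigned long flags;    /* valid only when 'full' is set */
    };

    #define MINI_RCU_FREE (1UL << 0)

    static bool needs_put(const struct mini_sock *sk)
    {
            return !sk->full || !(sk->flags & MINI_RCU_FREE);
    }
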
@@ -5589,8 +5590,8 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 
 #define BPF_TCP_SOCK_GET_COMMON(FIELD)                                 \
        do {                                                            \
-               BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) >     \
-                            FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
+               BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) >     \
+                            sizeof_field(struct bpf_tcp_sock, FIELD)); \
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
                                      si->dst_reg, si->src_reg,         \
                                      offsetof(struct tcp_sock, FIELD)); \
@@ -5598,9 +5599,9 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 
 #define BPF_INET_SOCK_GET_COMMON(FIELD)                                        \
        do {                                                            \
-               BUILD_BUG_ON(FIELD_SIZEOF(struct inet_connection_sock,  \
+               BUILD_BUG_ON(sizeof_field(struct inet_connection_sock,  \
                                          FIELD) >                      \
-                            FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
+                            sizeof_field(struct bpf_tcp_sock, FIELD)); \
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                 \
                                        struct inet_connection_sock,    \
                                        FIELD),                         \
@@ -5615,7 +5616,7 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 
        switch (si->off) {
        case offsetof(struct bpf_tcp_sock, rtt_min):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
+               BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
                             sizeof(struct minmax));
                BUILD_BUG_ON(sizeof(struct minmax) <
                             sizeof(struct minmax_sample));
@@ -5780,8 +5781,8 @@ u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
 
 #define BPF_XDP_SOCK_GET(FIELD)                                                \
        do {                                                            \
-               BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) >     \
-                            FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \
+               BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) >     \
+                            sizeof_field(struct bpf_xdp_sock, FIELD)); \
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
                                      si->dst_reg, si->src_reg,         \
                                      offsetof(struct xdp_sock, FIELD)); \
@@ -7344,7 +7345,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 
        case offsetof(struct __sk_buff, cb[0]) ...
             offsetofend(struct __sk_buff, cb[4]) - 1:
-               BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
+               BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20);
                BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
                              offsetof(struct qdisc_skb_cb, data)) %
                             sizeof(__u64));
@@ -7363,7 +7364,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct __sk_buff, tc_classid):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
+               BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2);
 
                off  = si->off;
                off -= offsetof(struct __sk_buff, tc_classid);
@@ -7434,7 +7435,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 #endif
                break;
        case offsetof(struct __sk_buff, family):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
                                      si->dst_reg, si->src_reg,
@@ -7445,7 +7446,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                                                     2, target_size));
                break;
        case offsetof(struct __sk_buff, remote_ip4):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
                                      si->dst_reg, si->src_reg,
@@ -7456,7 +7457,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                                                     4, target_size));
                break;
        case offsetof(struct __sk_buff, local_ip4):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_rcv_saddr) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
@@ -7470,7 +7471,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
        case offsetof(struct __sk_buff, remote_ip6[0]) ...
             offsetof(struct __sk_buff, remote_ip6[3]):
 #if IS_ENABLED(CONFIG_IPV6)
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_v6_daddr.s6_addr32[0]) != 4);
 
                off = si->off;
@@ -7490,7 +7491,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
        case offsetof(struct __sk_buff, local_ip6[0]) ...
             offsetof(struct __sk_buff, local_ip6[3]):
 #if IS_ENABLED(CONFIG_IPV6)
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_v6_rcv_saddr.s6_addr32[0]) != 4);
 
                off = si->off;
@@ -7509,7 +7510,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct __sk_buff, remote_port):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
                                      si->dst_reg, si->src_reg,
@@ -7524,7 +7525,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct __sk_buff, local_port):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
                                      si->dst_reg, si->src_reg,
@@ -7535,7 +7536,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct __sk_buff, tstamp):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
+               BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8);
 
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_DW,
@@ -7573,7 +7574,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                                                     target_size));
                break;
        case offsetof(struct __sk_buff, wire_len):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
+               BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
 
                off = si->off;
                off -= offsetof(struct __sk_buff, wire_len);
@@ -7603,7 +7604,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
 
        switch (si->off) {
        case offsetof(struct bpf_sock, bound_dev_if):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
+               BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
 
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
@@ -7614,7 +7615,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock, mark):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);
+               BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
 
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
@@ -7625,7 +7626,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock, priority):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);
+               BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
 
                if (type == BPF_WRITE)
                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
@@ -7641,7 +7642,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                        si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common,
                                       skc_family,
-                                      FIELD_SIZEOF(struct sock_common,
+                                      sizeof_field(struct sock_common,
                                                    skc_family),
                                       target_size));
                break;
@@ -7668,7 +7669,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                *insn++ = BPF_LDX_MEM(
                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common, skc_rcv_saddr,
-                                      FIELD_SIZEOF(struct sock_common,
+                                      sizeof_field(struct sock_common,
                                                    skc_rcv_saddr),
                                       target_size));
                break;
@@ -7677,7 +7678,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                *insn++ = BPF_LDX_MEM(
                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common, skc_daddr,
-                                      FIELD_SIZEOF(struct sock_common,
+                                      sizeof_field(struct sock_common,
                                                    skc_daddr),
                                       target_size));
                break;
@@ -7691,7 +7692,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                        bpf_target_off(
                                struct sock_common,
                                skc_v6_rcv_saddr.s6_addr32[0],
-                               FIELD_SIZEOF(struct sock_common,
+                               sizeof_field(struct sock_common,
                                             skc_v6_rcv_saddr.s6_addr32[0]),
                                target_size) + off);
 #else
@@ -7708,7 +7709,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common,
                                       skc_v6_daddr.s6_addr32[0],
-                                      FIELD_SIZEOF(struct sock_common,
+                                      sizeof_field(struct sock_common,
                                                    skc_v6_daddr.s6_addr32[0]),
                                       target_size) + off);
 #else
@@ -7722,7 +7723,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                        BPF_FIELD_SIZEOF(struct sock_common, skc_num),
                        si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common, skc_num,
-                                      FIELD_SIZEOF(struct sock_common,
+                                      sizeof_field(struct sock_common,
                                                    skc_num),
                                       target_size));
                break;
@@ -7732,7 +7733,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                        BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
                        si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common, skc_dport,
-                                      FIELD_SIZEOF(struct sock_common,
+                                      sizeof_field(struct sock_common,
                                                    skc_dport),
                                       target_size));
                break;
@@ -7742,7 +7743,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                        BPF_FIELD_SIZEOF(struct sock_common, skc_state),
                        si->dst_reg, si->src_reg,
                        bpf_target_off(struct sock_common, skc_state,
-                                      FIELD_SIZEOF(struct sock_common,
+                                      sizeof_field(struct sock_common,
                                                    skc_state),
                                       target_size));
                break;
@@ -7837,7 +7838,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
                                      si->src_reg, offsetof(S, F));            \
                *insn++ = BPF_LDX_MEM(                                         \
                        SIZE, si->dst_reg, si->dst_reg,                        \
-                       bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),           \
+                       bpf_target_off(NS, NF, sizeof_field(NS, NF),           \
                                       target_size)                            \
                                + OFF);                                        \
        } while (0)
@@ -7868,7 +7869,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,         \
                                      si->dst_reg, offsetof(S, F));            \
                *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg,              \
-                       bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),           \
+                       bpf_target_off(NS, NF, sizeof_field(NS, NF),           \
                                       target_size)                            \
                                + OFF);                                        \
                *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,            \
@@ -7930,8 +7931,8 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
                 */
                BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
                             offsetof(struct sockaddr_in6, sin6_port));
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
-                            FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
+               BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
+                            sizeof_field(struct sockaddr_in6, sin6_port));
                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
                                                     struct sockaddr_in6, uaddr,
                                                     sin6_port, tmp_reg);
@@ -7997,8 +7998,8 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 /* Helper macro for adding read access to tcp_sock or sock fields. */
 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                        \
        do {                                                                  \
-               BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >                   \
-                            FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
+               BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >                   \
+                            sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
                                                struct bpf_sock_ops_kern,     \
                                                is_fullsock),                 \
@@ -8031,8 +8032,8 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                        \
        do {                                                                  \
                int reg = BPF_REG_9;                                          \
-               BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >                   \
-                            FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
+               BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >                   \
+                            sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
                if (si->dst_reg == reg || si->src_reg == reg)                 \
                        reg--;                                                \
                if (si->dst_reg == reg || si->src_reg == reg)                 \
@@ -8073,12 +8074,12 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
        switch (si->off) {
        case offsetof(struct bpf_sock_ops, op) ...
             offsetof(struct bpf_sock_ops, replylong[3]):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
-                            FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
-               BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
-                            FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
-               BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
-                            FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
+               BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, op) !=
+                            sizeof_field(struct bpf_sock_ops_kern, op));
+               BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) !=
+                            sizeof_field(struct bpf_sock_ops_kern, reply));
+               BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) !=
+                            sizeof_field(struct bpf_sock_ops_kern, replylong));
                off = si->off;
                off -= offsetof(struct bpf_sock_ops, op);
                off += offsetof(struct bpf_sock_ops_kern, op);
@@ -8091,7 +8092,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock_ops, family):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct bpf_sock_ops_kern, sk),
@@ -8102,7 +8103,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock_ops, remote_ip4):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                                struct bpf_sock_ops_kern, sk),
@@ -8113,7 +8114,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock_ops, local_ip4):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_rcv_saddr) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
@@ -8128,7 +8129,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
        case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
             offsetof(struct bpf_sock_ops, remote_ip6[3]):
 #if IS_ENABLED(CONFIG_IPV6)
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_v6_daddr.s6_addr32[0]) != 4);
 
                off = si->off;
@@ -8149,7 +8150,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
        case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
             offsetof(struct bpf_sock_ops, local_ip6[3]):
 #if IS_ENABLED(CONFIG_IPV6)
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_v6_rcv_saddr.s6_addr32[0]) != 4);
 
                off = si->off;
@@ -8168,7 +8169,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock_ops, remote_port):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                                struct bpf_sock_ops_kern, sk),
@@ -8182,7 +8183,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock_ops, local_port):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                                struct bpf_sock_ops_kern, sk),
@@ -8202,7 +8203,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock_ops, state):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                                struct bpf_sock_ops_kern, sk),
@@ -8213,7 +8214,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct bpf_sock_ops, rtt_min):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
+               BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
                             sizeof(struct minmax));
                BUILD_BUG_ON(sizeof(struct minmax) <
                             sizeof(struct minmax_sample));
@@ -8224,7 +8225,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                                      offsetof(struct bpf_sock_ops_kern, sk));
                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct tcp_sock, rtt_min) +
-                                     FIELD_SIZEOF(struct minmax_sample, t));
+                                     sizeof_field(struct minmax_sample, t));
                break;
 
        case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
@@ -8366,7 +8367,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
                                      offsetof(struct sk_msg, data_end));
                break;
        case offsetof(struct sk_msg_md, family):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                              struct sk_msg, sk),
@@ -8377,7 +8378,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct sk_msg_md, remote_ip4):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                                struct sk_msg, sk),
@@ -8388,7 +8389,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct sk_msg_md, local_ip4):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_rcv_saddr) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
@@ -8403,7 +8404,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
        case offsetof(struct sk_msg_md, remote_ip6[0]) ...
             offsetof(struct sk_msg_md, remote_ip6[3]):
 #if IS_ENABLED(CONFIG_IPV6)
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_v6_daddr.s6_addr32[0]) != 4);
 
                off = si->off;
@@ -8424,7 +8425,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
        case offsetof(struct sk_msg_md, local_ip6[0]) ...
             offsetof(struct sk_msg_md, local_ip6[3]):
 #if IS_ENABLED(CONFIG_IPV6)
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
+               BUILD_BUG_ON(sizeof_field(struct sock_common,
                                          skc_v6_rcv_saddr.s6_addr32[0]) != 4);
 
                off = si->off;
@@ -8443,7 +8444,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct sk_msg_md, remote_port):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                                struct sk_msg, sk),
@@ -8457,7 +8458,7 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct sk_msg_md, local_port):
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
+               BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
                                                struct sk_msg, sk),
@@ -8847,7 +8848,7 @@ sk_reuseport_is_valid_access(int off, int size,
 
        /* Fields that allow narrowing */
        case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
-               if (size < FIELD_SIZEOF(struct sk_buff, protocol))
+               if (size < sizeof_field(struct sk_buff, protocol))
                        return false;
                /* fall through */
        case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
@@ -8865,7 +8866,7 @@ sk_reuseport_is_valid_access(int off, int size,
        *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
                              si->dst_reg, si->src_reg,                 \
                              bpf_target_off(struct sk_reuseport_kern, F, \
-                                            FIELD_SIZEOF(struct sk_reuseport_kern, F), \
+                                            sizeof_field(struct sk_reuseport_kern, F), \
                                             target_size));             \
        })
 
index d524a693e00faa4e37572984fb4db357b9a0879a..2dbbb030fbedfa65698b2fa19cbd8c7c57f639ac 100644 (file)
@@ -599,8 +599,8 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
        offset += sizeof(struct gre_base_hdr);
 
        if (hdr->flags & GRE_CSUM)
-               offset += FIELD_SIZEOF(struct gre_full_hdr, csum) +
-                         FIELD_SIZEOF(struct gre_full_hdr, reserved1);
+               offset += sizeof_field(struct gre_full_hdr, csum) +
+                         sizeof_field(struct gre_full_hdr, reserved1);
 
        if (hdr->flags & GRE_KEY) {
                const __be32 *keyid;
@@ -622,11 +622,11 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
                        else
                                key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
                }
-               offset += FIELD_SIZEOF(struct gre_full_hdr, key);
+               offset += sizeof_field(struct gre_full_hdr, key);
        }
 
        if (hdr->flags & GRE_SEQ)
-               offset += FIELD_SIZEOF(struct pptp_gre_header, seq);
+               offset += sizeof_field(struct pptp_gre_header, seq);
 
        if (gre_ver == 0) {
                if (*p_proto == htons(ETH_P_TEB)) {
@@ -653,7 +653,7 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
                u8 *ppp_hdr;
 
                if (hdr->flags & GRE_ACK)
-                       offset += FIELD_SIZEOF(struct pptp_gre_header, ack);
+                       offset += sizeof_field(struct pptp_gre_header, ack);
 
                ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
                                               sizeof(_ppp_hdr),
index 652da63690376b52d084a901310db39f4258aca3..789a73aa7bd87b3492e983f8c6c6723e1e3c6bc5 100644 (file)
@@ -98,9 +98,6 @@ static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
 
 static void neigh_cleanup_and_release(struct neighbour *neigh)
 {
-       if (neigh->parms->neigh_cleanup)
-               neigh->parms->neigh_cleanup(neigh);
-
        trace_neigh_cleanup_and_release(neigh, 0);
        __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
@@ -3293,6 +3290,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu+1;
                return per_cpu_ptr(tbl->stats, cpu);
        }
+       (*pos)++;
        return NULL;
 }
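
The added `(*pos)++` follows the seq_file contract: a ->next() callback must advance the position on every call, including the final one that returns NULL, or the read loop can misbehave after a partial read. The shape of a conforming ->next(), as a sketch (struct foo and next_item() are hypothetical):

    #include <linux/seq_file.h>

    static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            struct foo *item = next_item(v);    /* hypothetical helper */

            ++*pos;         /* advance even when iteration ends */
            return item;    /* NULL once the sequence is exhausted */
    }
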
 
index 5c4624298996c21607647599a2a07d6cfc3d8ebb..4c826b8bf9b1e18e2cf0f74ebb111022fe1d193f 100644 (file)
@@ -919,14 +919,17 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
        struct kobject *kobj = &queue->kobj;
        int error = 0;
 
+       /* kobject_put() later will trigger a rx_queue_release() call,
+        * which decreases the dev refcount: take that reference here.
+        */
+       dev_hold(queue->dev);
+
        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
                                     "rx-%u", index);
        if (error)
                goto err;
 
-       dev_hold(queue->dev);
-
        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error)
index 02916f43bf63cdbdd7d2c7e030776d7203421232..d9001b5c48eb4e1af583f2d186451be592622e9d 100644 (file)
@@ -3048,8 +3048,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
        dev->rtnl_link_ops = ops;
        dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
 
-       if (tb[IFLA_MTU])
-               dev->mtu = nla_get_u32(tb[IFLA_MTU]);
+       if (tb[IFLA_MTU]) {
+               u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+               int err;
+
+               err = dev_validate_mtu(dev, mtu, extack);
+               if (err) {
+                       free_netdev(dev);
+                       return ERR_PTR(err);
+               }
+               dev->mtu = mtu;
+       }
        if (tb[IFLA_ADDRESS]) {
                memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
                                nla_len(tb[IFLA_ADDRESS]));
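
rtnl_create_link() now rejects an out-of-range IFLA_MTU before the device is ever registered, freeing the half-built netdev on failure. dev_validate_mtu() is, approximately, a range check against the limits the driver advertises; a sketch of the idea, not the exact kernel body:

    #include <errno.h>

    /* Approximate shape of the validation: reject an MTU outside the
     * [min_mtu, max_mtu] window the device advertises (max_mtu == 0
     * meaning "no upper limit").
     */
    static int mtu_in_range(unsigned int mtu, unsigned int min_mtu,
                            unsigned int max_mtu)
    {
            if (mtu < min_mtu)
                    return -EINVAL;
            if (max_mtu > 0 && mtu > max_mtu)
                    return -EINVAL;
            return 0;
    }
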
index 043db3ce023e592e9f1b6602376097c28f529cfd..8459ad579f735ce724b559f7114d1b77f360e5b2 100644 (file)
@@ -2916,7 +2916,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
        sk->sk_max_pacing_rate = ~0UL;
        sk->sk_pacing_rate = ~0UL;
-       sk->sk_pacing_shift = 10;
+       WRITE_ONCE(sk->sk_pacing_shift, 10);
        sk->sk_incoming_cpu = -1;
 
        sk_rx_queue_clear(sk);
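
sk_pacing_shift is read locklessly from the transmit path, so the store is annotated with WRITE_ONCE() to pair with READ_ONCE() at the readers, preventing store/load tearing and documenting the intentional data race. The idiom in miniature (a user-space rendition; the kernel macros live in include/linux/compiler.h on this tree):

    /* Volatile-access renditions of the ONCE helpers, for illustration. */
    #define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

    struct pacing {
            unsigned int shift;
    };

    static void writer(struct pacing *p)
    {
            WRITE_ONCE(p->shift, 10);   /* as in sock_init_data() above */
    }

    static unsigned int lockless_reader(struct pacing *p)
    {
            return READ_ONCE(p->shift); /* pairs with the WRITE_ONCE() */
    }
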
index eb114ee419b65a9c9dc282bcfe3f03ea23f49727..8998e356f423247d96cefcab054be37b1051b5b4 100644 (file)
@@ -241,8 +241,11 @@ static void sock_map_free(struct bpf_map *map)
                struct sock *sk;
 
                sk = xchg(psk, NULL);
-               if (sk)
+               if (sk) {
+                       lock_sock(sk);
                        sock_map_unref(sk, psk);
+                       release_sock(sk);
+               }
        }
        raw_spin_unlock_bh(&stab->lock);
        rcu_read_unlock();
@@ -862,7 +865,9 @@ static void sock_hash_free(struct bpf_map *map)
                raw_spin_lock_bh(&bucket->lock);
                hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
                        hlist_del_rcu(&elem->node);
+                       lock_sock(elem->sk);
                        sock_map_unref(elem->sk, elem);
+                       release_sock(elem->sk);
                }
                raw_spin_unlock_bh(&bucket->lock);
        }
index eb29e5adc84daec3ee87157e44b3358a1bae82a3..9f9e00ba3ad7ca61c90073bd89953c9ccd3a0661 100644 (file)
@@ -288,6 +288,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
        return ret;
 }
 
+# ifdef CONFIG_HAVE_EBPF_JIT
 static int
 proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
                                    void __user *buffer, size_t *lenp,
@@ -298,6 +299,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 
        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
+# endif /* CONFIG_HAVE_EBPF_JIT */
 
 static int
 proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
index 6b6e51db9f3b92986af8461011c6b8c3581170b0..1f31a39236d52fea76eb4925e5c43636c7e9477f 100644 (file)
@@ -438,6 +438,23 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_proto_csum_replace4);
 
+/**
+ * inet_proto_csum_replace16 - update layer 4 header checksum field
+ * @sum: Layer 4 header checksum field
+ * @skb: sk_buff for the packet
+ * @from: old IPv6 address
+ * @to: new IPv6 address
+ * @pseudohdr: True if layer 4 header checksum includes pseudoheader
+ *
+ * Update the layer 4 header checksum after a change of IPv6 src/dst address.
+ *
+ * There is no need to update skb->csum in this function, because the updates
+ * to the two fields, a) the IPv6 src/dst address and b) the L4 header
+ * checksum, cancel each other out in the skb->csum calculation. By contrast,
+ * inet_proto_csum_replace4() needs to update skb->csum, because the updates
+ * to the three fields, a) the IPv4 src/dst address, b) the IPv4 header
+ * checksum and c) the L4 header checksum, leave a residual diff equal to the
+ * L4 header checksum diff in the skb->csum calculation.
+ */
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                               const __be32 *from, const __be32 *to,
                               bool pseudohdr)
@@ -449,9 +466,6 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial(diff, sizeof(diff),
                                 ~csum_unfold(*sum)));
-               if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
-                       skb->csum = ~csum_partial(diff, sizeof(diff),
-                                                 ~skb->csum);
        } else if (pseudohdr)
                *sum = ~csum_fold(csum_partial(diff, sizeof(diff),
                                  csum_unfold(*sum)));
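
The arithmetic behind the kernel-doc above is the RFC 1624 incremental update of the one's-complement Internet checksum, HC' = ~(~HC + ~m + m'). A self-contained user-space toy showing that the incremental update agrees with recomputing from scratch:

    #include <stdint.h>
    #include <stdio.h>

    /* One's-complement 16-bit add with end-around carry. */
    static uint16_t add16(uint16_t a, uint16_t b)
    {
            uint32_t s = (uint32_t)a + b;
            return (uint16_t)((s & 0xffff) + (s >> 16));
    }

    /* RFC 1624 eqn. 3: HC' = ~(~HC + ~m + m') for one changed word. */
    static uint16_t csum_update(uint16_t hc, uint16_t m, uint16_t m_new)
    {
            return (uint16_t)~add16(add16((uint16_t)~hc, (uint16_t)~m), m_new);
    }

    static uint16_t csum_full(const uint16_t *w, int n)
    {
            uint16_t s = 0;
            for (int i = 0; i < n; i++)
                    s = add16(s, w[i]);
            return (uint16_t)~s;
    }

    int main(void)
    {
            uint16_t data[4] = { 0x1234, 0xabcd, 0x0001, 0xff00 };
            uint16_t hc = csum_full(data, 4);

            data[1] = 0x5555;   /* one rewritten word, like an address change */
            printf("full %04x vs incremental %04x\n",
                   csum_full(data, 4), csum_update(hc, 0xabcd, 0x5555));
            return 0;
    }
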
index 7c8390ad4dc613bf67b4845c37a7ed97360dd2a7..8310714c47fd7af0877dcf531ad46d0d2fef2903 100644 (file)
@@ -36,7 +36,7 @@ static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
        const u32 *k = data;
        const u32 key = *k;
 
-       BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
+       BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
                     != sizeof(u32));
 
        /* Use cyclic increasing ID as direct hash key */
@@ -56,7 +56,7 @@ static const struct rhashtable_params mem_id_rht_params = {
        .nelem_hint = 64,
        .head_offset = offsetof(struct xdp_mem_allocator, node),
        .key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
-       .key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
+       .key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
        .max_size = MEM_ID_MAX,
        .min_size = 8,
        .automatic_shrinking = true,
index a52e8ba1ced046b178fa069b1e0d690c537c6bc0..4af8a98fe7846bcb508352a0bd1947f8b8998cb5 100644 (file)
@@ -1132,7 +1132,7 @@ static int __init dccp_init(void)
        int rc;
 
        BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
-                    FIELD_SIZEOF(struct sk_buff, cb));
+                    sizeof_field(struct sk_buff, cb));
        rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
        if (rc)
                goto out_fail;
index aea918135ec370e3b12cea1ea14319c1ce5cb64b..08c3dc45f1a4344ca919b0fd1a6c73ec04247d05 100644 (file)
@@ -110,7 +110,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                              struct sk_buff *skb , u32 mtu);
+                              struct sk_buff *skb, u32 mtu,
+                              bool confirm_neigh);
 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb);
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
@@ -251,7 +252,8 @@ static int dn_dst_gc(struct dst_ops *ops)
  * advertise to the other end).
  */
 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                              struct sk_buff *skb, u32 mtu)
+                              struct sk_buff *skb, u32 mtu,
+                              bool confirm_neigh)
 {
        struct dn_route *rt = (struct dn_route *) dst;
        struct neighbour *n = rt->n;
index 9ef2caa13f278e50ac2d88f583ed542aef1c4768..c66abbed4daf21f650f86e4d073c51657854d9b7 100644 (file)
@@ -124,7 +124,8 @@ static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
        return NULL;
 }
 
-struct dsa_link *dsa_link_touch(struct dsa_port *dp, struct dsa_port *link_dp)
+static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
+                                      struct dsa_port *link_dp)
 {
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst;
index b678160bbd66e5992c7b7b28a819c25f6dd3ec50..408d4af390a0ee8bfd369790ca3428db7a79b156 100644 (file)
@@ -104,7 +104,7 @@ static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
 }
 
 static const struct dsa_device_ops gswip_netdev_ops = {
-       .name = "gwsip",
+       .name = "gswip",
        .proto  = DSA_TAG_PROTO_GSWIP,
        .xmit = gswip_tag_xmit,
        .rcv = gswip_tag_rcv,
index 73605bcbb38511f065b7d1b432d5ec8761344c5f..90d055c4df9e80b16aaa5aeb303f8d8a9e31291f 100644 (file)
@@ -84,8 +84,6 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
  *       (eg, 0x00=port1, 0x02=port3, 0x06=port7)
  */
 
-#define KSZ8795_INGRESS_TAG_LEN                1
-
 #define KSZ8795_TAIL_TAG_OVERRIDE      BIT(6)
 #define KSZ8795_TAIL_TAG_LOOKUP                BIT(7)
 
@@ -96,12 +94,12 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 *tag;
        u8 *addr;
 
-       nskb = ksz_common_xmit(skb, dev, KSZ8795_INGRESS_TAG_LEN);
+       nskb = ksz_common_xmit(skb, dev, KSZ_INGRESS_TAG_LEN);
        if (!nskb)
                return NULL;
 
        /* Tag encoding */
-       tag = skb_put(nskb, KSZ8795_INGRESS_TAG_LEN);
+       tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
        addr = skb_mac_header(nskb);
 
        *tag = 1 << dp->index;
@@ -124,7 +122,7 @@ static const struct dsa_device_ops ksz8795_netdev_ops = {
        .proto  = DSA_TAG_PROTO_KSZ8795,
        .xmit   = ksz8795_xmit,
        .rcv    = ksz8795_rcv,
-       .overhead = KSZ8795_INGRESS_TAG_LEN,
+       .overhead = KSZ_INGRESS_TAG_LEN,
 };
 
 DSA_TAG_DRIVER(ksz8795_netdev_ops);
index c95885215525193517a6745040e39a046bf6c494..c8a128c9e5e0f6c8ab048ae6f90aa12e1087cdd6 100644 (file)
@@ -33,9 +33,6 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
        struct dsa_port *dp = dsa_slave_to_port(dev);
        u16 *phdr, hdr;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        if (skb_cow_head(skb, 0) < 0)
                return NULL;
 
index 94447974a3c05d735cad2d26648f432a45e3697a..d5f709b940ffd058deb2870ea559dfb2f0db035a 100644 (file)
@@ -20,6 +20,8 @@
 #include "hsr_main.h"
 #include "hsr_framereg.h"
 
+static struct dentry *hsr_debugfs_root_dir;
+
 static void print_mac_address(struct seq_file *sfp, unsigned char *mac)
 {
        seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:",
@@ -63,8 +65,20 @@ hsr_node_table_open(struct inode *inode, struct file *filp)
        return single_open(filp, hsr_node_table_show, inode->i_private);
 }
 
+void hsr_debugfs_rename(struct net_device *dev)
+{
+       struct hsr_priv *priv = netdev_priv(dev);
+       struct dentry *d;
+
+       d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
+                          hsr_debugfs_root_dir, dev->name);
+       if (IS_ERR(d))
+               netdev_warn(dev, "failed to rename\n");
+       else
+               priv->node_tbl_root = d;
+}
+
 static const struct file_operations hsr_fops = {
-       .owner  = THIS_MODULE,
        .open   = hsr_node_table_open,
        .read   = seq_read,
        .llseek = seq_lseek,
@@ -78,15 +92,14 @@ static const struct file_operations hsr_fops = {
  * When debugfs is configured this routine sets up the node_table file per
  * hsr device for dumping the node_table entries
  */
-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
 {
-       int rc = -1;
        struct dentry *de = NULL;
 
-       de = debugfs_create_dir(hsr_dev->name, NULL);
-       if (!de) {
-               pr_err("Cannot create hsr debugfs root\n");
-               return rc;
+       de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir);
+       if (IS_ERR(de)) {
+               pr_err("Cannot create hsr debugfs directory\n");
+               return;
        }
 
        priv->node_tbl_root = de;
@@ -94,13 +107,13 @@ int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
        de = debugfs_create_file("node_table", S_IFREG | 0444,
                                 priv->node_tbl_root, priv,
                                 &hsr_fops);
-       if (!de) {
-               pr_err("Cannot create hsr node_table directory\n");
-               return rc;
+       if (IS_ERR(de)) {
+               pr_err("Cannot create hsr node_table file\n");
+               debugfs_remove(priv->node_tbl_root);
+               priv->node_tbl_root = NULL;
+               return;
        }
        priv->node_tbl_file = de;
-
-       return 0;
 }
 
 /* hsr_debugfs_term - Tear down debugfs infrastructure
@@ -117,3 +130,18 @@ hsr_debugfs_term(struct hsr_priv *priv)
        debugfs_remove(priv->node_tbl_root);
        priv->node_tbl_root = NULL;
 }
+
+void hsr_debugfs_create_root(void)
+{
+       hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL);
+       if (IS_ERR(hsr_debugfs_root_dir)) {
+               pr_err("Cannot create hsr debugfs root directory\n");
+               hsr_debugfs_root_dir = NULL;
+       }
+}
+
+void hsr_debugfs_remove_root(void)
+{
+       /* debugfs_remove() internally checks NULL and ERROR */
+       debugfs_remove(hsr_debugfs_root_dir);
+}
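
The IS_ERR() conversions above match the current debugfs convention: the create helpers return ERR_PTR() values on failure, never NULL, so the old `if (!de)` tests could not fire. A sketch of the resulting pattern (example names are illustrative):

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *example_dir;

    static void example_debugfs_init(void)
    {
            example_dir = debugfs_create_dir("example", NULL);
            if (IS_ERR(example_dir))
                    example_dir = NULL;     /* carry on without debugfs */
    }

    static void example_debugfs_exit(void)
    {
            debugfs_remove(example_dir);    /* NULL-safe, as noted above */
    }
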
index b01e1bae4ddc079d60ac319991fd3867216bff9a..c7bd6c49fadff47daaeccd57cc0ce835e84696f4 100644 (file)
@@ -272,6 +272,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
                            skb->dev->dev_addr, skb->len) <= 0)
                goto out;
        skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
 
        if (hsr_ver > 0) {
                hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
@@ -368,7 +370,7 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
        del_timer_sync(&hsr->prune_timer);
        del_timer_sync(&hsr->announce_timer);
 
-       hsr_del_self_node(&hsr->self_node_db);
+       hsr_del_self_node(hsr);
        hsr_del_nodes(&hsr->node_db);
 }
 
@@ -440,11 +442,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        INIT_LIST_HEAD(&hsr->ports);
        INIT_LIST_HEAD(&hsr->node_db);
        INIT_LIST_HEAD(&hsr->self_node_db);
+       spin_lock_init(&hsr->list_lock);
 
        ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
 
        /* Make sure we recognize frames from ourselves in hsr_rcv() */
-       res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
+       res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
                                   slave[1]->dev_addr);
        if (res < 0)
                return res;
@@ -477,31 +480,32 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 
        res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
        if (res)
-               goto err_add_port;
+               goto err_add_master;
 
        res = register_netdevice(hsr_dev);
        if (res)
-               goto fail;
+               goto err_unregister;
 
        res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
        if (res)
-               goto fail;
+               goto err_add_slaves;
+
        res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
        if (res)
-               goto fail;
+               goto err_add_slaves;
 
+       hsr_debugfs_init(hsr, hsr_dev);
        mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
-       res = hsr_debugfs_init(hsr, hsr_dev);
-       if (res)
-               goto fail;
 
        return 0;
 
-fail:
+err_add_slaves:
+       unregister_netdevice(hsr_dev);
+err_unregister:
        list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
                hsr_del_port(port);
-err_add_port:
-       hsr_del_self_node(&hsr->self_node_db);
+err_add_master:
+       hsr_del_self_node(hsr);
 
        return res;
 }
index 292be446007b481d3b88ec1d06ffb494059aa677..27dc65d7de67a17d362985960b83850c894dfac6 100644 (file)
@@ -75,10 +75,11 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
 /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
  * frames from ourselves that have been looped over the HSR ring.
  */
-int hsr_create_self_node(struct list_head *self_node_db,
+int hsr_create_self_node(struct hsr_priv *hsr,
                         unsigned char addr_a[ETH_ALEN],
                         unsigned char addr_b[ETH_ALEN])
 {
+       struct list_head *self_node_db = &hsr->self_node_db;
        struct hsr_node *node, *oldnode;
 
        node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -88,33 +89,33 @@ int hsr_create_self_node(struct list_head *self_node_db,
        ether_addr_copy(node->macaddress_A, addr_a);
        ether_addr_copy(node->macaddress_B, addr_b);
 
-       rcu_read_lock();
+       spin_lock_bh(&hsr->list_lock);
        oldnode = list_first_or_null_rcu(self_node_db,
                                         struct hsr_node, mac_list);
        if (oldnode) {
                list_replace_rcu(&oldnode->mac_list, &node->mac_list);
-               rcu_read_unlock();
-               synchronize_rcu();
-               kfree(oldnode);
+               spin_unlock_bh(&hsr->list_lock);
+               kfree_rcu(oldnode, rcu_head);
        } else {
-               rcu_read_unlock();
                list_add_tail_rcu(&node->mac_list, self_node_db);
+               spin_unlock_bh(&hsr->list_lock);
        }
 
        return 0;
 }
 
-void hsr_del_self_node(struct list_head *self_node_db)
+void hsr_del_self_node(struct hsr_priv *hsr)
 {
+       struct list_head *self_node_db = &hsr->self_node_db;
        struct hsr_node *node;
 
-       rcu_read_lock();
+       spin_lock_bh(&hsr->list_lock);
        node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
-       rcu_read_unlock();
        if (node) {
                list_del_rcu(&node->mac_list);
-               kfree(node);
+               kfree_rcu(node, rcu_head);
        }
+       spin_unlock_bh(&hsr->list_lock);
 }
 
 void hsr_del_nodes(struct list_head *node_db)
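
The rework above replaces RCU read-side critical sections plus synchronize_rcu() on the update side with the new hsr->list_lock and kfree_rcu(): writers serialize on the spinlock, readers keep traversing under rcu_read_lock(), and the free is deferred past a grace period without blocking. The shape of the pattern, as a sketch (requires an rcu_head embedded in the object, which struct hsr_node already has):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head list;
            struct rcu_head rcu_head;   /* needed by kfree_rcu() */
    };

    static LIST_HEAD(items);
    static DEFINE_SPINLOCK(items_lock);

    static void remove_item(struct item *it)
    {
            spin_lock_bh(&items_lock);
            list_del_rcu(&it->list);
            spin_unlock_bh(&items_lock);
            kfree_rcu(it, rcu_head);    /* non-blocking, BH-safe teardown */
    }
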
@@ -130,30 +131,43 @@ void hsr_del_nodes(struct list_head *node_db)
  * seq_out is used to initialize filtering of outgoing duplicate frames
  * originating from the newly added node.
  */
-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
-                             u16 seq_out)
+static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+                                    struct list_head *node_db,
+                                    unsigned char addr[],
+                                    u16 seq_out)
 {
-       struct hsr_node *node;
+       struct hsr_node *new_node, *node;
        unsigned long now;
        int i;
 
-       node = kzalloc(sizeof(*node), GFP_ATOMIC);
-       if (!node)
+       new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+       if (!new_node)
                return NULL;
 
-       ether_addr_copy(node->macaddress_A, addr);
+       ether_addr_copy(new_node->macaddress_A, addr);
 
        /* We are only interested in time diffs here, so use current jiffies
         * as initialization. (0 could trigger a spurious ring error warning).
         */
        now = jiffies;
        for (i = 0; i < HSR_PT_PORTS; i++)
-               node->time_in[i] = now;
+               new_node->time_in[i] = now;
        for (i = 0; i < HSR_PT_PORTS; i++)
-               node->seq_out[i] = seq_out;
-
-       list_add_tail_rcu(&node->mac_list, node_db);
+               new_node->seq_out[i] = seq_out;
 
+       spin_lock_bh(&hsr->list_lock);
+       list_for_each_entry_rcu(node, node_db, mac_list) {
+               if (ether_addr_equal(node->macaddress_A, addr))
+                       goto out;
+               if (ether_addr_equal(node->macaddress_B, addr))
+                       goto out;
+       }
+       list_add_tail_rcu(&new_node->mac_list, node_db);
+       spin_unlock_bh(&hsr->list_lock);
+       return new_node;
+out:
+       spin_unlock_bh(&hsr->list_lock);
+       kfree(new_node);
        return node;
 }
 
@@ -163,6 +177,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
                              bool is_sup)
 {
        struct list_head *node_db = &port->hsr->node_db;
+       struct hsr_priv *hsr = port->hsr;
        struct hsr_node *node;
        struct ethhdr *ethhdr;
        u16 seq_out;
@@ -196,7 +211,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
                seq_out = HSR_SEQNR_START;
        }
 
-       return hsr_add_node(node_db, ethhdr->h_source, seq_out);
+       return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out);
 }
 
 /* Use the Supervision frame's info about an eventual macaddress_B for merging
@@ -206,10 +221,11 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
                          struct hsr_port *port_rcv)
 {
-       struct ethhdr *ethhdr;
-       struct hsr_node *node_real;
+       struct hsr_priv *hsr = port_rcv->hsr;
        struct hsr_sup_payload *hsr_sp;
+       struct hsr_node *node_real;
        struct list_head *node_db;
+       struct ethhdr *ethhdr;
        int i;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -231,7 +247,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
        node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
        if (!node_real)
                /* No frame received from AddrA of this node yet */
-               node_real = hsr_add_node(node_db, hsr_sp->macaddress_A,
+               node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
                                         HSR_SEQNR_START - 1);
        if (!node_real)
                goto done; /* No mem */
@@ -252,7 +268,9 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
        }
        node_real->addr_B_port = port_rcv->type;
 
+       spin_lock_bh(&hsr->list_lock);
        list_del_rcu(&node_curr->mac_list);
+       spin_unlock_bh(&hsr->list_lock);
        kfree_rcu(node_curr, rcu_head);
 
 done:
@@ -368,12 +386,13 @@ void hsr_prune_nodes(struct timer_list *t)
 {
        struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
        struct hsr_node *node;
+       struct hsr_node *tmp;
        struct hsr_port *port;
        unsigned long timestamp;
        unsigned long time_a, time_b;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(node, &hsr->node_db, mac_list) {
+       spin_lock_bh(&hsr->list_lock);
+       list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
                /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
                 * nor time_in[HSR_PT_SLAVE_B], will ever be updated for
                 * the master port. Thus the master node will be repeatedly
@@ -421,7 +440,7 @@ void hsr_prune_nodes(struct timer_list *t)
                        kfree_rcu(node, rcu_head);
                }
        }
-       rcu_read_unlock();
+       spin_unlock_bh(&hsr->list_lock);
 
        /* Restart timer */
        mod_timer(&hsr->prune_timer,
index 89a3ce38151d187faa595b242f6ea0e75d535985..0f0fa12b432937fdae50dc2f57953fffe238c20b 100644 (file)
 
 struct hsr_node;
 
-void hsr_del_self_node(struct list_head *self_node_db);
+void hsr_del_self_node(struct hsr_priv *hsr);
 void hsr_del_nodes(struct list_head *node_db);
-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
-                             u16 seq_out);
 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
                              bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
@@ -33,7 +31,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
 
 void hsr_prune_nodes(struct timer_list *t);
 
-int hsr_create_self_node(struct list_head *self_node_db,
+int hsr_create_self_node(struct hsr_priv *hsr,
                         unsigned char addr_a[ETH_ALEN],
                         unsigned char addr_b[ETH_ALEN]);
 
index b9988a662ee1ac14c3ad8b96ea18a2dca92f5254..9e389accbfc7e52bc677b5f3d8cb59e93f4d9bf0 100644 (file)
@@ -45,6 +45,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
        case NETDEV_CHANGE:     /* Link (carrier) state changes */
                hsr_check_carrier_and_operstate(hsr);
                break;
+       case NETDEV_CHANGENAME:
+               if (is_hsr_master(dev))
+                       hsr_debugfs_rename(dev);
+               break;
        case NETDEV_CHANGEADDR:
                if (port->type == HSR_PT_MASTER) {
                        /* This should not happen since there's no
@@ -64,7 +68,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 
                /* Make sure we recognize frames from ourselves in hsr_rcv() */
                port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
-               res = hsr_create_self_node(&hsr->self_node_db,
+               res = hsr_create_self_node(hsr,
                                           master->dev->dev_addr,
                                           port ?
                                                port->dev->dev_addr :
@@ -123,6 +127,7 @@ static void __exit hsr_exit(void)
 {
        unregister_netdevice_notifier(&hsr_nb);
        hsr_netlink_exit();
+       hsr_debugfs_remove_root();
 }
 
 module_init(hsr_init);
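
The NETDEV_CHANGENAME case added above hooks into the standard netdevice notifier chain. As a reminder of the shape of that mechanism — a generic sketch, not the HSR handler; the rename reaction is a placeholder:

    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int my_netdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            switch (event) {
            case NETDEV_CHANGENAME:
                    /* dev->name already holds the new name here; refresh
                     * anything keyed on it, e.g. a debugfs directory.
                     */
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block my_nb = {
            .notifier_call = my_netdev_event,
    };

    /* register_netdevice_notifier(&my_nb) at module init,
     * unregister_netdevice_notifier(&my_nb) at module exit.
     */
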
index 96fac696a1e1a602fdf46c4df74016b7baf50008..754d84b217f048fe0b61c4a8b986340b7ce3c398 100644 (file)
@@ -160,8 +160,9 @@ struct hsr_priv {
        int announce_count;
        u16 sequence_nr;
        u16 sup_sequence_nr;    /* For HSRv1 separate seq_nr for supervision */
-       u8 prot_version;                /* Indicate if HSRv0 or HSRv1. */
-       spinlock_t seqnr_lock;                  /* locking for sequence_nr */
+       u8 prot_version;        /* Indicate if HSRv0 or HSRv1. */
+       spinlock_t seqnr_lock;  /* locking for sequence_nr */
+       spinlock_t list_lock;   /* locking for node list */
        unsigned char           sup_multicast_addr[ETH_ALEN];
 #ifdef CONFIG_DEBUG_FS
        struct dentry *node_tbl_root;
@@ -184,17 +185,24 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
 }
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
+void hsr_debugfs_rename(struct net_device *dev);
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
 void hsr_debugfs_term(struct hsr_priv *priv);
+void hsr_debugfs_create_root(void);
+void hsr_debugfs_remove_root(void);
 #else
-static inline int hsr_debugfs_init(struct hsr_priv *priv,
-                                  struct net_device *hsr_dev)
+static inline void hsr_debugfs_rename(struct net_device *dev)
 {
-       return 0;
 }
-
+static inline void hsr_debugfs_init(struct hsr_priv *priv,
+                                   struct net_device *hsr_dev)
+{}
 static inline void hsr_debugfs_term(struct hsr_priv *priv)
 {}
+static inline void hsr_debugfs_create_root(void)
+{}
+static inline void hsr_debugfs_remove_root(void)
+{}
 #endif
 
 #endif /*  __HSR_PRIVATE_H */
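
Two things happen in the header hunk above: hsr_debugfs_init() loses its int return (debugfs failures are deliberately not propagated), and the new root create/remove/rename hooks get empty static-inline stubs for !CONFIG_DEBUG_FS builds. The stub pattern in isolation, for reference:

    #if IS_ENABLED(CONFIG_DEBUG_FS)
    void my_debugfs_create_root(void);
    void my_debugfs_remove_root(void);
    #else
    /* Empty inline stubs: callers need no #ifdefs and the compiler
     * drops the calls entirely.
     */
    static inline void my_debugfs_create_root(void)
    {}
    static inline void my_debugfs_remove_root(void)
    {}
    #endif
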
index 8f8337f893badcbe854108fba737ac9cf45a32ee..8dc0547f01d0bfc296fb61775f083cf82d8e33b1 100644 (file)
@@ -476,6 +476,7 @@ int __init hsr_netlink_init(void)
        if (rc)
                goto fail_genl_register_family;
 
+       hsr_debugfs_create_root();
        return 0;
 
 fail_genl_register_family:
index 0e4a7cf6bc87d2b026d32172f6fbebc673548f90..e2e219c7854a058712c982d8f995b08d3bdb5776 100644 (file)
@@ -57,6 +57,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
                if (!x)
                        goto out_reset;
 
+               skb->mark = xfrm_smark_get(skb->mark, x);
+
                sp->xvec[sp->len++] = x;
                sp->olen++;
 
index b9df9c09b84e5e6af9d6fcd1386ea669ba14a560..195469a133713a73bccf1d970121ef9866cc3766 100644 (file)
@@ -2193,6 +2193,12 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
        int count = cb->args[2];
        t_key key = cb->args[3];
 
+       /* First time here, count and key are both always 0. Count > 0
+        * and key == 0 means the dump has wrapped around and we are done.
+        */
+       if (count && !key)
+               return skb->len;
+
        while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
                int err;
 
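
The early return added to fib_table_dump() leans on the dump cursor kept in cb->args[]: both count and key start at 0, and once the trie walk wraps, key goes back to 0 while count stays positive, which unambiguously marks completion. The same resumable-cursor shape in miniature (walk_next() and emit() are invented stand-ins):

    struct cursor {
            unsigned int count;     /* entries emitted so far */
            unsigned long key;      /* next key to visit; 0 again after wrap */
    };

    static int dump(struct cursor *c)
    {
            /* Resumed after wrap-around: nothing left to do. */
            if (c->count && !c->key)
                    return 0;

            while (walk_next(&c->key)) {    /* hypothetical iterator */
                    if (emit(c->key) < 0)   /* buffer full: resume later */
                            return -EAGAIN;
                    c->count++;
            }
            return 0;
    }
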
index 30fa771d382a0a723c8d9b9610950b681e17e362..dcc79ff54b41640fa048810f164655e7b4d5bc65 100644 (file)
@@ -662,8 +662,8 @@ static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_REMCSUM_NOPARTIAL]    = { .type = NLA_FLAG, },
        [FOU_ATTR_LOCAL_V4]             = { .type = NLA_U32, },
        [FOU_ATTR_PEER_V4]              = { .type = NLA_U32, },
-       [FOU_ATTR_LOCAL_V6]             = { .type = sizeof(struct in6_addr), },
-       [FOU_ATTR_PEER_V6]              = { .type = sizeof(struct in6_addr), },
+       [FOU_ATTR_LOCAL_V6]             = { .len = sizeof(struct in6_addr), },
+       [FOU_ATTR_PEER_V6]              = { .len = sizeof(struct in6_addr), },
        [FOU_ATTR_PEER_PORT]            = { .type = NLA_U16, },
        [FOU_ATTR_IFINDEX]              = { .type = NLA_S32, },
 };
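
The FOU fix above is a one-field mix-up: sizeof(struct in6_addr) had been written into .type, where netlink expects an NLA_* kind, not a byte count. For binary attributes the length belongs in .len, which (with the default NLA_UNSPEC type) acts as a minimum-length check. A correct pair of policy entries, schematically:

    #include <linux/in6.h>
    #include <net/netlink.h>

    static const struct nla_policy example_policy[] = {
            /* .type names the attribute kind ... */
            [0] = { .type = NLA_U16 },
            /* ... while .len bounds a binary payload (here 16 bytes,
             * validated as a minimum length for NLA_UNSPEC).
             */
            [1] = { .len = sizeof(struct in6_addr) },
    };
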
index e4c6e8b4049063f5239a5e99a185016ad3bb5790..18c0d5bffe12b04b48faedc247bb3e491a7abcea 100644 (file)
@@ -1086,7 +1086,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
                if (!dst)
                        goto out;
        }
-       dst->ops->update_pmtu(dst, sk, NULL, mtu);
+       dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
 
        dst = __sk_dst_check(sk, 0);
        if (!dst)
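
This is the first of many hunks in this pull that thread a confirm_neigh flag through ->update_pmtu(): a PMTU update derived from unauthenticated ICMP must not double as proof that the neighbour is reachable. The method signature after the series, as it appears in the hunks below (dst_ops sketch):

    struct dst_ops {
            /* ... */
            void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
                                struct sk_buff *skb, u32 mtu,
                                bool confirm_neigh);
            /* ... */
    };
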
index af154977904c0c249e77e425990a09c62cca4251..f11e997e517b6652479acd892ffea6cdeb940e33 100644 (file)
@@ -911,11 +911,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
 
                for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
                        struct inet_listen_hashbucket *ilb;
+                       struct hlist_nulls_node *node;
 
                        num = 0;
                        ilb = &hashinfo->listening_hash[i];
                        spin_lock(&ilb->lock);
-                       sk_for_each(sk, &ilb->head) {
+                       sk_nulls_for_each(sk, node, &ilb->nulls_head) {
                                struct inet_sock *inet = inet_sk(sk);
 
                                if (!net_eq(sock_net(sk), net))
index 83fb00153018f16678e71695134f7ae4d30ee0a3..2bbaaf0c717634502aa64098d1f4581b3eb1207d 100644 (file)
@@ -516,10 +516,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
                                   struct inet_listen_hashbucket *ilb)
 {
        struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
+       const struct hlist_nulls_node *node;
        struct sock *sk2;
        kuid_t uid = sock_i_uid(sk);
 
-       sk_for_each_rcu(sk2, &ilb->head) {
+       sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
                if (sk2 != sk &&
                    sk2->sk_family == sk->sk_family &&
                    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -555,9 +556,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
        }
        if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
                sk->sk_family == AF_INET6)
-               hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
+               __sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
        else
-               hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+               __sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
        inet_hash2(hashinfo, sk);
        ilb->count++;
        sock_set_flag(sk, SOCK_RCU_FREE);
@@ -606,11 +607,9 @@ void inet_unhash(struct sock *sk)
                reuseport_detach_sock(sk);
        if (ilb) {
                inet_unhash2(hashinfo, sk);
-                __sk_del_node_init(sk);
-                ilb->count--;
-       } else {
-               __sk_nulls_del_node_init_rcu(sk);
+               ilb->count--;
        }
+       __sk_nulls_del_node_init_rcu(sk);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 unlock:
        spin_unlock_bh(lock);
@@ -750,7 +749,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
 
        for (i = 0; i < INET_LHTABLE_SIZE; i++) {
                spin_lock_init(&h->listening_hash[i].lock);
-               INIT_HLIST_HEAD(&h->listening_hash[i].head);
+               INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
+                                     i + LISTENING_NULLS_BASE);
                h->listening_hash[i].count = 0;
        }
 
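
The listening-hash conversion above swaps hlist for hlist_nulls so RCU readers can detect being migrated to another chain mid-walk: each chain ends in a distinct per-slot "nulls" value rather than plain NULL. The canonical reader pattern, condensed — match() is an invented predicate, and the caller is assumed to hold rcu_read_lock():

    #include <linux/rculist_nulls.h>

    /* head was set up with INIT_HLIST_NULLS_HEAD(head, slot_id). */
    static struct sock *lookup(struct hlist_nulls_head *head,
                               unsigned long slot_id)
    {
            struct hlist_nulls_node *node;
            struct sock *sk;

    begin:
            sk_nulls_for_each_rcu(sk, node, head) {
                    if (match(sk))          /* hypothetical predicate */
                            return sk;
            }
            /* The loop ended on a nulls marker; if it is not ours, the
             * entry we followed was moved to another chain: restart.
             */
            if (get_nulls_value(node) != slot_id)
                    goto begin;
            return NULL;
    }
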
index 572b6307a2dff1a29c6686fb963aeb4a51410447..8274f98c511cc0ca0cfe721f46941680aa9c7378 100644 (file)
@@ -1464,8 +1464,8 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_OFLAGS]       = { .type = NLA_U16 },
        [IFLA_GRE_IKEY]         = { .type = NLA_U32 },
        [IFLA_GRE_OKEY]         = { .type = NLA_U32 },
-       [IFLA_GRE_LOCAL]        = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
-       [IFLA_GRE_REMOTE]       = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+       [IFLA_GRE_LOCAL]        = { .len = sizeof_field(struct iphdr, saddr) },
+       [IFLA_GRE_REMOTE]       = { .len = sizeof_field(struct iphdr, daddr) },
        [IFLA_GRE_TTL]          = { .type = NLA_U8 },
        [IFLA_GRE_TOS]          = { .type = NLA_U8 },
        [IFLA_GRE_PMTUDISC]     = { .type = NLA_U8 },
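
The FIELD_SIZEOF() to sizeof_field() churn seen here and in several files below is a pure rename; the replacement lives in include/linux/stddef.h next to offsetof() and is essentially:

    /* Size of a struct member, without needing an instance. */
    #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

    /* e.g. sizeof_field(struct iphdr, saddr) evaluates to 4 */
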
index 38c02bb62e2cdd8b379d01e3db6e2fc2cedc3004..74e1d964a615280d59370a4c958726e2cef7d474 100644 (file)
@@ -505,7 +505,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
                mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
        if (skb_valid_dst(skb))
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
 
        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
@@ -1236,10 +1236,8 @@ int ip_tunnel_init(struct net_device *dev)
        iph->version            = 4;
        iph->ihl                = 5;
 
-       if (tunnel->collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
+       if (tunnel->collect_md)
                netif_keep_dst(dev);
-       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
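
The tunnel paths above switch to skb_dst_update_pmtu_no_confirm(). Presumably, per the confirm_neigh plumbing in this series, the two include/net/dst.h helpers differ only in the flag they pass down, roughly:

    static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
    {
            struct dst_entry *dst = skb_dst(skb);

            if (dst && dst->ops->update_pmtu)
                    dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
    }

    /* Same, but never treated as proof that the neighbour is alive. */
    static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb,
                                                      u32 mtu)
    {
            struct dst_entry *dst = skb_dst(skb);

            if (dst && dst->ops->update_pmtu)
                    dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
    }
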
index cfb0256067936839888b9c910997ff1a784021d1..37cddd18f2828c40ae2cb931bc40038c2ff41e5b 100644 (file)
@@ -187,8 +187,17 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        int mtu;
 
        if (!dst) {
-               dev->stats.tx_carrier_errors++;
-               goto tx_error_icmp;
+               struct rtable *rt;
+
+               fl->u.ip4.flowi4_oif = dev->ifindex;
+               fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+               rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+               if (IS_ERR(rt)) {
+                       dev->stats.tx_carrier_errors++;
+                       goto tx_error_icmp;
+               }
+               dst = &rt->dst;
+               skb_dst_set(skb, dst);
        }
 
        dst_hold(dst);
@@ -214,7 +223,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 
        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
@@ -580,8 +589,8 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
        [IFLA_VTI_LINK]         = { .type = NLA_U32 },
        [IFLA_VTI_IKEY]         = { .type = NLA_U32 },
        [IFLA_VTI_OKEY]         = { .type = NLA_U32 },
-       [IFLA_VTI_LOCAL]        = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
-       [IFLA_VTI_REMOTE]       = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+       [IFLA_VTI_LOCAL]        = { .len = sizeof_field(struct iphdr, saddr) },
+       [IFLA_VTI_REMOTE]       = { .len = sizeof_field(struct iphdr, daddr) },
        [IFLA_VTI_FWMARK]       = { .type = NLA_U32 },
 };
 
index 214154b47d56c7af594496abf719252a13a5594a..f1f78a742b36a18962a7a66884607e2aaa0a28bb 100644 (file)
@@ -384,10 +384,11 @@ next:             ;
        return 1;
 }
 
-static inline int check_target(struct arpt_entry *e, const char *name)
+static int check_target(struct arpt_entry *e, struct net *net, const char *name)
 {
        struct xt_entry_target *t = arpt_get_target(e);
        struct xt_tgchk_param par = {
+               .net       = net,
                .table     = name,
                .entryinfo = e,
                .target    = t->u.kernel.target,
@@ -399,8 +400,9 @@ static inline int check_target(struct arpt_entry *e, const char *name)
        return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
 }
 
-static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+static int
+find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
+                unsigned int size,
                 struct xt_percpu_counter_alloc_state *alloc_state)
 {
        struct xt_entry_target *t;
@@ -419,7 +421,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
        }
        t->u.kernel.target = target;
 
-       ret = check_target(e, name);
+       ret = check_target(e, net, name);
        if (ret)
                goto err;
        return 0;
@@ -494,12 +496,13 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
        return 0;
 }
 
-static inline void cleanup_entry(struct arpt_entry *e)
+static void cleanup_entry(struct arpt_entry *e, struct net *net)
 {
        struct xt_tgdtor_param par;
        struct xt_entry_target *t;
 
        t = arpt_get_target(e);
+       par.net      = net;
        par.target   = t->u.kernel.target;
        par.targinfo = t->data;
        par.family   = NFPROTO_ARP;
@@ -512,7 +515,9 @@ static inline void cleanup_entry(struct arpt_entry *e)
 /* Checks and translates the user-supplied table segment (held in
  * newinfo).
  */
-static int translate_table(struct xt_table_info *newinfo, void *entry0,
+static int translate_table(struct net *net,
+                          struct xt_table_info *newinfo,
+                          void *entry0,
                           const struct arpt_replace *repl)
 {
        struct xt_percpu_counter_alloc_state alloc_state = { 0 };
@@ -569,7 +574,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
        /* Finally, each sanity check must pass */
        i = 0;
        xt_entry_foreach(iter, entry0, newinfo->size) {
-               ret = find_check_entry(iter, repl->name, repl->size,
+               ret = find_check_entry(iter, net, repl->name, repl->size,
                                       &alloc_state);
                if (ret != 0)
                        break;
@@ -580,7 +585,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
                xt_entry_foreach(iter, entry0, newinfo->size) {
                        if (i-- == 0)
                                break;
-                       cleanup_entry(iter);
+                       cleanup_entry(iter, net);
                }
                return ret;
        }
@@ -923,7 +928,7 @@ static int __do_replace(struct net *net, const char *name,
        /* Decrease module usage counts and free resource */
        loc_cpu_old_entry = oldinfo->entries;
        xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
-               cleanup_entry(iter);
+               cleanup_entry(iter, net);
 
        xt_free_table_info(oldinfo);
        if (copy_to_user(counters_ptr, counters,
@@ -974,7 +979,7 @@ static int do_replace(struct net *net, const void __user *user,
                goto free_newinfo;
        }
 
-       ret = translate_table(newinfo, loc_cpu_entry, &tmp);
+       ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
        if (ret != 0)
                goto free_newinfo;
 
@@ -986,7 +991,7 @@ static int do_replace(struct net *net, const void __user *user,
 
  free_newinfo_untrans:
        xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
-               cleanup_entry(iter);
+               cleanup_entry(iter, net);
  free_newinfo:
        xt_free_table_info(newinfo);
        return ret;
@@ -1149,7 +1154,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
        }
 }
 
-static int translate_compat_table(struct xt_table_info **pinfo,
+static int translate_compat_table(struct net *net,
+                                 struct xt_table_info **pinfo,
                                  void **pentry0,
                                  const struct compat_arpt_replace *compatr)
 {
@@ -1217,7 +1223,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
        repl.num_counters = 0;
        repl.counters = NULL;
        repl.size = newinfo->size;
-       ret = translate_table(newinfo, entry1, &repl);
+       ret = translate_table(net, newinfo, entry1, &repl);
        if (ret)
                goto free_newinfo;
 
@@ -1270,7 +1276,7 @@ static int compat_do_replace(struct net *net, void __user *user,
                goto free_newinfo;
        }
 
-       ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+       ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
        if (ret != 0)
                goto free_newinfo;
 
@@ -1282,7 +1288,7 @@ static int compat_do_replace(struct net *net, void __user *user,
 
  free_newinfo_untrans:
        xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
-               cleanup_entry(iter);
+               cleanup_entry(iter, net);
  free_newinfo:
        xt_free_table_info(newinfo);
        return ret;
@@ -1509,7 +1515,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
        return ret;
 }
 
-static void __arpt_unregister_table(struct xt_table *table)
+static void __arpt_unregister_table(struct net *net, struct xt_table *table)
 {
        struct xt_table_info *private;
        void *loc_cpu_entry;
@@ -1521,7 +1527,7 @@ static void __arpt_unregister_table(struct xt_table *table)
        /* Decrease module usage counts and free resources */
        loc_cpu_entry = private->entries;
        xt_entry_foreach(iter, loc_cpu_entry, private->size)
-               cleanup_entry(iter);
+               cleanup_entry(iter, net);
        if (private->number > private->initial_entries)
                module_put(table_owner);
        xt_free_table_info(private);
@@ -1546,7 +1552,7 @@ int arpt_register_table(struct net *net,
        loc_cpu_entry = newinfo->entries;
        memcpy(loc_cpu_entry, repl->entries, repl->size);
 
-       ret = translate_table(newinfo, loc_cpu_entry, repl);
+       ret = translate_table(net, newinfo, loc_cpu_entry, repl);
        if (ret != 0)
                goto out_free;
 
@@ -1561,7 +1567,7 @@ int arpt_register_table(struct net *net,
 
        ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
        if (ret != 0) {
-               __arpt_unregister_table(new_table);
+               __arpt_unregister_table(net, new_table);
                *res = NULL;
        }
 
@@ -1576,7 +1582,7 @@ void arpt_unregister_table(struct net *net, struct xt_table *table,
                           const struct nf_hook_ops *ops)
 {
        nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
-       __arpt_unregister_table(table);
+       __arpt_unregister_table(net, table);
 }
 
 /* The built-in targets: standard (NULL) and error. */
index f88c93c38f1173614363b10cab7362771dd9ed8c..e356ea77922729f45711006926e519c45e552516 100644 (file)
@@ -139,7 +139,8 @@ static unsigned int  ipv4_mtu(const struct dst_entry *dst);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void             ipv4_link_failure(struct sk_buff *skb);
 static void             ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                          struct sk_buff *skb, u32 mtu);
+                                          struct sk_buff *skb, u32 mtu,
+                                          bool confirm_neigh);
 static void             ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
 static void            ipv4_dst_destroy(struct dst_entry *dst);
@@ -270,6 +271,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
+       (*pos)++;
        return NULL;
 
 }
@@ -1043,7 +1045,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 }
 
 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                             struct sk_buff *skb, u32 mtu)
+                             struct sk_buff *skb, u32 mtu,
+                             bool confirm_neigh)
 {
        struct rtable *rt = (struct rtable *) dst;
        struct flowi4 fl4;
@@ -2687,7 +2690,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
 }
 
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                         struct sk_buff *skb, u32 mtu)
+                                         struct sk_buff *skb, u32 mtu,
+                                         bool confirm_neigh)
 {
 }
 
index 8a39ee79489192c02385aaadc8d1ae969fb55d23..a7d766e6390e2be4f759b9d89eb70a900cfd4a46 100644 (file)
@@ -1087,8 +1087,7 @@ do_error:
                goto out;
 out_err:
        /* make sure we wake any epoll edge trigger waiter */
-       if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
-                    err == -EAGAIN)) {
+       if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
                sk->sk_write_space(sk);
                tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
        }
@@ -1419,8 +1418,7 @@ out_err:
        sock_zerocopy_put_abort(uarg, true);
        err = sk_stream_error(sk, flags, err);
        /* make sure we wake any epoll edge trigger waiter */
-       if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
-                    err == -EAGAIN)) {
+       if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
                sk->sk_write_space(sk);
                tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
        }
@@ -2526,6 +2524,7 @@ static void tcp_rtx_queue_purge(struct sock *sk)
 {
        struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
 
+       tcp_sk(sk)->highest_sack = NULL;
        while (p) {
                struct sk_buff *skb = rb_to_skb(p);
 
@@ -2616,7 +2615,6 @@ int tcp_disconnect(struct sock *sk, int flags)
        WRITE_ONCE(tp->write_seq, seq);
 
        icsk->icsk_backoff = 0;
-       tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
@@ -3949,7 +3947,7 @@ void __init tcp_init(void)
 
        BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
        BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
-                    FIELD_SIZEOF(struct sk_buff, cb));
+                    sizeof_field(struct sk_buff, cb));
 
        percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
        percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
index 32772d6ded4ed359aa4d09ba67071e88a79ebdeb..6c4d79baff2696b859d214bb63108b9f2f61f457 100644 (file)
@@ -306,7 +306,8 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
        /* Sort of tcp_tso_autosize() but ignoring
         * driver-provided sk_gso_max_size.
         */
-       bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift,
+       bytes = min_t(unsigned long,
+                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
                      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
        segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
 
@@ -778,8 +779,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
         * bandwidth sample. Delivered is in packets and interval_us in usec and
         * ratio will be <<1 for most connections. So delivered is first scaled.
         */
-       bw = (u64)rs->delivered * BW_UNIT;
-       do_div(bw, rs->interval_us);
+       bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
 
        /* If this sample is application-limited, it is likely to have a very
         * low delivered count that represents application behavior rather than
index e38705165ac9bd48910348169fc9904c41fb48dc..8a01428f80c1c4b23e2ae0568efb2aa0e0f71ccb 100644 (file)
@@ -121,14 +121,14 @@ int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        struct sk_psock *psock;
        int copied, ret;
 
-       if (unlikely(flags & MSG_ERRQUEUE))
-               return inet_recv_error(sk, msg, len, addr_len);
-       if (!skb_queue_empty(&sk->sk_receive_queue))
-               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+       if (unlikely(flags & MSG_ERRQUEUE))
+               return inet_recv_error(sk, msg, len, addr_len);
+       if (!skb_queue_empty(&sk->sk_receive_queue) &&
+           sk_psock_queue_empty(psock))
+               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
        lock_sock(sk);
 msg_bytes_ready:
        copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
@@ -139,7 +139,7 @@ msg_bytes_ready:
                timeo = sock_rcvtimeo(sk, nonblock);
                data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
                if (data) {
-                       if (skb_queue_empty(&sk->sk_receive_queue))
+                       if (!sk_psock_queue_empty(psock))
                                goto msg_bytes_ready;
                        release_sock(sk);
                        sk_psock_put(sk, psock);
@@ -315,10 +315,7 @@ more_data:
                 */
                delta = msg->sg.size;
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);
-               if (msg->sg.size < delta)
-                       delta -= msg->sg.size;
-               else
-                       delta = 0;
+               delta -= msg->sg.size;
        }
 
        if (msg->cork_bytes &&
index 88b987ca9ebbbbf0f28ea4560e3731837aea7fb3..2a976f57f7e78fcb1a780959b61906ad23285938 100644 (file)
@@ -915,9 +915,10 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       if (!tp->retransmit_skb_hint ||
-           before(TCP_SKB_CB(skb)->seq,
-                  TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+       if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
+           (tp->retransmit_skb_hint &&
+            before(TCP_SKB_CB(skb)->seq,
+                   TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
                tp->retransmit_skb_hint = skb;
 }
 
@@ -1727,8 +1728,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                }
 
                /* Ignore very old stuff early */
-               if (!after(sp[used_sacks].end_seq, prior_snd_una))
+               if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
+                       if (i == 0)
+                               first_sack_index = -1;
                        continue;
+               }
 
                used_sacks++;
        }
@@ -3160,6 +3164,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                        tp->retransmit_skb_hint = NULL;
                if (unlikely(skb == tp->lost_skb_hint))
                        tp->lost_skb_hint = NULL;
+               tcp_highest_sack_replace(sk, skb, next);
                tcp_rtx_queue_unlink_and_free(skb, sk);
        }
 
index 92282f98dc82290bfaf53acc050182e4cc3be1eb..1c7326e04f9bee463500e17ea7c6eb840a31d89f 100644 (file)
@@ -2147,13 +2147,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        struct inet_listen_hashbucket *ilb;
+       struct hlist_nulls_node *node;
        struct sock *sk = cur;
 
        if (!sk) {
 get_head:
                ilb = &tcp_hashinfo.listening_hash[st->bucket];
                spin_lock(&ilb->lock);
-               sk = sk_head(&ilb->head);
+               sk = sk_nulls_head(&ilb->nulls_head);
                st->offset = 0;
                goto get_sk;
        }
@@ -2161,9 +2162,9 @@ get_head:
        ++st->num;
        ++st->offset;
 
-       sk = sk_next(sk);
+       sk = sk_nulls_next(sk);
 get_sk:
-       sk_for_each_from(sk) {
+       sk_nulls_for_each_from(sk, node) {
                if (!net_eq(sock_net(sk), net))
                        continue;
                if (sk->sk_family == afinfo->family)
index b184f03d743715ef4b2d166ceae651529be77953..b62b59b18db96413f8338a4e778808659baa86ea 100644 (file)
@@ -72,6 +72,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
        __skb_unlink(skb, &sk->sk_write_queue);
        tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
 
+       if (tp->highest_sack == NULL)
+               tp->highest_sack = skb;
+
        tp->packets_out += tcp_skb_pcount(skb);
        if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
                tcp_rearm_rto(sk);
@@ -1725,7 +1728,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
        u32 bytes, segs;
 
        bytes = min_t(unsigned long,
-                     sk->sk_pacing_rate >> sk->sk_pacing_shift,
+                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
                      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
 
        /* Goal is to send at least one packet per ms,
@@ -2260,7 +2263,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 
        limit = max_t(unsigned long,
                      2 * skb->truesize,
-                     sk->sk_pacing_rate >> sk->sk_pacing_shift);
+                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
        if (sk->sk_pacing_status == SK_PACING_NONE)
                limit = min_t(unsigned long, limit,
                              sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
@@ -2438,6 +2441,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (tcp_small_queue_check(sk, skb, 0))
                        break;
 
+               /* Argh, we hit an empty skb, presumably because a thread
+                * is sleeping in sendmsg()/sk_stream_wait_memory().
+                * We do not want to send a pure-ack packet and have
+                * a strange-looking rtx queue with empty packet(s).
+                */
+               if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
+                       break;
+
                if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
                        break;
 
@@ -3121,7 +3132,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
  */
 void tcp_send_fin(struct sock *sk)
 {
-       struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+       struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
        /* Optimization, tack on the FIN if we have one skb in write queue and
@@ -3129,6 +3140,7 @@ void tcp_send_fin(struct sock *sk)
         * Note: in the latter case, FIN packet will be sent after a timeout,
         * as TCP stack thinks it has already been transmitted.
         */
+       tskb = tail;
        if (!tskb && tcp_under_memory_pressure(sk))
                tskb = skb_rb_last(&sk->tcp_rtx_queue);
 
@@ -3136,7 +3148,7 @@ void tcp_send_fin(struct sock *sk)
                TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
                TCP_SKB_CB(tskb)->end_seq++;
                tp->write_seq++;
-               if (tcp_write_queue_empty(sk)) {
+               if (!tail) {
                        /* This means tskb was already sent.
                         * Pretend we included the FIN on previous transmit.
                         * We need to set tp->snd_nxt to the value it would have
@@ -3220,6 +3232,7 @@ int tcp_send_synack(struct sock *sk)
                        if (!nskb)
                                return -ENOMEM;
                        INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
+                       tcp_highest_sack_replace(sk, skb, nskb);
                        tcp_rtx_queue_unlink_and_free(skb, sk);
                        __skb_header_release(nskb);
                        tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
index 12ab5db2b71cb5781fd9517d944138e1cecbd5dc..38d3ad1411611ef6002a9843ec7699a860f1485a 100644 (file)
@@ -99,17 +99,19 @@ void tcp_get_available_ulp(char *buf, size_t maxlen)
        rcu_read_unlock();
 }
 
-void tcp_update_ulp(struct sock *sk, struct proto *proto)
+void tcp_update_ulp(struct sock *sk, struct proto *proto,
+                   void (*write_space)(struct sock *sk))
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (!icsk->icsk_ulp_ops) {
+               sk->sk_write_space = write_space;
                sk->sk_prot = proto;
                return;
        }
 
        if (icsk->icsk_ulp_ops->update)
-               icsk->icsk_ulp_ops->update(sk, proto);
+               icsk->icsk_ulp_ops->update(sk, proto, write_space);
 }
 
 void tcp_cleanup_ulp(struct sock *sk)
index 4da5758cc718739b63c7dda65cf889869b3b6141..030d43c7c957f03227efa4a3775b5b1468f40dc8 100644 (file)
@@ -1368,7 +1368,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
        if (likely(partial)) {
                up->forward_deficit += size;
                size = up->forward_deficit;
-               if (size < (sk->sk_rcvbuf >> 2))
+               if (size < (sk->sk_rcvbuf >> 2) &&
+                   !skb_queue_empty(&up->reader_queue))
                        return;
        } else {
                size += up->forward_deficit;
@@ -1475,7 +1476,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
         * queue contains some other skb
         */
        rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
-       if (rmem > (size + sk->sk_rcvbuf))
+       if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
                goto uncharge_drop;
 
        spin_lock(&list->lock);
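
The (unsigned int) cast above is an overflow guard: sk_rcvbuf is a plain int, and with SO_RCVBUF pushed near INT_MAX the signed sum size + sk->sk_rcvbuf can overflow (undefined behaviour, typically a negative value), making the bound check fire spuriously. Forcing unsigned arithmetic keeps the comparison sane. A userspace toy showing the casted form (the signed variant stays in a comment, since evaluating it would be UB):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            int rmem = 100;             /* current receive memory + skb */
            int size = 200;             /* truesize of the new skb */
            int rcvbuf = INT_MAX;       /* SO_RCVBUF at the limit */

            /* Signed: size + rcvbuf would overflow int (UB, typically
             * wrapping negative), so rmem > (size + rcvbuf) could be
             * true and the packet would be dropped for no reason.
             */
            printf("%d\n", rmem > (size + (unsigned int)rcvbuf)); /* 0 */
            return 0;
    }
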
index 35b84b52b7027b35f65b36d6a0160913e8df2c5a..9ebd54752e03b81a01f6c53cc17cebbccd928137 100644 (file)
@@ -100,12 +100,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 }
 
 static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                             struct sk_buff *skb, u32 mtu)
+                             struct sk_buff *skb, u32 mtu,
+                             bool confirm_neigh)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct dst_entry *path = xdst->route;
 
-       path->ops->update_pmtu(path, sk, skb, mtu);
+       path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
 }
 
 static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
index 98d82305d6ded0a3afe05971ad2f6f48df92944a..39d861d0037719ecb02a3341b923d412b5cdc0c1 100644 (file)
@@ -5231,16 +5231,16 @@ static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
                return -EINVAL;
        }
 
+       if (!netlink_strict_get_check(skb))
+               return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
+                                             ifa_ipv6_policy, extack);
+
        ifm = nlmsg_data(nlh);
        if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
                NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
                return -EINVAL;
        }
 
-       if (!netlink_strict_get_check(skb))
-               return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
-                                             ifa_ipv6_policy, extack);
-
        err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
                                            ifa_ipv6_policy, extack);
        if (err)
index e31626ffccd18ef2b1a7303ab1c1ef2ef38e9d51..fd535053245b616eca675e65c89bed679fe796a8 100644 (file)
@@ -79,6 +79,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
                if (!x)
                        goto out_reset;
 
+               skb->mark = xfrm_smark_get(skb->mark, x);
+
                sp->xvec[sp->len++] = x;
                sp->olen++;
 
index fe9cb8d1adca069b39990307ac8ab1f7972e598a..e315526fa244ab83363495d4cfa0def7e3decf85 100644 (file)
@@ -146,7 +146,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
 
        if (IS_ERR(dst))
                return NULL;
-       dst->ops->update_pmtu(dst, sk, NULL, mtu);
+       dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
 
        dst = inet6_csk_route_socket(sk, &fl6);
        return IS_ERR(dst) ? NULL : dst;
index 7bae6a91b48726173ef285e0449a4bcb29d75993..cfae0a1529a1dc14e1494177f9978451d6d9e5f5 100644 (file)
@@ -2495,14 +2495,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct net *net = seq_file_net(seq);
        struct ipv6_route_iter *iter = seq->private;
 
+       ++(*pos);
        if (!v)
                goto iter_table;
 
        n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
-       if (n) {
-               ++*pos;
+       if (n)
                return n;
-       }
 
 iter_table:
        ipv6_route_check_sernum(iter);
@@ -2510,8 +2509,6 @@ iter_table:
        r = fib6_walk_continue(&iter->w);
        spin_unlock_bh(&iter->tbl->tb6_lock);
        if (r > 0) {
-               if (v)
-                       ++*pos;
                return iter->w.leaf;
        } else if (r < 0) {
                fib6_walker_unlink(net, &iter->w);
index 923034c52ce40d85c91a54d1b107e2088d31bb85..55bfc5149d0c5b5a6063dbdf19c2055a3b9aea93 100644 (file)
@@ -1040,7 +1040,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 
        /* TooBig packet may have updated dst->dev's mtu */
        if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
-               dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+               dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
 
        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
                           NEXTHDR_GRE);
@@ -1466,7 +1466,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
                dev->mtu -= 8;
 
        if (tunnel->parms.collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
                netif_keep_dst(dev);
        }
        ip6gre_tnl_init_features(dev);
@@ -1894,7 +1893,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
 
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
@@ -2170,8 +2168,8 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
        [IFLA_GRE_IKEY]        = { .type = NLA_U32 },
        [IFLA_GRE_OKEY]        = { .type = NLA_U32 },
-       [IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
-       [IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
+       [IFLA_GRE_LOCAL]       = { .len = sizeof_field(struct ipv6hdr, saddr) },
+       [IFLA_GRE_REMOTE]      = { .len = sizeof_field(struct ipv6hdr, daddr) },
        [IFLA_GRE_TTL]         = { .type = NLA_U8 },
        [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
        [IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
@@ -2197,7 +2195,6 @@ static void ip6erspan_tap_setup(struct net_device *dev)
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
 
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
index 754a484d35df6eb03b82593cbb66a78718e30326..b5dd20c4599bb1fc2515f6a78b8a6ee3287d03e5 100644 (file)
@@ -640,7 +640,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (rel_info > dst_mtu(skb_dst(skb2)))
                        goto out;
 
-               skb_dst_update_pmtu(skb2, rel_info);
+               skb_dst_update_pmtu_no_confirm(skb2, rel_info);
        }
 
        icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -1132,7 +1132,7 @@ route_lookup:
        mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
                       IPV6_MIN_MTU : IPV4_MIN_MTU);
 
-       skb_dst_update_pmtu(skb, mtu);
+       skb_dst_update_pmtu_no_confirm(skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
                *pmtu = mtu;
                err = -EMSGSIZE;
@@ -1877,10 +1877,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
        if (err)
                return err;
        ip6_tnl_link_config(t);
-       if (t->parms.collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
+       if (t->parms.collect_md)
                netif_keep_dst(dev);
-       }
        return 0;
 }
 
index 024db17386d2ff8043269bd6f7a46f4535fba67f..524006aa0d78a6e38b46d2d828917b5f8c8d1386 100644 (file)
@@ -449,8 +449,17 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        int err = -1;
        int mtu;
 
-       if (!dst)
-               goto tx_err_link_failure;
+       if (!dst) {
+               fl->u.ip6.flowi6_oif = dev->ifindex;
+               fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+               dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+               if (dst->error) {
+                       dst_release(dst);
+                       dst = NULL;
+                       goto tx_err_link_failure;
+               }
+               skb_dst_set(skb, dst);
+       }
 
        dst_hold(dst);
        dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
@@ -479,7 +488,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        if (mtu < IPV6_MIN_MTU)
index b59940416cb5776c7a77910fef029aef8e9491e7..affb51c11a25b0beaddae3c13b1d74b86dfec042 100644 (file)
@@ -95,7 +95,8 @@ static int            ip6_pkt_prohibit(struct sk_buff *skb);
 static int             ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static void            ip6_link_failure(struct sk_buff *skb);
 static void            ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                          struct sk_buff *skb, u32 mtu);
+                                          struct sk_buff *skb, u32 mtu,
+                                          bool confirm_neigh);
 static void            rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
@@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
 }
 
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                        struct sk_buff *skb, u32 mtu)
+                                        struct sk_buff *skb, u32 mtu,
+                                        bool confirm_neigh)
 {
 }
 
@@ -2692,7 +2694,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
 }
 
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
-                                const struct ipv6hdr *iph, u32 mtu)
+                                const struct ipv6hdr *iph, u32 mtu,
+                                bool confirm_neigh)
 {
        const struct in6_addr *daddr, *saddr;
        struct rt6_info *rt6 = (struct rt6_info *)dst;
@@ -2710,7 +2713,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
                daddr = NULL;
                saddr = NULL;
        }
-       dst_confirm_neigh(dst, daddr);
+
+       if (confirm_neigh)
+               dst_confirm_neigh(dst, daddr);
+
        mtu = max_t(u32, mtu, IPV6_MIN_MTU);
        if (mtu >= dst_mtu(dst))
                return;
@@ -2764,9 +2770,11 @@ out_unlock:
 }
 
 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                              struct sk_buff *skb, u32 mtu)
+                              struct sk_buff *skb, u32 mtu,
+                              bool confirm_neigh)
 {
-       __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
+       __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
+                            confirm_neigh);
 }
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
@@ -2785,7 +2793,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 
        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
-               __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
+               __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
        dst_release(dst);
 }
 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
index 85a5447a3e8d133c4f5e802115866222e7074446..7cbc19731997969bf7eec4ea5e1e8dd20d9e3da1 100644 (file)
@@ -23,6 +23,7 @@
 #include <net/addrconf.h>
 #include <net/ip6_route.h>
 #include <net/dst_cache.h>
+#include <net/ip_tunnels.h>
 #ifdef CONFIG_IPV6_SEG6_HMAC
 #include <net/seg6_hmac.h>
 #endif
@@ -135,7 +136,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
 
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
-       skb->encapsulation = 0;
+       if (iptunnel_pull_offloads(skb))
+               return false;
 
        return true;
 }
index b2ccbc4731277d5215d2c82c0d29d1387ae81e13..98954830c40baed84c6c3d123d299a227c2d1dee 100644 (file)
@@ -944,7 +944,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                }
 
                if (tunnel->parms.iph.daddr)
-                       skb_dst_update_pmtu(skb, mtu);
+                       skb_dst_update_pmtu_no_confirm(skb, mtu);
 
                if (skb->len > mtu && !skb_is_gso(skb)) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
index 699e0730ce8e86545ef2812da316f8e726e4a75d..af7a4b8b1e9c4a46a0b0cf4245a981efa24b9152 100644 (file)
@@ -98,12 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 }
 
 static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                             struct sk_buff *skb, u32 mtu)
+                             struct sk_buff *skb, u32 mtu,
+                             bool confirm_neigh)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct dst_entry *path = xdst->route;
 
-       path->ops->update_pmtu(path, sk, skb, mtu);
+       path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
 }
 
 static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
index ebb62a4ebe30d3bd6347f72711b23655cddc00c5..c4bdcbc84b07f0fd8516ccc46416aa801e4b6dfa 100644 (file)
@@ -50,7 +50,7 @@ static struct iucv_interface *pr_iucv;
 static const u8 iprm_shutdown[8] =
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
 
-#define TRGCLS_SIZE    FIELD_SIZEOF(struct iucv_message, class)
+#define TRGCLS_SIZE    sizeof_field(struct iucv_message, class)
 
 #define __iucv_sock_wait(sk, condition, timeo, ret)                    \
 do {                                                                   \
index 204a8351efffc86f566e51b27f173c8bdee6c4cb..c29170e767a8ca3964c8958cc99db5893302b4f1 100644 (file)
@@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
        return LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
               LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
               LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
-              !pdu->dsap ? 0 : 1;                      /* NULL DSAP value */
+              !pdu->dsap;                              /* NULL DSAP value */
 }
 
 static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
@@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
        return LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
               LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
               LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
-              !pdu->dsap ? 0 : 1;                      /* NULL DSAP */
+              !pdu->dsap;                              /* NULL DSAP */
 }
 
 static int llc_station_ac_send_xid_r(struct sk_buff *skb)
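
Both llc fixes above repair the same precedence bug: && binds tighter than ?:, so the old expression parsed as (cmd && utype && xid && !dsap) ? 0 : 1 and returned 0 exactly when every condition held, the inverse of the intent. Returning the condition itself fixes it. A compile-and-run demonstration:

    #include <stdio.h>

    int main(void)
    {
            int cmd = 1, utype = 1, xid = 1, dsap = 0;

            /* Old, inverted form: yields 0 although everything matches. */
            int old = cmd && utype && xid && !dsap ? 0 : 1;

            /* Fixed form: yields 1 when everything matches. */
            int fixed = cmd && utype && xid && !dsap;

            printf("old=%d fixed=%d\n", old, fixed);    /* old=0 fixed=1 */
            return 0;
    }
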
index 63cb0028b02d2ba20879fd7bab3cbeae42a46ad1..9fc2968856c0bf9019972f21f9e67a3c3463f50c 100644 (file)
@@ -442,7 +442,7 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
                        return 0;
 
                sband = hw->wiphy->bands[status->band];
-               if (!sband || status->rate_idx > sband->n_bitrates)
+               if (!sband || status->rate_idx >= sband->n_bitrates)
                        return 0;
 
                rate = &sband->bitrates[status->rate_idx];
index 4fb7f1f1210980df126c0560fd6cc824fc1388c9..000c742d05279a8bebb23b54b061e47669b5a4ae 100644 (file)
@@ -2954,6 +2954,28 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
        return err;
 }
 
+static void ieee80211_end_cac(struct wiphy *wiphy,
+                             struct net_device *dev)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+
+       mutex_lock(&local->mtx);
+       list_for_each_entry(sdata, &local->interfaces, list) {
+       /* the work item might be waiting for local->mtx, but
+        * by the time it acquires it, sdata->wdev.cac_started
+        * will no longer be true
+        */
+               cancel_delayed_work(&sdata->dfs_cac_timer_work);
+
+               if (sdata->wdev.cac_started) {
+                       ieee80211_vif_release_channel(sdata);
+                       sdata->wdev.cac_started = false;
+               }
+       }
+       mutex_unlock(&local->mtx);
+}
+
 static struct cfg80211_beacon_data *
 cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 {
@@ -4023,6 +4045,7 @@ const struct cfg80211_ops mac80211_config_ops = {
 #endif
        .get_channel = ieee80211_cfg_get_channel,
        .start_radar_detection = ieee80211_start_radar_detection,
+       .end_cac = ieee80211_end_cac,
        .channel_switch = ieee80211_channel_switch,
        .set_qos_map = ieee80211_set_qos_map,
        .set_ap_chanwidth = ieee80211_set_ap_chanwidth,
index b3c9001d1f43d92a87104738b2de972a7e5425e2..c80b1e163ea4e6e7405af33acbc02b4fa2c1343c 100644 (file)
@@ -201,8 +201,6 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
        char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
        u64 rx_airtime = 0, tx_airtime = 0;
        s64 deficit[IEEE80211_NUM_ACS];
-       u32 q_depth[IEEE80211_NUM_ACS];
-       u32 q_limit_l[IEEE80211_NUM_ACS], q_limit_h[IEEE80211_NUM_ACS];
        ssize_t rv;
        int ac;
 
@@ -214,6 +212,56 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
                rx_airtime += sta->airtime[ac].rx_airtime;
                tx_airtime += sta->airtime[ac].tx_airtime;
                deficit[ac] = sta->airtime[ac].deficit;
+               spin_unlock_bh(&local->active_txq_lock[ac]);
+       }
+
+       p += scnprintf(p, bufsz + buf - p,
+               "RX: %llu us\nTX: %llu us\nWeight: %u\n"
+               "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
+               rx_airtime, tx_airtime, sta->airtime_weight,
+               deficit[0], deficit[1], deficit[2], deficit[3]);
+
+       rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+       kfree(buf);
+       return rv;
+}
+
+static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
+                                size_t count, loff_t *ppos)
+{
+       struct sta_info *sta = file->private_data;
+       struct ieee80211_local *local = sta->sdata->local;
+       int ac;
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               spin_lock_bh(&local->active_txq_lock[ac]);
+               sta->airtime[ac].rx_airtime = 0;
+               sta->airtime[ac].tx_airtime = 0;
+               sta->airtime[ac].deficit = sta->airtime_weight;
+               spin_unlock_bh(&local->active_txq_lock[ac]);
+       }
+
+       return count;
+}
+STA_OPS_RW(airtime);
+
+static ssize_t sta_aql_read(struct file *file, char __user *userbuf,
+                               size_t count, loff_t *ppos)
+{
+       struct sta_info *sta = file->private_data;
+       struct ieee80211_local *local = sta->sdata->local;
+       size_t bufsz = 400;
+       char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
+       u32 q_depth[IEEE80211_NUM_ACS];
+       u32 q_limit_l[IEEE80211_NUM_ACS], q_limit_h[IEEE80211_NUM_ACS];
+       ssize_t rv;
+       int ac;
+
+       if (!buf)
+               return -ENOMEM;
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               spin_lock_bh(&local->active_txq_lock[ac]);
                q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
                q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
                spin_unlock_bh(&local->active_txq_lock[ac]);
@@ -221,12 +269,8 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
        }
 
        p += scnprintf(p, bufsz + buf - p,
-               "RX: %llu us\nTX: %llu us\nWeight: %u\n"
-               "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n"
                "Q depth: VO: %u us VI: %u us BE: %u us BK: %u us\n"
                "Q limit[low/high]: VO: %u/%u VI: %u/%u BE: %u/%u BK: %u/%u\n",
-               rx_airtime, tx_airtime, sta->airtime_weight,
-               deficit[0], deficit[1], deficit[2], deficit[3],
                q_depth[0], q_depth[1], q_depth[2], q_depth[3],
                q_limit_l[0], q_limit_h[0], q_limit_l[1], q_limit_h[1],
                q_limit_l[2], q_limit_h[2], q_limit_l[3], q_limit_h[3]),
@@ -236,11 +280,10 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
        return rv;
 }
 
-static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
+static ssize_t sta_aql_write(struct file *file, const char __user *userbuf,
                                 size_t count, loff_t *ppos)
 {
        struct sta_info *sta = file->private_data;
-       struct ieee80211_local *local = sta->sdata->local;
        u32 ac, q_limit_l, q_limit_h;
        char _buf[100] = {}, *buf = _buf;
 
@@ -251,7 +294,7 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
                return -EFAULT;
 
        buf[sizeof(_buf) - 1] = '\0';
-       if (sscanf(buf, "queue limit %u %u %u", &ac, &q_limit_l, &q_limit_h)
+       if (sscanf(buf, "limit %u %u %u", &ac, &q_limit_l, &q_limit_h)
            != 3)
                return -EINVAL;
 
@@ -261,17 +304,10 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
        sta->airtime[ac].aql_limit_low = q_limit_l;
        sta->airtime[ac].aql_limit_high = q_limit_h;
 
-       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-               spin_lock_bh(&local->active_txq_lock[ac]);
-               sta->airtime[ac].rx_airtime = 0;
-               sta->airtime[ac].tx_airtime = 0;
-               sta->airtime[ac].deficit = sta->airtime_weight;
-               spin_unlock_bh(&local->active_txq_lock[ac]);
-       }
-
        return count;
 }
-STA_OPS_RW(airtime);
+STA_OPS_RW(aql);
+
 
 static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
                                        size_t count, loff_t *ppos)
@@ -996,6 +1032,10 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
                                    NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
                DEBUGFS_ADD(airtime);
 
+       if (wiphy_ext_feature_isset(local->hw.wiphy,
+                                   NL80211_EXT_FEATURE_AQL))
+               DEBUGFS_ADD(aql);
+
        debugfs_create_xul("driver_buffered_tids", 0400, sta->debugfs_dir,
                           &sta->driver_buffered_tids);
 
index 6cca0853f183f8737846c272418b4fe37ceb8dbd..4c2b5ba3ac094ff45718bda3a7b7fdd0431df959 100644 (file)
@@ -672,9 +672,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
                        IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
        }
 
-       local->airtime_flags = AIRTIME_USE_TX |
-                              AIRTIME_USE_RX |
-                              AIRTIME_USE_AQL;
+       local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX;
        local->aql_threshold = IEEE80211_AQL_THRESHOLD;
        atomic_set(&local->aql_total_pending_airtime, 0);
 
index 68af6230638588961d3839b028f027cf690949bc..d699833703819af130c54803528aa7808e4f2c54 100644 (file)
@@ -328,6 +328,9 @@ u32 airtime_link_metric_get(struct ieee80211_local *local,
        unsigned long fail_avg =
                ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
 
+       if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+               return MAX_METRIC;
+
        /* Try to get rate based on HW/SW RC algorithm.
         * Rate is returned in units of Kbps; correct this
         * to comply with airtime calculation units
index 8eafd81e97b4858453f88cd692e2c949f1579b6e..0f5f406788852e2ee052fbeac52f236a081534a2 100644 (file)
@@ -1916,6 +1916,9 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
 {
        int tx_pending;
 
+       if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
+               return;
+
        if (!tx_completed) {
                if (sta)
                        atomic_add(tx_airtime,
index ad5d8a4ae56d1e8eac382c7eb1e700d859e47860..c00e28585f9dbf92be06aef1245d05a987e1dc4f 100644 (file)
@@ -127,7 +127,6 @@ enum ieee80211_agg_stop_reason {
 /* Debugfs flags to enable/disable use of RX/TX airtime in scheduler */
 #define AIRTIME_USE_TX         BIT(0)
 #define AIRTIME_USE_RX         BIT(1)
-#define AIRTIME_USE_AQL                BIT(2)
 
 struct airtime_info {
        u64 rx_airtime;
index 727dc9f3f3b3a769d2764c529d407073e1d5d825..e7f57bb18f6e00255e75a465cd7ddb1c7282e6f4 100644 (file)
@@ -263,9 +263,21 @@ int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx,
        if ((keyid >> 6) != key->conf.keyidx)
                return TKIP_DECRYPT_INVALID_KEYIDX;
 
-       if (rx_ctx->ctx.state != TKIP_STATE_NOT_INIT &&
-           (iv32 < rx_ctx->iv32 ||
-            (iv32 == rx_ctx->iv32 && iv16 <= rx_ctx->iv16)))
+       /* Reject replays if the received TSC is smaller than or equal to the
+        * last received value in a valid message, but with an exception for
+        * the case where a new key has been set and no valid frame using that
+        * key has yet been received and the local RSC was initialized to 0.
+        * This exception allows the very first frame sent by the transmitter
+        * to be accepted even if that transmitter were to use TSC 0 (IEEE
+        * 802.11 describes the TSC as being initialized to 1 whenever a new
+        * key is taken into use).
+        */
+       if (iv32 < rx_ctx->iv32 ||
+           (iv32 == rx_ctx->iv32 &&
+            (iv16 < rx_ctx->iv16 ||
+             (iv16 == rx_ctx->iv16 &&
+              (rx_ctx->iv32 || rx_ctx->iv16 ||
+               rx_ctx->ctx.state != TKIP_STATE_NOT_INIT)))))
                return TKIP_DECRYPT_REPLAY;
 
        if (only_iv) {
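
For illustration, the replay rule added above can be read as a pure predicate
over the 48-bit TSC split into its iv32/iv16 halves. A minimal standalone
sketch, not the kernel code itself (tkip_is_replay and key_initialized are
hypothetical names, the latter standing in for
rx_ctx->ctx.state != TKIP_STATE_NOT_INIT):

#include <stdbool.h>
#include <stdint.h>

/* Returns true when the received TSC must be rejected as a replay. */
bool tkip_is_replay(uint32_t iv32, uint16_t iv16,
                    uint32_t last_iv32, uint16_t last_iv16,
                    bool key_initialized)
{
        if (iv32 != last_iv32)
                return iv32 < last_iv32;
        if (iv16 != last_iv16)
                return iv16 < last_iv16;
        /* Equal TSC: a replay, unless this is the very first frame on a
         * fresh key whose local RSC still sits at 0/0. */
        return last_iv32 || last_iv16 || key_initialized;
}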
index 4768322dc2028bcf65e4f3e590252a926865a083..427f51a0a9945a8d885af104f350e821f666336e 100644 (file)
@@ -408,20 +408,20 @@ TRACE_EVENT(drv_bss_info_changed,
                __field(u32, basic_rates)
                __array(int, mcast_rate, NUM_NL80211_BANDS)
                __field(u16, ht_operation_mode)
-               __field(s32, cqm_rssi_thold);
-               __field(s32, cqm_rssi_hyst);
-               __field(u32, channel_width);
-               __field(u32, channel_cfreq1);
+               __field(s32, cqm_rssi_thold)
+               __field(s32, cqm_rssi_hyst)
+               __field(u32, channel_width)
+               __field(u32, channel_cfreq1)
                __dynamic_array(u32, arp_addr_list,
                                info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
                                        IEEE80211_BSS_ARP_ADDR_LIST_LEN :
-                                       info->arp_addr_cnt);
-               __field(int, arp_addr_cnt);
-               __field(bool, qos);
-               __field(bool, idle);
-               __field(bool, ps);
-               __dynamic_array(u8, ssid, info->ssid_len);
-               __field(bool, hidden_ssid);
+                                       info->arp_addr_cnt)
+               __field(int, arp_addr_cnt)
+               __field(bool, qos)
+               __field(bool, idle)
+               __field(bool, ps)
+               __dynamic_array(u8, ssid, info->ssid_len)
+               __field(bool, hidden_ssid)
                __field(int, txpower)
                __field(u8, p2p_oppps_ctwindow)
        ),
@@ -1672,8 +1672,8 @@ TRACE_EVENT(drv_start_ap,
                VIF_ENTRY
                __field(u8, dtimper)
                __field(u16, bcnint)
-               __dynamic_array(u8, ssid, info->ssid_len);
-               __field(bool, hidden_ssid);
+               __dynamic_array(u8, ssid, info->ssid_len)
+               __field(bool, hidden_ssid)
        ),
 
        TP_fast_assign(
@@ -1739,7 +1739,7 @@ TRACE_EVENT(drv_join_ibss,
                VIF_ENTRY
                __field(u8, dtimper)
                __field(u16, bcnint)
-               __dynamic_array(u8, ssid, info->ssid_len);
+               __dynamic_array(u8, ssid, info->ssid_len)
        ),
 
        TP_fast_assign(
index b696b9136f4c1f5132e22179bcd128a18bed6d8a..a8a7306a1f56c01bfaad49c412bc20222d642c61 100644 (file)
@@ -2256,6 +2256,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
                                                    payload[7]);
        }
 
+       /*
+        * Initialize skb->priority for QoS frames. This is put in the TID field
+        * of the frame before passing it to the driver.
+        */
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *p = ieee80211_get_qos_ctl(hdr);
+               skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+       }
+
        memset(info, 0, sizeof(*info));
 
        info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
@@ -3668,7 +3677,7 @@ begin:
 
        IEEE80211_SKB_CB(skb)->control.vif = vif;
 
-       if (local->airtime_flags & AIRTIME_USE_AQL) {
+       if (wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
                u32 airtime;
 
                airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
@@ -3790,7 +3799,7 @@ bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
        struct sta_info *sta;
        struct ieee80211_local *local = hw_to_local(hw);
 
-       if (!(local->airtime_flags & AIRTIME_USE_AQL))
+       if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
                return true;
 
        if (!txq->sta)
index 1abd6f0dc227f072c5d09aa03b4e71fdfe8d466f..26ab0e9612d82579b6e89c99704c5bbe063c0e1b 100644 (file)
@@ -60,9 +60,9 @@ mtype_destroy(struct ip_set *set)
        if (SET_WITH_TIMEOUT(set))
                del_timer_sync(&map->gc);
 
-       ip_set_free(map->members);
        if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
                mtype_ext_cleanup(set);
+       ip_set_free(map->members);
        ip_set_free(map);
 
        set->data = NULL;
@@ -75,7 +75,7 @@ mtype_flush(struct ip_set *set)
 
        if (set->extensions & IPSET_EXT_DESTROY)
                mtype_ext_cleanup(set);
-       memset(map->members, 0, map->memsize);
+       bitmap_zero(map->members, map->elements);
        set->elements = 0;
        set->ext_size = 0;
 }
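
The ipset bitmap hunks in this area all follow one conversion: the opaque
void *members byte buffer becomes a real unsigned long * bitmap, allocated
with bitmap_zalloc(), sized with BITS_TO_LONGS(), and cleared with
bitmap_zero() by element count instead of memset() by byte count. A condensed
sketch of the pattern (kernel context assumed; elements is an illustrative
size, not taken from the patch):

unsigned long *members;
u32 elements = 1024;                    /* illustrative set size */
size_t memsize;

members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!members)
        return false;

/* memsize bookkeeping now matches the bitmap's real footprint: */
memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);

/* flushing clears by element count, not by raw bytes: */
bitmap_zero(members, elements);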
index abe8f77d7d23cd003ad022e57179d410a5074d2a..0a2196f591064e68754b34548ff614d61a3d44e5 100644 (file)
@@ -37,7 +37,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
 
 /* Type structure */
 struct bitmap_ip {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u32 first_ip;           /* host byte order, included in range */
        u32 last_ip;            /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -220,7 +220,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
            u32 first_ip, u32 last_ip,
            u32 elements, u32 hosts, u8 netmask)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_ip = first_ip;
@@ -322,7 +322,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        if (!map)
                return -ENOMEM;
 
-       map->memsize = bitmap_bytes(0, elements - 1);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ip;
        if (!init_map_ip(set, map, first_ip, last_ip,
                         elements, hosts, netmask)) {
index b618713297da56178f84d309c0e17242f0220c30..739e343efaf63193a23666e52b615fc40658f681 100644 (file)
@@ -42,7 +42,7 @@ enum {
 
 /* Type structure */
 struct bitmap_ipmac {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u32 first_ip;           /* host byte order, included in range */
        u32 last_ip;            /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -299,7 +299,7 @@ static bool
 init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
               u32 first_ip, u32 last_ip, u32 elements)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_ip = first_ip;
@@ -360,7 +360,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        if (!map)
                return -ENOMEM;
 
-       map->memsize = bitmap_bytes(0, elements - 1);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ipmac;
        if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
                kfree(map);
index 23d6095cb196d0aadde8d6266cc06a086dd57940..b49978dd810dcb7794e4ae6c760adaff0f9fb200 100644 (file)
@@ -30,7 +30,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
 
 /* Type structure */
 struct bitmap_port {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u16 first_port;         /* host byte order, included in range */
        u16 last_port;          /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -231,7 +231,7 @@ static bool
 init_map_port(struct ip_set *set, struct bitmap_port *map,
              u16 first_port, u16 last_port)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_port = first_port;
@@ -271,7 +271,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
                return -ENOMEM;
 
        map->elements = elements;
-       map->memsize = bitmap_bytes(0, map->elements);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_port;
        if (!init_map_port(set, map, first_port, last_port)) {
                kfree(map);
index 169e0a04f814b2187f909723975f946cf76ed8dd..cf895bc808713c677e5dcf1d2139b51e30dacc9a 100644 (file)
@@ -1848,6 +1848,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
        struct ip_set *set;
        struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
        int ret = 0;
+       u32 lineno;
 
        if (unlikely(protocol_min_failed(attr) ||
                     !attr[IPSET_ATTR_SETNAME] ||
@@ -1864,7 +1865,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
                return -IPSET_ERR_PROTOCOL;
 
        rcu_read_lock_bh();
-       ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
+       ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
        rcu_read_unlock_bh();
        /* Userspace can't trigger an element to be re-added */
        if (ret == -EAGAIN)
index 8dc892a9dc91a9bbd0b11bf53c3da506de548fd2..605e0f68f8bd30a44b686bbbb6d625be79c23247 100644 (file)
@@ -1239,7 +1239,7 @@ static void ip_vs_process_message(struct netns_ipvs *ipvs, __u8 *buffer,
 
                        p = msg_end;
                        if (p + sizeof(s->v4) > buffer+buflen) {
-                               IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
                                return;
                        }
                        s = (union ip_vs_sync_conn *)p;
index b1e300f8881b20dd8bc0c9bb2206ca911ab6bd73..b00866d777fe0e9ed8018087ebc664c56f29b5c9 100644 (file)
@@ -208,7 +208,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
        struct rtable *ort = skb_rtable(skb);
 
        if (!skb->dev && sk && sk_fullsock(sk))
-               ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
+               ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
 }
 
 static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
index 0af1898af2b808282e7f84d38e00493512a803bb..f475fec84536a3e7bf61b771fa5a6a7a9f06288e 100644 (file)
@@ -895,9 +895,10 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 }
 
 /* Resolve race on insertion if this protocol allows this. */
-static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
-                              enum ip_conntrack_info ctinfo,
-                              struct nf_conntrack_tuple_hash *h)
+static __cold noinline int
+nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
+                   enum ip_conntrack_info ctinfo,
+                   struct nf_conntrack_tuple_hash *h)
 {
        /* This is the conntrack entry already in hashes that won race. */
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
index d8d33ef52ce099546cccf5c52423d6474378b594..6a1c8f1f61718cc90187f8c5c8c3f89293896f04 100644 (file)
@@ -3626,6 +3626,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
 
        list_for_each_entry(net, net_exit_list, exit_list)
                ctnetlink_net_exit(net);
+
+       /* wait for other CPUs until they are done with ctnl_notifiers */
+       synchronize_rcu();
 }
 
 static struct pernet_operations ctnetlink_net_ops = {
index b6b14db3955bfb55a523534ceb8cb7d3716f685a..b3f4a334f9d78d77d5912315290bf32f5390232d 100644 (file)
@@ -677,6 +677,9 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
        unsigned int *timeouts = data;
        int i;
 
+       if (!timeouts)
+               timeouts = dn->dccp_timeout;
+
        /* set default DCCP timeouts. */
        for (i=0; i<CT_DCCP_MAX; i++)
                timeouts[i] = dn->dccp_timeout[i];
index fce3d93f154181e9b5a884f2afedde818fa09bc2..4f897b14b6069d4e15df5d22fa89a47e9a892fab 100644 (file)
@@ -114,7 +114,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
        {
 /*     ORIGINAL        */
 /*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
-/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
 /* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
 /* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
 /* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
@@ -130,7 +130,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 /*     REPLY   */
 /*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
 /* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
 /* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
 /* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
 /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
@@ -316,7 +316,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
                        ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
                }
 
-               ct->proto.sctp.state = new_state;
+               ct->proto.sctp.state = SCTP_CONNTRACK_NONE;
        }
 
        return true;
@@ -594,6 +594,9 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
        struct nf_sctp_net *sn = nf_sctp_pernet(net);
        int i;
 
+       if (!timeouts)
+               timeouts = sn->timeouts;
+
        /* set default SCTP timeouts. */
        for (i=0; i<SCTP_CONNTRACK_MAX; i++)
                timeouts[i] = sn->timeouts[i];
index 9889d52eda8203a5e247ef2e5d2c477f05721e6d..e33a73cb1f42ed92e2dc3056d3f3cf807f23d449 100644 (file)
@@ -134,11 +134,6 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT        (120 * HZ)
 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT        (30 * HZ)
 
-static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
-{
-       return (__s32)(timeout - (u32)jiffies);
-}
-
 static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 {
        const struct nf_conntrack_l4proto *l4proto;
@@ -232,7 +227,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
        int err;
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+       flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
 
        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
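
The timeout handling above leans on nf_flow_timeout_delta(), which this patch
moves out of nf_flow_table_core.c (into a shared header, judging by the later
hunks that still call it). The helper exists because flow->timeout is a raw
u32 jiffies snapshot: a direct "timeout > jiffies" comparison breaks at
wraparound, while a signed 32-bit difference stays correct as long as the
distance is below 2^31. A standalone illustration (plain userspace C,
hypothetical names):

#include <stdint.h>
#include <stdio.h>

static int32_t timeout_delta(uint32_t timeout, uint32_t now)
{
        return (int32_t)(timeout - now);
}

int main(void)
{
        uint32_t now = 0xfffffff0u;     /* jiffies about to wrap */
        uint32_t timeout = now + 100;   /* wraps past zero */

        /* The naive "timeout > now" is false here, yet the timeout
         * really lies 100 ticks in the future: */
        printf("delta = %d\n", timeout_delta(timeout, now));    /* 100 */
        return 0;
}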
index b9e7dd6e60ce2b1327709fb38a0eab5e7e2d1af6..7ea2ddc2aa930ced6efda1d02b2a1c5dc092966e 100644 (file)
@@ -280,7 +280,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
        if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
                return NF_DROP;
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+       flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
        iph = ip_hdr(skb);
        ip_decrease_ttl(iph);
        skb->tstamp = 0;
@@ -509,7 +509,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
        if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
                return NF_DROP;
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+       flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
        ip6h = ipv6_hdr(skb);
        ip6h->hop_limit--;
        skb->tstamp = 0;
index c54c9a6cc981ba6a7e0a6140763946460b11adb4..d06969af1085e571fadd5940c028b64685b9f6c9 100644 (file)
@@ -28,6 +28,7 @@ struct nf_flow_key {
        struct flow_dissector_key_basic                 basic;
        union {
                struct flow_dissector_key_ipv4_addrs    ipv4;
+               struct flow_dissector_key_ipv6_addrs    ipv6;
        };
        struct flow_dissector_key_tcp                   tcp;
        struct flow_dissector_key_ports                 tp;
@@ -57,6 +58,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+       NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
 
@@ -69,15 +71,24 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
                key->ipv4.dst = tuple->dst_v4.s_addr;
                mask->ipv4.dst = 0xffffffff;
                break;
+       case AF_INET6:
+               key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+               key->basic.n_proto = htons(ETH_P_IPV6);
+               key->ipv6.src = tuple->src_v6;
+               memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
+               key->ipv6.dst = tuple->dst_v6;
+               memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
+               break;
        default:
                return -EOPNOTSUPP;
        }
+       match->dissector.used_keys |= BIT(key->control.addr_type);
        mask->basic.n_proto = 0xffff;
 
        switch (tuple->l4proto) {
        case IPPROTO_TCP:
                key->tcp.flags = 0;
-               mask->tcp.flags = TCP_FLAG_RST | TCP_FLAG_FIN;
+               mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
                match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
                break;
        case IPPROTO_UDP:
@@ -96,14 +107,13 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
 
        match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
                                      BIT(FLOW_DISSECTOR_KEY_BASIC) |
-                                     BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
                                      BIT(FLOW_DISSECTOR_KEY_PORTS);
        return 0;
 }
 
 static void flow_offload_mangle(struct flow_action_entry *entry,
-                               enum flow_action_mangle_base htype,
-                               u32 offset, u8 *value, u8 *mask)
+                               enum flow_action_mangle_base htype, u32 offset,
+                               const __be32 *value, const __be32 *mask)
 {
        entry->id = FLOW_ACTION_MANGLE;
        entry->mangle.htype = htype;
@@ -140,12 +150,12 @@ static int flow_offload_eth_src(struct net *net,
        memcpy(&val16, dev->dev_addr, 2);
        val = val16 << 16;
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
-                           (u8 *)&val, (u8 *)&mask);
+                           &val, &mask);
 
        mask = ~0xffffffff;
        memcpy(&val, dev->dev_addr + 2, 4);
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
-                           (u8 *)&val, (u8 *)&mask);
+                           &val, &mask);
        dev_put(dev);
 
        return 0;
@@ -156,27 +166,41 @@ static int flow_offload_eth_dst(struct net *net,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
 {
-       const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+       const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
+       const struct dst_entry *dst_cache;
+       unsigned char ha[ETH_ALEN];
        struct neighbour *n;
        u32 mask, val;
+       u8 nud_state;
        u16 val16;
 
-       n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4);
+       dst_cache = flow->tuplehash[dir].tuple.dst_cache;
+       n = dst_neigh_lookup(dst_cache, daddr);
        if (!n)
                return -ENOENT;
 
+       read_lock_bh(&n->lock);
+       nud_state = n->nud_state;
+       ether_addr_copy(ha, n->ha);
+       read_unlock_bh(&n->lock);
+
+       if (!(nud_state & NUD_VALID)) {
+               neigh_release(n);
+               return -ENOENT;
+       }
+
        mask = ~0xffffffff;
-       memcpy(&val, n->ha, 4);
+       memcpy(&val, ha, 4);
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
-                           (u8 *)&val, (u8 *)&mask);
+                           &val, &mask);
 
        mask = ~0x0000ffff;
-       memcpy(&val16, n->ha + 4, 2);
+       memcpy(&val16, ha + 4, 2);
        val = val16;
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
-                           (u8 *)&val, (u8 *)&mask);
+                           &val, &mask);
        neigh_release(n);
 
        return 0;
@@ -206,7 +230,7 @@ static void flow_offload_ipv4_snat(struct net *net,
        }
 
        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
-                           (u8 *)&addr, (u8 *)&mask);
+                           &addr, &mask);
 }
 
 static void flow_offload_ipv4_dnat(struct net *net,
@@ -233,12 +257,12 @@ static void flow_offload_ipv4_dnat(struct net *net,
        }
 
        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
-                           (u8 *)&addr, (u8 *)&mask);
+                           &addr, &mask);
 }
 
 static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
                                     unsigned int offset,
-                                    u8 *addr, u8 *mask)
+                                    const __be32 *addr, const __be32 *mask)
 {
        struct flow_action_entry *entry;
        int i;
@@ -246,8 +270,7 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
        for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
                entry = flow_action_entry_next(flow_rule);
                flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
-                                   offset + i,
-                                   &addr[i], mask);
+                                   offset + i, &addr[i], mask);
        }
 }
 
@@ -257,23 +280,23 @@ static void flow_offload_ipv6_snat(struct net *net,
                                   struct nf_flow_rule *flow_rule)
 {
        u32 mask = ~htonl(0xffffffff);
-       const u8 *addr;
+       const __be32 *addr;
        u32 offset;
 
        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
-               addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr;
+               addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
-               addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr;
+               addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        default:
                return;
        }
 
-       flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
+       flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
 }
 
 static void flow_offload_ipv6_dnat(struct net *net,
@@ -282,23 +305,23 @@ static void flow_offload_ipv6_dnat(struct net *net,
                                   struct nf_flow_rule *flow_rule)
 {
        u32 mask = ~htonl(0xffffffff);
-       const u8 *addr;
+       const __be32 *addr;
        u32 offset;
 
        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
-               addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr;
+               addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
-               addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr;
+               addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        default:
                return;
        }
 
-       flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
+       flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
 }
 
 static int flow_offload_l4proto(const struct flow_offload *flow)
@@ -326,25 +349,28 @@ static void flow_offload_port_snat(struct net *net,
                                   struct nf_flow_rule *flow_rule)
 {
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-       u32 mask = ~htonl(0xffff0000);
-       __be16 port;
+       u32 mask, port;
        u32 offset;
 
        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
-               port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
+               port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
                offset = 0; /* offsetof(struct tcphdr, source); */
+               port = htonl(port << 16);
+               mask = ~htonl(0xffff0000);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
-               port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
+               port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
                offset = 0; /* offsetof(struct tcphdr, dest); */
+               port = htonl(port);
+               mask = ~htonl(0xffff);
                break;
        default:
-               break;
+               return;
        }
 
        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
-                           (u8 *)&port, (u8 *)&mask);
+                           &port, &mask);
 }
 
 static void flow_offload_port_dnat(struct net *net,
@@ -353,25 +379,28 @@ static void flow_offload_port_dnat(struct net *net,
                                   struct nf_flow_rule *flow_rule)
 {
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-       u32 mask = ~htonl(0xffff);
-       __be16 port;
+       u32 mask, port;
        u32 offset;
 
        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
-               port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
-               offset = 0; /* offsetof(struct tcphdr, source); */
+               port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
+               offset = 0; /* offsetof(struct tcphdr, dest); */
+               port = htonl(port);
+               mask = ~htonl(0xffff);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
-               port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
-               offset = 0; /* offsetof(struct tcphdr, dest); */
+               port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
+               offset = 0; /* offsetof(struct tcphdr, source); */
+               port = htonl(port << 16);
+               mask = ~htonl(0xffff0000);
                break;
        default:
-               break;
+               return;
        }
 
        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
-                           (u8 *)&port, (u8 *)&mask);
+                           &port, &mask);
 }
 
 static void flow_offload_ipv4_checksum(struct net *net,
@@ -574,7 +603,7 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload,
        cls_flow.rule = flow_rule->rule;
 
        list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
-               err = block_cb->cb(TC_SETUP_FT, &cls_flow,
+               err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
                                   block_cb->cb_priv);
                if (err < 0)
                        continue;
@@ -599,7 +628,7 @@ static void flow_offload_tuple_del(struct flow_offload_work *offload,
                             &offload->flow->tuplehash[dir].tuple, &extack);
 
        list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
-               block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
+               block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
 
        offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
 }
@@ -656,7 +685,7 @@ static void flow_offload_tuple_stats(struct flow_offload_work *offload,
                             &offload->flow->tuplehash[dir].tuple, &extack);
 
        list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
-               block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
+               block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
        memcpy(stats, &cls_flow.stats, sizeof(*stats));
 }
 
@@ -752,9 +781,9 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
                           struct flow_offload *flow)
 {
        struct flow_offload_work *offload;
-       s64 delta;
+       __s32 delta;
 
-       delta = flow->timeout - jiffies;
+       delta = nf_flow_timeout_delta(flow->timeout);
        if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
            flow->flags & FLOW_OFFLOAD_HW_DYING)
                return;
@@ -822,7 +851,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
        bo.extack       = &extack;
        INIT_LIST_HEAD(&bo.cb_list);
 
-       err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+       err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo);
        if (err < 0)
                return err;
 
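
The rewritten port helpers above encode a subtlety of the mangle API: a
mangle action patches a whole 32-bit word at offset 0 of the TCP/UDP header,
where the source port occupies the high 16 bits and the destination port the
low 16 bits on the wire. Each rewrite therefore has to shift the port into
the correct half of the word and pair it with the matching mask, which is
what the new htonl(port << 16) / ~htonl(0xffff0000) and htonl(port) /
~htonl(0xffff) pairs do. A small endianness illustration (plain userspace C,
illustrative values):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t new_port = 8080;       /* host order */
        uint32_t val, mask;

        /* Source port: high half of the first 32-bit word. */
        val  = htonl((uint32_t)new_port << 16);
        mask = ~htonl(0xffff0000);
        printf("src rewrite: val=0x%08x mask=0x%08x\n", val, mask);

        /* Destination port: low half of the same word. */
        val  = htonl(new_port);
        mask = ~htonl(0xffff);
        printf("dst rewrite: val=0x%08x mask=0x%08x\n", val, mask);
        return 0;
}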
index 0a59c14b5177645f3bbfd847dc5db3500e3b1961..64eedc17037ad783d3a99851038e91394465617e 100644 (file)
@@ -233,6 +233,19 @@ icmp_manip_pkt(struct sk_buff *skb,
                return false;
 
        hdr = (struct icmphdr *)(skb->data + hdroff);
+       switch (hdr->type) {
+       case ICMP_ECHO:
+       case ICMP_ECHOREPLY:
+       case ICMP_TIMESTAMP:
+       case ICMP_TIMESTAMPREPLY:
+       case ICMP_INFO_REQUEST:
+       case ICMP_INFO_REPLY:
+       case ICMP_ADDRESS:
+       case ICMP_ADDRESSREPLY:
+               break;
+       default:
+               return true;
+       }
        inet_proto_csum_replace2(&hdr->checksum, skb,
                                 hdr->un.echo.id, tuple->src.u.icmp.id, false);
        hdr->un.echo.id = tuple->src.u.icmp.id;
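
The new switch in icmp_manip_pkt() is about the layout of struct icmphdr: its
un union only means "identifier/sequence" for query-style messages, while
error messages reuse the same four bytes for other fields, so unconditionally
rewriting un.echo.id would corrupt them. Returning true skips the rewrite
without dropping the packet. For reference, the union from the UAPI header:

/* From <linux/icmp.h>, abridged. */
struct icmphdr {
        __u8            type;
        __u8            code;
        __sum16         checksum;
        union {
                struct {
                        __be16  id;
                        __be16  sequence;
                } echo;                 /* echo/timestamp/info/address */
                __be32  gateway;        /* redirect */
                struct {
                        __be16  __unused;
                        __be16  mtu;
                } frag;                 /* dest unreachable / frag needed */
        } un;
};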
index a2b58de8260074b86b6a4b51d0e76dc281ef3318..f8f52ff99cfb01d11b3541f046aadb94c098f345 100644 (file)
@@ -189,7 +189,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                goto err;
        }
 
-       if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
+       if (skb_dst(skb) && !skb_dst_force(skb)) {
                status = -ENETDOWN;
                goto err;
        }
index 062b73a83af0efd2537669ec515312f50815d041..7e63b481cc86be8d95e92eae750c091036238f87 100644 (file)
@@ -22,6 +22,8 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 
+#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
+
 static LIST_HEAD(nf_tables_expressions);
 static LIST_HEAD(nf_tables_objects);
 static LIST_HEAD(nf_tables_flowtables);
@@ -550,47 +552,71 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
 
 static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
 
+static const struct nft_chain_type *
+__nft_chain_type_get(u8 family, enum nft_chain_types type)
+{
+       if (family >= NFPROTO_NUMPROTO ||
+           type >= NFT_CHAIN_T_MAX)
+               return NULL;
+
+       return chain_type[family][type];
+}
+
 static const struct nft_chain_type *
 __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
 {
+       const struct nft_chain_type *type;
        int i;
 
        for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
-               if (chain_type[family][i] != NULL &&
-                   !nla_strcmp(nla, chain_type[family][i]->name))
-                       return chain_type[family][i];
+               type = __nft_chain_type_get(family, i);
+               if (!type)
+                       continue;
+               if (!nla_strcmp(nla, type->name))
+                       return type;
        }
        return NULL;
 }
 
-/*
- * Loading a module requires dropping mutex that guards the
- * transaction.
- * We first need to abort any pending transactions as once
- * mutex is unlocked a different client could start a new
- * transaction.  It must not see any 'future generation'
- * changes * as these changes will never happen.
- */
-#ifdef CONFIG_MODULES
-static int __nf_tables_abort(struct net *net);
+struct nft_module_request {
+       struct list_head        list;
+       char                    module[MODULE_NAME_LEN];
+       bool                    done;
+};
 
-static void nft_request_module(struct net *net, const char *fmt, ...)
+#ifdef CONFIG_MODULES
+static int nft_request_module(struct net *net, const char *fmt, ...)
 {
        char module_name[MODULE_NAME_LEN];
+       struct nft_module_request *req;
        va_list args;
        int ret;
 
-       __nf_tables_abort(net);
-
        va_start(args, fmt);
        ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
        va_end(args);
-       if (WARN(ret >= MODULE_NAME_LEN, "truncated: '%s' (len %d)", module_name, ret))
-               return;
+       if (ret >= MODULE_NAME_LEN)
+               return 0;
 
-       mutex_unlock(&net->nft.commit_mutex);
-       request_module("%s", module_name);
-       mutex_lock(&net->nft.commit_mutex);
+       list_for_each_entry(req, &net->nft.module_list, list) {
+               if (!strcmp(req->module, module_name)) {
+                       if (req->done)
+                               return 0;
+
+                       /* A request to load this module already exists. */
+                       return -EAGAIN;
+               }
+       }
+
+       req = kmalloc(sizeof(*req), GFP_KERNEL);
+       if (!req)
+               return -ENOMEM;
+
+       req->done = false;
+       strlcpy(req->module, module_name, MODULE_NAME_LEN);
+       list_add_tail(&req->list, &net->nft.module_list);
+
+       return -EAGAIN;
 }
 #endif
 
@@ -614,10 +640,9 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (autoload) {
-               nft_request_module(net, "nft-chain-%u-%.*s", family,
-                                  nla_len(nla), (const char *)nla_data(nla));
-               type = __nf_tables_chain_type_lookup(nla, family);
-               if (type != NULL)
+               if (nft_request_module(net, "nft-chain-%u-%.*s", family,
+                                      nla_len(nla),
+                                      (const char *)nla_data(nla)) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -1045,12 +1070,18 @@ static int nft_flush_table(struct nft_ctx *ctx)
        }
 
        list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) {
+               if (!nft_is_active_next(ctx->net, flowtable))
+                       continue;
+
                err = nft_delflowtable(ctx, flowtable);
                if (err < 0)
                        goto out;
        }
 
        list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) {
+               if (!nft_is_active_next(ctx->net, obj))
+                       continue;
+
                err = nft_delobj(ctx, obj);
                if (err < 0)
                        goto out;
@@ -1153,11 +1184,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
 
 void nft_register_chain_type(const struct nft_chain_type *ctype)
 {
-       if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
-               return;
-
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) {
+       if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
                nfnl_unlock(NFNL_SUBSYS_NFTABLES);
                return;
        }
@@ -1241,7 +1269,8 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
        [NFTA_CHAIN_HOOK]       = { .type = NLA_NESTED },
        [NFTA_CHAIN_POLICY]     = { .type = NLA_U32 },
-       [NFTA_CHAIN_TYPE]       = { .type = NLA_STRING },
+       [NFTA_CHAIN_TYPE]       = { .type = NLA_STRING,
+                                   .len = NFT_MODULE_AUTOLOAD_LIMIT },
        [NFTA_CHAIN_COUNTERS]   = { .type = NLA_NESTED },
        [NFTA_CHAIN_FLAGS]      = { .type = NLA_U32 },
 };
@@ -1676,6 +1705,7 @@ static int nf_tables_parse_netdev_hooks(struct net *net,
                        goto err_hook;
                }
                if (nft_hook_list_find(hook_list, hook)) {
+                       kfree(hook);
                        err = -EEXIST;
                        goto err_hook;
                }
@@ -1757,7 +1787,10 @@ static int nft_chain_parse_hook(struct net *net,
        hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
        hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
 
-       type = chain_type[family][NFT_CHAIN_T_DEFAULT];
+       type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
+       if (!type)
+               return -EOPNOTSUPP;
+
        if (nla[NFTA_CHAIN_TYPE]) {
                type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
                                                   family, autoload);
@@ -2317,9 +2350,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
 static int nft_expr_type_request_module(struct net *net, u8 family,
                                        struct nlattr *nla)
 {
-       nft_request_module(net, "nft-expr-%u-%.*s", family,
-                          nla_len(nla), (char *)nla_data(nla));
-       if (__nft_expr_type_get(family, nla))
+       if (nft_request_module(net, "nft-expr-%u-%.*s", family,
+                              nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
                return -EAGAIN;
 
        return 0;
@@ -2345,9 +2377,9 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
                if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
 
-               nft_request_module(net, "nft-expr-%.*s",
-                                  nla_len(nla), (char *)nla_data(nla));
-               if (__nft_expr_type_get(family, nla))
+               if (nft_request_module(net, "nft-expr-%.*s",
+                                      nla_len(nla),
+                                      (char *)nla_data(nla)) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -2355,7 +2387,8 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
 }
 
 static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
-       [NFTA_EXPR_NAME]        = { .type = NLA_STRING },
+       [NFTA_EXPR_NAME]        = { .type = NLA_STRING,
+                                   .len = NFT_MODULE_AUTOLOAD_LIMIT },
        [NFTA_EXPR_DATA]        = { .type = NLA_NESTED },
 };
 
@@ -2437,9 +2470,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
                        err = PTR_ERR(ops);
 #ifdef CONFIG_MODULES
                        if (err == -EAGAIN)
-                               nft_expr_type_request_module(ctx->net,
-                                                            ctx->family,
-                                                            tb[NFTA_EXPR_NAME]);
+                               if (nft_expr_type_request_module(ctx->net,
+                                                                ctx->family,
+                                                                tb[NFTA_EXPR_NAME]) != -EAGAIN)
+                                       err = -ENOENT;
 #endif
                        goto err1;
                }
@@ -3276,8 +3310,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (list_empty(&nf_tables_set_types)) {
-               nft_request_module(ctx->net, "nft-set");
-               if (!list_empty(&nf_tables_set_types))
+               if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -4198,7 +4231,8 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
        [NFTA_SET_ELEM_USERDATA]        = { .type = NLA_BINARY,
                                            .len = NFT_USERDATA_MAXLEN },
        [NFTA_SET_ELEM_EXPR]            = { .type = NLA_NESTED },
-       [NFTA_SET_ELEM_OBJREF]          = { .type = NLA_STRING },
+       [NFTA_SET_ELEM_OBJREF]          = { .type = NLA_STRING,
+                                           .len = NFT_OBJ_MAXNAMELEN - 1 },
 };
 
 static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
@@ -4519,8 +4553,10 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                return err;
 
        err = -EINVAL;
-       if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
+       if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) {
+               nft_data_release(&elem.key.val, desc.type);
                return err;
+       }
 
        priv = set->ops->get(ctx->net, set, &elem, flags);
        if (IS_ERR(priv))
@@ -4756,14 +4792,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                if (nla[NFTA_SET_ELEM_DATA] == NULL &&
                    !(flags & NFT_SET_ELEM_INTERVAL_END))
                        return -EINVAL;
-               if (nla[NFTA_SET_ELEM_DATA] != NULL &&
-                   flags & NFT_SET_ELEM_INTERVAL_END)
-                       return -EINVAL;
        } else {
                if (nla[NFTA_SET_ELEM_DATA] != NULL)
                        return -EINVAL;
        }
 
+       if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
+            (nla[NFTA_SET_ELEM_DATA] ||
+             nla[NFTA_SET_ELEM_OBJREF] ||
+             nla[NFTA_SET_ELEM_TIMEOUT] ||
+             nla[NFTA_SET_ELEM_EXPIRATION] ||
+             nla[NFTA_SET_ELEM_USERDATA] ||
+             nla[NFTA_SET_ELEM_EXPR]))
+               return -EINVAL;
+
        timeout = 0;
        if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
                if (!(set->flags & NFT_SET_TIMEOUT))
@@ -5394,8 +5436,7 @@ nft_obj_type_get(struct net *net, u32 objtype)
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (type == NULL) {
-               nft_request_module(net, "nft-obj-%u", objtype);
-               if (__nft_obj_type_get(objtype))
+               if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -5476,7 +5517,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
 
-               type = nft_obj_type_get(net, objtype);
+               type = __nft_obj_type_get(objtype);
                nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
 
                return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
@@ -5968,14 +6009,14 @@ nft_flowtable_type_get(struct net *net, u8 family)
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (type == NULL) {
-               nft_request_module(net, "nf-flowtable-%u", family);
-               if (__nft_flowtable_type_get(family))
+               if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
        return ERR_PTR(-ENOENT);
 }
 
+/* Only called from error and netdev event paths. */
 static void nft_unregister_flowtable_hook(struct net *net,
                                          struct nft_flowtable *flowtable,
                                          struct nft_hook *hook)
@@ -5991,7 +6032,7 @@ static void nft_unregister_flowtable_net_hooks(struct net *net,
        struct nft_hook *hook;
 
        list_for_each_entry(hook, &flowtable->hook_list, list)
-               nft_unregister_flowtable_hook(net, flowtable, hook);
+               nf_unregister_net_hook(net, &hook->ops);
 }
 
 static int nft_register_flowtable_net_hooks(struct net *net,
@@ -6440,12 +6481,14 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
 {
        struct nft_hook *hook, *next;
 
+       flowtable->data.type->free(&flowtable->data);
        list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+               flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+                                           FLOW_BLOCK_UNBIND);
                list_del_rcu(&hook->list);
                kfree(hook);
        }
        kfree(flowtable->name);
-       flowtable->data.type->free(&flowtable->data);
        module_put(flowtable->data.type->owner);
        kfree(flowtable);
 }
@@ -6489,6 +6532,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
                if (hook->ops.dev != dev)
                        continue;
 
+               /* flow_offload_netdev_event() cleans up entries for us. */
                nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook);
                list_del_rcu(&hook->list);
                kfree_rcu(hook, rcu);
@@ -6967,6 +7011,18 @@ static void nft_chain_del(struct nft_chain *chain)
        list_del_rcu(&chain->list);
 }
 
+static void nf_tables_module_autoload_cleanup(struct net *net)
+{
+       struct nft_module_request *req, *next;
+
+       WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+       list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
+               WARN_ON_ONCE(!req->done);
+               list_del(&req->list);
+               kfree(req);
+       }
+}
+
 static void nf_tables_commit_release(struct net *net)
 {
        struct nft_trans *trans;
@@ -6979,6 +7035,7 @@ static void nf_tables_commit_release(struct net *net)
         * to prevent expensive synchronize_rcu() in commit phase.
         */
        if (list_empty(&net->nft.commit_list)) {
+               nf_tables_module_autoload_cleanup(net);
                mutex_unlock(&net->nft.commit_mutex);
                return;
        }
@@ -6993,6 +7050,7 @@ static void nf_tables_commit_release(struct net *net)
        list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
        spin_unlock(&nf_tables_destroy_list_lock);
 
+       nf_tables_module_autoload_cleanup(net);
        mutex_unlock(&net->nft.commit_mutex);
 
        schedule_work(&trans_destroy_work);
@@ -7184,6 +7242,26 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
        return 0;
 }
 
+static void nf_tables_module_autoload(struct net *net)
+{
+       struct nft_module_request *req, *next;
+       LIST_HEAD(module_list);
+
+       list_splice_init(&net->nft.module_list, &module_list);
+       mutex_unlock(&net->nft.commit_mutex);
+       list_for_each_entry_safe(req, next, &module_list, list) {
+               if (req->done) {
+                       list_del(&req->list);
+                       kfree(req);
+               } else {
+                       request_module("%s", req->module);
+                       req->done = true;
+               }
+       }
+       mutex_lock(&net->nft.commit_mutex);
+       list_splice(&module_list, &net->nft.module_list);
+}
+
 static void nf_tables_abort_release(struct nft_trans *trans)
 {
        switch (trans->msg_type) {
@@ -7213,7 +7291,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
        kfree(trans);
 }
 
-static int __nf_tables_abort(struct net *net)
+static int __nf_tables_abort(struct net *net, bool autoload)
 {
        struct nft_trans *trans, *next;
        struct nft_trans_elem *te;
@@ -7335,6 +7413,11 @@ static int __nf_tables_abort(struct net *net)
                nf_tables_abort_release(trans);
        }
 
+       if (autoload)
+               nf_tables_module_autoload(net);
+       else
+               nf_tables_module_autoload_cleanup(net);
+
        return 0;
 }
 
@@ -7343,9 +7426,9 @@ static void nf_tables_cleanup(struct net *net)
        nft_validate_state_update(net, NFT_VALIDATE_SKIP);
 }
 
-static int nf_tables_abort(struct net *net, struct sk_buff *skb)
+static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
 {
-       int ret = __nf_tables_abort(net);
+       int ret = __nf_tables_abort(net, autoload);
 
        mutex_unlock(&net->nft.commit_mutex);
 
@@ -7595,7 +7678,7 @@ int nft_validate_register_load(enum nft_registers reg, unsigned int len)
                return -EINVAL;
        if (len == 0)
                return -EINVAL;
-       if (reg * NFT_REG32_SIZE + len > FIELD_SIZEOF(struct nft_regs, data))
+       if (reg * NFT_REG32_SIZE + len > sizeof_field(struct nft_regs, data))
                return -ERANGE;
 
        return 0;
@@ -7643,7 +7726,7 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
                if (len == 0)
                        return -EINVAL;
                if (reg * NFT_REG32_SIZE + len >
-                   FIELD_SIZEOF(struct nft_regs, data))
+                   sizeof_field(struct nft_regs, data))
                        return -ERANGE;
 
                if (data != NULL && type != NFT_DATA_VALUE)
@@ -7940,6 +8023,7 @@ static int __net_init nf_tables_init_net(struct net *net)
 {
        INIT_LIST_HEAD(&net->nft.tables);
        INIT_LIST_HEAD(&net->nft.commit_list);
+       INIT_LIST_HEAD(&net->nft.module_list);
        mutex_init(&net->nft.commit_mutex);
        net->nft.base_seq = 1;
        net->nft.validate_state = NFT_VALIDATE_SKIP;
@@ -7951,7 +8035,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
 {
        mutex_lock(&net->nft.commit_mutex);
        if (!list_empty(&net->nft.commit_list))
-               __nf_tables_abort(net);
+               __nf_tables_abort(net, false);
        __nft_release_tables(net);
        mutex_unlock(&net->nft.commit_mutex);
        WARN_ON_ONCE(!list_empty(&net->nft.tables));
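
Taken together, the nf_tables hunks above replace the old autoload dance
(abort pending transactions, drop the commit mutex, request_module(), relock)
with a request queue on net->nft.module_list. A condensed sketch of the round
trip, trimmed to control flow (kernel context; len and name are
placeholders):

/* 1) Inside the transaction, commit_mutex held: the lookup queues the
 *    module name and fails the batch instead of dropping the lock. */
if (nft_request_module(net, "nft-expr-%.*s", len, name) == -EAGAIN)
        return ERR_PTR(-EAGAIN);

/* 2) Abort path: __nf_tables_abort(net, true) ends in
 *    nf_tables_module_autoload(), which splices module_list, drops the
 *    mutex, runs request_module() for each entry, marks req->done = true
 *    and retakes the mutex. */

/* 3) nfnetlink replays the batch (NFNL_BATCH_REPLAY); this time
 *    nft_request_module() finds the request with req->done == true and
 *    returns 0, so the lookup proceeds without the mutex ever being
 *    dropped mid-transaction. */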
index 431f3b803bfbba2cf7bfde61419b04b4a2c5e8b7..2bb28483af2253cdcda575b555f1d615cf1a1216 100644 (file)
@@ -44,6 +44,9 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
                expr = nft_expr_next(expr);
        }
 
+       if (num_actions == 0)
+               return ERR_PTR(-EOPNOTSUPP);
+
        flow = nft_flow_rule_alloc(num_actions);
        if (!flow)
                return ERR_PTR(-ENOMEM);
@@ -561,7 +564,7 @@ static void nft_indr_block_cb(struct net_device *dev,
 
        mutex_lock(&net->nft.commit_mutex);
        chain = __nft_offload_get_chain(dev);
-       if (chain) {
+       if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) {
                struct nft_base_chain *basechain;
 
                basechain = nft_base_chain(chain);
@@ -577,6 +580,9 @@ static int nft_offload_netdev_event(struct notifier_block *this,
        struct net *net = dev_net(dev);
        struct nft_chain *chain;
 
+       if (event != NETDEV_UNREGISTER)
+               return NOTIFY_DONE;
+
        mutex_lock(&net->nft.commit_mutex);
        chain = __nft_offload_get_chain(dev);
        if (chain)
index 4abbb452cf6c60400087f02963c2b58ba08896d7..99127e2d95a842e5b8e45da4847715e62709ca4c 100644 (file)
@@ -476,7 +476,7 @@ ack:
        }
 done:
        if (status & NFNL_BATCH_REPLAY) {
-               ss->abort(net, oskb);
+               ss->abort(net, oskb, true);
                nfnl_err_reset(&err_list);
                kfree_skb(skb);
                module_put(ss->owner);
@@ -487,11 +487,11 @@ done:
                        status |= NFNL_BATCH_REPLAY;
                        goto done;
                } else if (err) {
-                       ss->abort(net, oskb);
+                       ss->abort(net, oskb, false);
                        netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
                }
        } else {
-               ss->abort(net, oskb);
+               ss->abort(net, oskb, false);
        }
        if (ss->cleanup)
                ss->cleanup(net);
index 7525063c25f5f998ee26bd1a8289670dc258ab9c..de3a9596b7f1bca045b8683bc48f66230fb65e75 100644 (file)
@@ -236,7 +236,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
        nla_strlcpy(helper->name,
                    tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
        size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
-       if (size > FIELD_SIZEOF(struct nf_conn_help, data)) {
+       if (size > sizeof_field(struct nf_conn_help, data)) {
                ret = -ENOMEM;
                goto err2;
        }
index 02afa752dd2e626355217d2f31e731ac131a4ce7..10e9d50e4e193fb60c71fba6bb92aaed6bbc969c 100644 (file)
@@ -80,7 +80,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
                            tb[NFTA_BITWISE_MASK]);
        if (err < 0)
                return err;
-       if (d1.len != priv->len) {
+       if (d1.type != NFT_DATA_VALUE || d1.len != priv->len) {
                err = -EINVAL;
                goto err1;
        }
@@ -89,7 +89,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
                            tb[NFTA_BITWISE_XOR]);
        if (err < 0)
                goto err1;
-       if (d2.len != priv->len) {
+       if (d2.type != NFT_DATA_VALUE || d2.len != priv->len) {
                err = -EINVAL;
                goto err2;
        }
index b8092069f868fa7ee9608bbe3653e9ec701fbc8f..8a28c127effc22e3caedb34f0583f845f6b40a07 100644 (file)
@@ -81,6 +81,12 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        if (err < 0)
                return err;
 
+       if (desc.type != NFT_DATA_VALUE) {
+               err = -EINVAL;
+               nft_data_release(&priv->data, desc.type);
+               return err;
+       }
+
        priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
        err = nft_validate_register_load(priv->sreg, desc.len);
        if (err < 0)
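
The desc.type checks added to nft_bitwise above and nft_cmp here (and to nft_range below) close the same hole: nft_data_init() can parse either a value or a verdict, and expressions that operate on plain values must reject verdicts and release any references the parsed data holds. A condensed sketch of the recurring guard, using the names from these hunks:

	struct nft_data_desc desc;
	int err;

	err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	if (desc.type != NFT_DATA_VALUE) {
		nft_data_release(&priv->data, desc.type); /* drops verdict refs */
		return -EINVAL;
	}
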
index 46ca8bcca1bd5c31f900b4d89f11ee55b27098d9..faea72c2df328f6f691e1027a28a532364a5ea1e 100644 (file)
@@ -440,12 +440,12 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
 
                switch (ctx->family) {
                case NFPROTO_IPV4:
-                       len = FIELD_SIZEOF(struct nf_conntrack_tuple,
+                       len = sizeof_field(struct nf_conntrack_tuple,
                                           src.u3.ip);
                        break;
                case NFPROTO_IPV6:
                case NFPROTO_INET:
-                       len = FIELD_SIZEOF(struct nf_conntrack_tuple,
+                       len = sizeof_field(struct nf_conntrack_tuple,
                                           src.u3.ip6);
                        break;
                default:
@@ -457,20 +457,20 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
                if (tb[NFTA_CT_DIRECTION] == NULL)
                        return -EINVAL;
 
-               len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u3.ip);
+               len = sizeof_field(struct nf_conntrack_tuple, src.u3.ip);
                break;
        case NFT_CT_SRC_IP6:
        case NFT_CT_DST_IP6:
                if (tb[NFTA_CT_DIRECTION] == NULL)
                        return -EINVAL;
 
-               len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u3.ip6);
+               len = sizeof_field(struct nf_conntrack_tuple, src.u3.ip6);
                break;
        case NFT_CT_PROTO_SRC:
        case NFT_CT_PROTO_DST:
                if (tb[NFTA_CT_DIRECTION] == NULL)
                        return -EINVAL;
-               len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u.all);
+               len = sizeof_field(struct nf_conntrack_tuple, src.u.all);
                break;
        case NFT_CT_BYTES:
        case NFT_CT_PKTS:
@@ -551,7 +551,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
        case NFT_CT_MARK:
                if (tb[NFTA_CT_DIRECTION])
                        return -EINVAL;
-               len = FIELD_SIZEOF(struct nf_conn, mark);
+               len = sizeof_field(struct nf_conn, mark);
                break;
 #endif
 #ifdef CONFIG_NF_CONNTRACK_LABELS
index dd82ff2ee19fef7fe62afcbd1443719075b7df0f..b70b48996801a64e1da3c03792bbfd67350d8c8f 100644 (file)
@@ -200,9 +200,6 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
 static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
                                     const struct nft_expr *expr)
 {
-       struct nft_flow_offload *priv = nft_expr_priv(expr);
-
-       priv->flowtable->use--;
        nf_ct_netns_put(ctx->net, ctx->family);
 }
 
index 39dc94f2491e305f9ef8bdbb91b320dca0b52017..bc9fd98c5d6d9db92e0d982dce6f93ceb893224a 100644 (file)
@@ -43,7 +43,7 @@ static int nft_masq_init(const struct nft_ctx *ctx,
                         const struct nft_expr *expr,
                         const struct nlattr * const tb[])
 {
-       u32 plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
+       u32 plen = sizeof_field(struct nf_nat_range, min_addr.all);
        struct nft_masq *priv = nft_expr_priv(expr);
        int err;
 
index c3c93e95b46e78c7f8d32673b86b1b862a0d4167..8b44a4de53294f25f85eb84bd3d5d4aac568d109 100644 (file)
@@ -141,10 +141,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
        switch (family) {
        case NFPROTO_IPV4:
-               alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip);
+               alen = sizeof_field(struct nf_nat_range, min_addr.ip);
                break;
        case NFPROTO_IPV6:
-               alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6);
+               alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
                break;
        default:
                return -EAFNOSUPPORT;
@@ -171,7 +171,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                }
        }
 
-       plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
+       plen = sizeof_field(struct nf_nat_range, min_addr.all);
        if (tb[NFTA_NAT_REG_PROTO_MIN]) {
                priv->sreg_proto_min =
                        nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]);
index f54d6ae15bb18907635f142d2dfe124ca8cd2859..b42247aa48a9e9b1f4e836538ae6558c1a78f3c4 100644 (file)
@@ -61,6 +61,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
        int err;
        u8 ttl;
 
+       if (!tb[NFTA_OSF_DREG])
+               return -EINVAL;
+
        if (tb[NFTA_OSF_TTL]) {
                ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
                if (ttl > 2)
index 4701fa8a45e746e3f0a01e44813baeeaae1556e0..89efcc5a533d2481127295dbc01a55438fdfbc9c 100644 (file)
@@ -66,11 +66,21 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
        if (err < 0)
                return err;
 
+       if (desc_from.type != NFT_DATA_VALUE) {
+               err = -EINVAL;
+               goto err1;
+       }
+
        err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to),
                            &desc_to, tb[NFTA_RANGE_TO_DATA]);
        if (err < 0)
                goto err1;
 
+       if (desc_to.type != NFT_DATA_VALUE) {
+               err = -EINVAL;
+               goto err2;
+       }
+
        if (desc_from.len != desc_to.len) {
                err = -EINVAL;
                goto err2;
index 43eeb1f609f135ab7db6033e067c032b48c8a49a..5b779171565c1b2eb441ff1420cc31a97398beab 100644 (file)
@@ -48,7 +48,7 @@ static int nft_redir_init(const struct nft_ctx *ctx,
        unsigned int plen;
        int err;
 
-       plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all);
+       plen = sizeof_field(struct nf_nat_range, min_addr.all);
        if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
                priv->sreg_proto_min =
                        nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);
index 57123259452ffc7de21ed72cd9aacdf7cc01e24e..a9f804f7a04ac699fd171b72b64881bba0cd0208 100644 (file)
@@ -74,8 +74,13 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
                                parent = rcu_dereference_raw(parent->rb_left);
                                continue;
                        }
-                       if (nft_rbtree_interval_end(rbe))
-                               goto out;
+                       if (nft_rbtree_interval_end(rbe)) {
+                               if (nft_set_is_anonymous(set))
+                                       return false;
+                               parent = rcu_dereference_raw(parent->rb_left);
+                               interval = NULL;
+                               continue;
+                       }
 
                        *ext = &rbe->ext;
                        return true;
@@ -88,7 +93,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
                *ext = &interval->ext;
                return true;
        }
-out:
+
        return false;
 }
 
@@ -139,8 +144,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
                        if (flags & NFT_SET_ELEM_INTERVAL_END)
                                interval = rbe;
                } else {
-                       if (!nft_set_elem_active(&rbe->ext, genmask))
+                       if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = rcu_dereference_raw(parent->rb_left);
+                               continue;
+                       }
 
                        if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
                            (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
@@ -148,7 +155,11 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
                                *elem = rbe;
                                return true;
                        }
-                       return false;
+
+                       if (nft_rbtree_interval_end(rbe))
+                               interval = NULL;
+
+                       parent = rcu_dereference_raw(parent->rb_left);
                }
        }
 
index f92a82c73880807bfa1be1d65b483049b2378b3c..d67f83a0958d3661aa470a8892e1507386b87195 100644 (file)
@@ -50,7 +50,7 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
        taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr);
 
        if (priv->sreg_port)
-               tport = regs->data[priv->sreg_port];
+               tport = nft_reg_load16(&regs->data[priv->sreg_port]);
        if (!tport)
                tport = hp->dest;
 
@@ -117,7 +117,7 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
        taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr);
 
        if (priv->sreg_port)
-               tport = regs->data[priv->sreg_port];
+               tport = nft_reg_load16(&regs->data[priv->sreg_port]);
        if (!tport)
                tport = hp->dest;
 
@@ -218,14 +218,14 @@ static int nft_tproxy_init(const struct nft_ctx *ctx,
 
        switch (priv->family) {
        case NFPROTO_IPV4:
-               alen = FIELD_SIZEOF(union nf_inet_addr, in);
+               alen = sizeof_field(union nf_inet_addr, in);
                err = nf_defrag_ipv4_enable(ctx->net);
                if (err)
                        return err;
                break;
 #if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
        case NFPROTO_IPV6:
-               alen = FIELD_SIZEOF(union nf_inet_addr, in6);
+               alen = sizeof_field(union nf_inet_addr, in6);
                err = nf_defrag_ipv6_enable(ctx->net);
                if (err)
                        return err;
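
In nft_tproxy, regs->data[] is an array of u32 registers, while a transport port is a 16-bit big-endian value held in the first two bytes of its register. Assigning the whole u32 happens to yield the right bytes on little-endian machines but not on big-endian ones, hence the switch to nft_reg_load16(). In effect the helper does no more than this sketch (the in-tree version is equivalent in behaviour):

static inline u16 reg_load16(const u32 *sreg)
{
	/* read the first two bytes of the register instead of
	 * truncating the 32-bit word, which is endian-dependent */
	return *(const u16 *)sreg;
}
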
index 3d4c2ae605a8e2c3ceee669570775cd5ebbb3e1e..5284fcf16be73463f8ac679989298b6b5d520096 100644 (file)
@@ -76,7 +76,7 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx,
        struct nft_tunnel *priv = nft_expr_priv(expr);
        u32 len;
 
-       if (!tb[NFTA_TUNNEL_KEY] &&
+       if (!tb[NFTA_TUNNEL_KEY] ||
            !tb[NFTA_TUNNEL_DREG])
                return -EINVAL;
 
@@ -266,6 +266,9 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
        if (err < 0)
                return err;
 
+       if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
+               return -EINVAL;
+
        version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
        switch (version) {
        case ERSPAN_VERSION:
index 2236455b10a3671b25547f048c26863fd651a70a..37253d399c6b8dd3ef1da16ab2ed78fd5567e2c6 100644 (file)
@@ -30,7 +30,7 @@ static unsigned int jhash_rnd __read_mostly;
 
 static unsigned int xt_rateest_hash(const char *name)
 {
-       return jhash(name, FIELD_SIZEOF(struct xt_rateest, name), jhash_rnd) &
+       return jhash(name, sizeof_field(struct xt_rateest, name), jhash_rnd) &
               (RATEEST_HSIZE - 1);
 }
 
index 90b2ab9dd449555260b2578d73539b2712a4a1f4..4e31721e729360c8bf555186ab6d4aa67cb00280 100644 (file)
@@ -2755,7 +2755,7 @@ static int __init netlink_proto_init(void)
        if (err != 0)
                goto out;
 
-       BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
+       BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb));
 
        nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
        if (!nl_table)
index 78fe622eba6579979f392ec0b03fd8627638a755..11b554ce07ffcc1ea20b8c388206ca1504fc4405 100644 (file)
@@ -346,7 +346,7 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data,
                        nu->rx_packet_len = -1;
                        nu->rx_skb = nci_skb_alloc(nu->ndev,
                                                   NCI_MAX_PACKET_SIZE,
-                                                  GFP_KERNEL);
+                                                  GFP_ATOMIC);
                        if (!nu->rx_skb)
                                return -ENOMEM;
                }
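
The GFP_KERNEL to GFP_ATOMIC change matters because this receive path is driven by the tty layer's receive callback, which can run in atomic context where sleeping is forbidden. A minimal sketch of the rule (the foo_* names are hypothetical):

#include <linux/skbuff.h>

struct foo_uart {
	struct sk_buff *rx_skb;
};

static int foo_recv_buf(struct foo_uart *fu, int len)
{
	if (!fu->rx_skb) {
		/* GFP_ATOMIC may fail but never sleeps; GFP_KERNEL here
		 * could schedule while atomic */
		fu->rx_skb = alloc_skb(len, GFP_ATOMIC);
		if (!fu->rx_skb)
			return -ENOMEM;
	}
	return 0;
}
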
index 1047e8043084fdacb53b685027ea23e0697716aa..e3a37d22539c061ef946e648a44911a5edf4d42d 100644 (file)
@@ -2497,7 +2497,7 @@ static int __init dp_init(void)
 {
        int err;
 
-       BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+       BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof_field(struct sk_buff, cb));
 
        pr_info("Open vSwitch switching datapath\n");
 
index fd8ed766bdd17100ef8076af653905e9a8e3ae31..758a8c77f7361110268a756c001abe7dfbfa406f 100644 (file)
@@ -37,7 +37,7 @@ enum sw_flow_mac_proto {
  * matching for small options.
  */
 #define TUN_METADATA_OFFSET(opt_len) \
-       (FIELD_SIZEOF(struct sw_flow_key, tun_opts) - opt_len)
+       (sizeof_field(struct sw_flow_key, tun_opts) - opt_len)
 #define TUN_METADATA_OPTS(flow_key, opt_len) \
        ((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
 
@@ -52,7 +52,7 @@ struct vlan_head {
 
 #define OVS_SW_FLOW_KEY_METADATA_SIZE                  \
        (offsetof(struct sw_flow_key, recirc_id) +      \
-       FIELD_SIZEOF(struct sw_flow_key, recirc_id))
+       sizeof_field(struct sw_flow_key, recirc_id))
 
 struct ovs_key_nsh {
        struct ovs_nsh_key_base base;
index 53c1d41fb1c98219be5667214a6c05fb674c887d..118cd66b75163f4a3a844964406e5af36cc9919f 100644 (file)
@@ -544,7 +544,8 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
                        msec = 1;
                        div = ecmd.base.speed / 1000;
                }
-       }
+       } else
+               return DEFAULT_PRB_RETIRE_TOV;
 
        mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 
index 88f98f27ad88e5d786ca771a94801c63c9c419a1..3d24d45be5f446199779ddc7d52b1414c5e4e8f8 100644 (file)
@@ -196,7 +196,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
        hdr->size = cpu_to_le32(len);
        hdr->confirm_rx = 0;
 
-       skb_put_padto(skb, ALIGN(len, 4));
+       skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
 
        mutex_lock(&node->ep_lock);
        if (node->ep)
index 461d75274fb3c71e2825bd1c9404505b655df4b3..971c73c7d34cbcab766cd7bae1e1305a909c25f4 100644 (file)
@@ -1002,10 +1002,13 @@ static void rfkill_sync_work(struct work_struct *work)
 int __must_check rfkill_register(struct rfkill *rfkill)
 {
        static unsigned long rfkill_no;
-       struct device *dev = &rfkill->dev;
+       struct device *dev;
        int error;
 
-       BUG_ON(!rfkill);
+       if (!rfkill)
+               return -EINVAL;
+
+       dev = &rfkill->dev;
 
        mutex_lock(&rfkill_global_mutex);
 
index 46b8ff24020d7bb6788e985686bef12b4fbc83d0..1e8eeb044b07d8c402cfd101896ecc02624e0360 100644 (file)
@@ -1475,7 +1475,7 @@ static int __init rose_proto_init(void)
        int rc;
 
        if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
-               printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
+               printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
                rc = -EINVAL;
                goto out;
        }
index d72ddb67bb742a0f6db6db765dd0035c94f636e8..9d3c4d2d893ab2372af651704efdc512494733a2 100644 (file)
@@ -972,7 +972,7 @@ static int __init af_rxrpc_init(void)
        int ret = -1;
        unsigned int tmp;
 
-       BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
+       BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
 
        get_random_bytes(&tmp, sizeof(tmp));
        tmp &= 0x3fffffff;
index 7c7d10f2e0c181776611cd54d6048a17ed512d5f..5e99df80e80a775b26e0de82eec5738454398abc 100644 (file)
@@ -209,6 +209,7 @@ struct rxrpc_skb_priv {
 struct rxrpc_security {
        const char              *name;          /* name of this service */
        u8                      security_index; /* security type provided */
+       u32                     no_key_abort;   /* Abort code indicating no key */
 
        /* Initialise a security service */
        int (*init)(void);
@@ -977,8 +978,9 @@ static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
 struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
                                                     struct sk_buff *);
 struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
-void rxrpc_new_incoming_connection(struct rxrpc_sock *,
-                                  struct rxrpc_connection *, struct sk_buff *);
+void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
+                                  const struct rxrpc_security *, struct key *,
+                                  struct sk_buff *);
 void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
 
 /*
@@ -1103,7 +1105,9 @@ extern const struct rxrpc_security rxkad;
 int __init rxrpc_init_security(void);
 void rxrpc_exit_security(void);
 int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *,
+                                  const struct rxrpc_security **, struct key **,
+                                  struct sk_buff *);
 
 /*
  * sendmsg.c
index 135bf5cd8dd51be0414ebb89975331d3f47d7b4e..70e44abf106c86c731162a7a40be9963ba9b25f4 100644 (file)
@@ -239,6 +239,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
        kfree(b);
 }
 
+/*
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
+ */
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       ktime_t now = skb->tstamp;
+
+       if (call->peer->rtt_usage < 3 ||
+           ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+               rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+                                 true, true,
+                                 rxrpc_propose_ack_ping_for_params);
+}
+
 /*
  * Allocate a new incoming call from the prealloc pool, along with a connection
  * and a peer as necessary.
@@ -247,6 +263,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
                                                    struct rxrpc_peer *peer,
                                                    struct rxrpc_connection *conn,
+                                                   const struct rxrpc_security *sec,
+                                                   struct key *key,
                                                    struct sk_buff *skb)
 {
        struct rxrpc_backlog *b = rx->backlog;
@@ -294,7 +312,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                conn->params.local = rxrpc_get_local(local);
                conn->params.peer = peer;
                rxrpc_see_connection(conn);
-               rxrpc_new_incoming_connection(rx, conn, skb);
+               rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
        } else {
                rxrpc_get_connection(conn);
        }
@@ -333,9 +351,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                           struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       const struct rxrpc_security *sec = NULL;
        struct rxrpc_connection *conn;
        struct rxrpc_peer *peer = NULL;
-       struct rxrpc_call *call;
+       struct rxrpc_call *call = NULL;
+       struct key *key = NULL;
 
        _enter("");
 
@@ -346,9 +366,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
                skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
                skb->priority = RX_INVALID_OPERATION;
-               _leave(" = NULL [close]");
-               call = NULL;
-               goto out;
+               goto no_call;
        }
 
        /* The peer, connection and call may all have sprung into existence due
@@ -358,29 +376,19 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
         */
        conn = rxrpc_find_connection_rcu(local, skb, &peer);
 
-       call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
+       if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
+               goto no_call;
+
+       call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
+       key_put(key);
        if (!call) {
                skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
-               _leave(" = NULL [busy]");
-               call = NULL;
-               goto out;
+               goto no_call;
        }
 
        trace_rxrpc_receive(call, rxrpc_receive_incoming,
                            sp->hdr.serial, sp->hdr.seq);
 
-       /* Lock the call to prevent rxrpc_kernel_send/recv_data() and
-        * sendmsg()/recvmsg() inconveniently stealing the mutex once the
-        * notification is generated.
-        *
-        * The BUG should never happen because the kernel should be well
-        * behaved enough not to access the call before the first notification
-        * event and userspace is prevented from doing so until the state is
-        * appropriate.
-        */
-       if (!mutex_trylock(&call->user_mutex))
-               BUG();
-
        /* Make the call live. */
        rxrpc_incoming_call(rx, call, skb);
        conn = call->conn;
@@ -421,6 +429,9 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                BUG();
        }
        spin_unlock(&conn->state_lock);
+       spin_unlock(&rx->incoming_lock);
+
+       rxrpc_send_ping(call, skb);
 
        if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
                rxrpc_notify_socket(call);
@@ -433,9 +444,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
        rxrpc_put_call(call, rxrpc_call_put);
 
        _leave(" = %p{%d}", call, call->debug_id);
-out:
-       spin_unlock(&rx->incoming_lock);
        return call;
+
+no_call:
+       spin_unlock(&rx->incoming_lock);
+       _leave(" = NULL [%u]", skb->mark);
+       return NULL;
 }
 
 /*
index a1ceef4f5cd07754fff2fa5e1472ff60a50a272e..808a4723f8684c849da712808938df11f3ce4872 100644 (file)
@@ -376,21 +376,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
        _enter("{%d}", conn->debug_id);
 
        ASSERT(conn->security_ix != 0);
-
-       if (!conn->params.key) {
-               _debug("set up security");
-               ret = rxrpc_init_server_conn_security(conn);
-               switch (ret) {
-               case 0:
-                       break;
-               case -ENOENT:
-                       abort_code = RX_CALL_DEAD;
-                       goto abort;
-               default:
-                       abort_code = RXKADNOAUTH;
-                       goto abort;
-               }
-       }
+       ASSERT(conn->server_key);
 
        if (conn->security->issue_challenge(conn) < 0) {
                abort_code = RX_CALL_DEAD;
index 123d6ceab15cb0b00ccf65aec61370b64cda811b..21da48e3d2e5188b79370910753dac54cfd14760 100644 (file)
@@ -148,6 +148,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
  */
 void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
                                   struct rxrpc_connection *conn,
+                                  const struct rxrpc_security *sec,
+                                  struct key *key,
                                   struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -160,6 +162,8 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
        conn->service_id        = sp->hdr.serviceId;
        conn->security_ix       = sp->hdr.securityIndex;
        conn->out_clientflag    = 0;
+       conn->security          = sec;
+       conn->server_key        = key_get(key);
        if (conn->security_ix)
                conn->state     = RXRPC_CONN_SERVICE_UNSECURED;
        else
index 157be1ff8697be438a7ee6636e37671169fff3df..86bd133b4fa0a858595eb2e259d99b31e58c47e7 100644 (file)
@@ -192,22 +192,6 @@ send_extra_data:
        goto out_no_clear_ca;
 }
 
-/*
- * Ping the other end to fill our RTT cache and to retrieve the rwind
- * and MTU parameters.
- */
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
-{
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       ktime_t now = skb->tstamp;
-
-       if (call->peer->rtt_usage < 3 ||
-           ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
-               rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
-                                 true, true,
-                                 rxrpc_propose_ack_ping_for_params);
-}
-
 /*
  * Apply a hard ACK by advancing the Tx window.
  */
@@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
                call = rxrpc_new_incoming_call(local, rx, skb);
                if (!call)
                        goto reject_packet;
-               rxrpc_send_ping(call, skb);
-               mutex_unlock(&call->user_mutex);
        }
 
        /* Process a call packet; this either discards or passes on the ref
index 8d8aa3c230b5515d8cde676432c77a733f14afe5..098f1f9ec53ba10642dc0c3d2c608a44de688e5b 100644 (file)
@@ -648,9 +648,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
        u32 serial;
        int ret;
 
-       _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key));
+       _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
 
-       ret = key_validate(conn->params.key);
+       ret = key_validate(conn->server_key);
        if (ret < 0)
                return ret;
 
@@ -1293,6 +1293,7 @@ static void rxkad_exit(void)
 const struct rxrpc_security rxkad = {
        .name                           = "rxkad",
        .security_index                 = RXRPC_SECURITY_RXKAD,
+       .no_key_abort                   = RXKADUNKNOWNKEY,
        .init                           = rxkad_init,
        .exit                           = rxkad_exit,
        .init_connection_security       = rxkad_init_connection_security,
index a4c47d2b705478eabf87b699da6852207f525f56..9b1fb9ed07177215aa533666d9af0fbc8bda791e 100644 (file)
@@ -101,62 +101,58 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
 }
 
 /*
- * initialise the security on a server connection
+ * Find the security key for a server connection.
  */
-int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
+bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx,
+                                  const struct rxrpc_security **_sec,
+                                  struct key **_key,
+                                  struct sk_buff *skb)
 {
        const struct rxrpc_security *sec;
-       struct rxrpc_local *local = conn->params.local;
-       struct rxrpc_sock *rx;
-       struct key *key;
-       key_ref_t kref;
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       key_ref_t kref = NULL;
        char kdesc[5 + 1 + 3 + 1];
 
        _enter("");
 
-       sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix);
+       sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex);
 
-       sec = rxrpc_security_lookup(conn->security_ix);
+       sec = rxrpc_security_lookup(sp->hdr.securityIndex);
        if (!sec) {
-               _leave(" = -ENOKEY [lookup]");
-               return -ENOKEY;
+               trace_rxrpc_abort(0, "SVS",
+                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                                 RX_INVALID_OPERATION, EKEYREJECTED);
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+               skb->priority = RX_INVALID_OPERATION;
+               return false;
        }
 
-       /* find the service */
-       read_lock(&local->services_lock);
-       rx = rcu_dereference_protected(local->service,
-                                      lockdep_is_held(&local->services_lock));
-       if (rx && (rx->srx.srx_service == conn->service_id ||
-                  rx->second_service == conn->service_id))
-               goto found_service;
+       if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE)
+               goto out;
 
-       /* the service appears to have died */
-       read_unlock(&local->services_lock);
-       _leave(" = -ENOENT");
-       return -ENOENT;
-
-found_service:
        if (!rx->securities) {
-               read_unlock(&local->services_lock);
-               _leave(" = -ENOKEY");
-               return -ENOKEY;
+               trace_rxrpc_abort(0, "SVR",
+                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                                 RX_INVALID_OPERATION, EKEYREJECTED);
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+               skb->priority = RX_INVALID_OPERATION;
+               return false;
        }
 
        /* look through the service's keyring */
        kref = keyring_search(make_key_ref(rx->securities, 1UL),
                              &key_type_rxrpc_s, kdesc, true);
        if (IS_ERR(kref)) {
-               read_unlock(&local->services_lock);
-               _leave(" = %ld [search]", PTR_ERR(kref));
-               return PTR_ERR(kref);
+               trace_rxrpc_abort(0, "SVK",
+                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                                 sec->no_key_abort, EKEYREJECTED);
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+               skb->priority = sec->no_key_abort;
+               return false;
        }
 
-       key = key_ref_to_ptr(kref);
-       read_unlock(&local->services_lock);
-
-       conn->server_key = key;
-       conn->security = sec;
-
-       _leave(" = 0");
-       return 0;
+out:
+       *_sec = sec;
+       *_key = key_ref_to_ptr(kref);
+       return true;
 }
index bf2d69335d4b963e254da4c1c93b6706438eb8d4..f685c0d737086d5a0317f87cbfeb890e2f4f2a9b 100644 (file)
@@ -312,7 +312,7 @@ static void tcf_ct_act_set_labels(struct nf_conn *ct,
                                  u32 *labels_m)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
-       size_t labels_sz = FIELD_SIZEOF(struct tcf_ct_params, labels);
+       size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);
 
        if (!memchr_inv(labels_m, 0, labels_sz))
                return;
@@ -936,7 +936,7 @@ static struct tc_action_ops act_ct_ops = {
 
 static __net_init int ct_init_net(struct net *net)
 {
-       unsigned int n_bits = FIELD_SIZEOF(struct tcf_ct_params, labels) * 8;
+       unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
        struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
 
        if (nf_connlabels_get(net, n_bits - 1)) {
index 40038c321b4a970dc940714ccda4b39f0d261d6a..19649623493b158b3008c82ce2409ae80ffa6dc6 100644 (file)
@@ -360,6 +360,16 @@ static int tcf_ctinfo_search(struct net *net, struct tc_action **a, u32 index)
        return tcf_idr_search(tn, a, index);
 }
 
+static void tcf_ctinfo_cleanup(struct tc_action *a)
+{
+       struct tcf_ctinfo *ci = to_ctinfo(a);
+       struct tcf_ctinfo_params *cp;
+
+       cp = rcu_dereference_protected(ci->params, 1);
+       if (cp)
+               kfree_rcu(cp, rcu);
+}
+
 static struct tc_action_ops act_ctinfo_ops = {
        .kind   = "ctinfo",
        .id     = TCA_ID_CTINFO,
@@ -367,6 +377,7 @@ static struct tc_action_ops act_ctinfo_ops = {
        .act    = tcf_ctinfo_act,
        .dump   = tcf_ctinfo_dump,
        .init   = tcf_ctinfo_init,
+       .cleanup = tcf_ctinfo_cleanup,
        .walk   = tcf_ctinfo_walker,
        .lookup = tcf_ctinfo_search,
        .size   = sizeof(struct tcf_ctinfo),
index 5e6379028fc392031f4b84599f666a2c61f071d2..c1fcd85719d6a7fa86e65ebb89e82e0d931b7ea0 100644 (file)
@@ -537,6 +537,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        }
 
        ife = to_ife(*a);
+       if (ret == ACT_P_CREATED)
+               INIT_LIST_HEAD(&ife->metalist);
+
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;
@@ -566,10 +569,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                p->eth_type = ife_type;
        }
 
-
-       if (ret == ACT_P_CREATED)
-               INIT_LIST_HEAD(&ife->metalist);
-
        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
                                                  tb[TCA_IFE_METALST], NULL,
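
Hoisting INIT_LIST_HEAD() above the first failure point fixes a crash rather than a leak: the error path releases the action and teardown walks ife->metalist, so the list head must be valid before any goto release_idr can run. The general rule, sketched with hypothetical names:

struct foo {
	struct list_head items;
};

static int foo_setup(struct foo *f);	/* hypothetical */
static void foo_cleanup(struct foo *f);	/* hypothetical */

static int foo_init(struct foo *f)
{
	int err;

	INIT_LIST_HEAD(&f->items);	/* initialise first ... */

	err = foo_setup(f);
	if (err)
		goto err_cleanup;
	return 0;

err_cleanup:
	foo_cleanup(f);	/* ... so cleanup may safely walk f->items */
	return err;
}
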
index 1e3eb3a975324a2630f1b66f08655ea6de8d331c..1ad300e6dbc0a0bfad2c278101e6f3446752ec2e 100644 (file)
@@ -219,8 +219,10 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
        bool use_reinsert;
        bool want_ingress;
        bool is_redirect;
+       bool expects_nh;
        int m_eaction;
        int mac_len;
+       bool at_nh;
 
        rec_level = __this_cpu_inc_return(mirred_rec_level);
        if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
@@ -261,19 +263,19 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                        goto out;
        }
 
-       /* If action's target direction differs than filter's direction,
-        * and devices expect a mac header on xmit, then mac push/pull is
-        * needed.
-        */
        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
-       if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
-               if (!skb_at_tc_ingress(skb)) {
-                       /* caught at egress, act ingress: pull mac */
-                       mac_len = skb_network_header(skb) - skb_mac_header(skb);
+
+       expects_nh = want_ingress || !m_mac_header_xmit;
+       at_nh = skb->data == skb_network_header(skb);
+       if (at_nh != expects_nh) {
+               mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
+                         skb_network_header(skb) - skb_mac_header(skb);
+               if (expects_nh) {
+                       /* target device/action expect data at nh */
                        skb_pull_rcsum(skb2, mac_len);
                } else {
-                       /* caught at ingress, act egress: push mac */
-                       skb_push_rcsum(skb2, skb->mac_len);
+                       /* target device/action expect data at mac */
+                       skb_push_rcsum(skb2, mac_len);
                }
        }
 
index 6a0eacafdb19117701e70fbec63ca7028ecd14a9..c2cdd0fc2e70990a8f1e871238fd32246dae0ed2 100644 (file)
@@ -308,33 +308,12 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                tcf_proto_destroy(tp, rtnl_held, true, extack);
 }
 
-static int walker_check_empty(struct tcf_proto *tp, void *fh,
-                             struct tcf_walker *arg)
+static bool tcf_proto_check_delete(struct tcf_proto *tp)
 {
-       if (fh) {
-               arg->nonempty = true;
-               return -1;
-       }
-       return 0;
-}
-
-static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
-{
-       struct tcf_walker walker = { .fn = walker_check_empty, };
-
-       if (tp->ops->walk) {
-               tp->ops->walk(tp, &walker, rtnl_held);
-               return !walker.nonempty;
-       }
-       return true;
-}
+       if (tp->ops->delete_empty)
+               return tp->ops->delete_empty(tp);
 
-static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
-{
-       spin_lock(&tp->lock);
-       if (tcf_proto_is_empty(tp, rtnl_held))
-               tp->deleting = true;
-       spin_unlock(&tp->lock);
+       tp->deleting = true;
        return tp->deleting;
 }
 
@@ -1751,7 +1730,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
         * concurrently.
         * Mark tp for deletion if it is empty.
         */
-       if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
+       if (!tp_iter || !tcf_proto_check_delete(tp)) {
                mutex_unlock(&chain->filter_chain_lock);
                return;
        }
@@ -2076,9 +2055,8 @@ replay:
                                                               &chain_info));
 
                mutex_unlock(&chain->filter_chain_lock);
-               tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
-                                         protocol, prio, chain, rtnl_held,
-                                         extack);
+               tp_new = tcf_proto_create(name, protocol, prio, chain,
+                                         rtnl_held, extack);
                if (IS_ERR(tp_new)) {
                        err = PTR_ERR(tp_new);
                        goto errout_tp;
index 6c68971d99df702d79d651f38b6b5c1257c332e9..b0f42e62dd7607b4ee55ad39443ca075b9356bcf 100644 (file)
@@ -1481,7 +1481,7 @@ static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
 }
 
 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
-#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
+#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
 
 #define FL_KEY_IS_MASKED(mask, member)                                         \
        memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),               \
@@ -2773,6 +2773,17 @@ static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
                f->res.class = cl;
 }
 
+static bool fl_delete_empty(struct tcf_proto *tp)
+{
+       struct cls_fl_head *head = fl_head_dereference(tp);
+
+       spin_lock(&tp->lock);
+       tp->deleting = idr_is_empty(&head->handle_idr);
+       spin_unlock(&tp->lock);
+
+       return tp->deleting;
+}
+
 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .kind           = "flower",
        .classify       = fl_classify,
@@ -2782,6 +2793,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .put            = fl_put,
        .change         = fl_change,
        .delete         = fl_delete,
+       .delete_empty   = fl_delete_empty,
        .walk           = fl_walk,
        .reoffload      = fl_reoffload,
        .hw_add         = fl_hw_add,
index 8f2ad706784d24b6daec7a7cee5baca2e743112f..d0140a92694a4e6185ff80bdd00ef75b7267872c 100644 (file)
@@ -263,12 +263,12 @@ static int tcf_em_validate(struct tcf_proto *tp,
                                }
                                em->data = (unsigned long) v;
                        }
+                       em->datalen = data_len;
                }
        }
 
        em->matchid = em_hdr->matchid;
        em->flags = em_hdr->flags;
-       em->datalen = data_len;
        em->net = net;
 
        err = 0;
index e0f40400f679c2ae40aecd43c2a4118fab515505..2277369feae58bfb9c70b449e7e096ae5f61dae1 100644 (file)
@@ -1769,7 +1769,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                                      q->avg_window_begin));
                        u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
 
-                       do_div(b, window_interval);
+                       b = div64_u64(b, window_interval);
                        q->avg_peak_bandwidth =
                                cake_ewma(q->avg_peak_bandwidth, b,
                                          b > q->avg_peak_bandwidth ? 2 : 8);
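
The sch_cake fix above is needed because do_div() takes only a 32-bit divisor (it divides its 64-bit dividend in place and returns the remainder), so it silently truncates window_interval, a u64 that can exceed 32 bits at low bandwidths; div64_u64() from linux/math64.h divides by a full 64-bit value. A sketch of the corrected computation (parameter names stand in for the qdisc's fields):

#include <linux/math64.h>

static u64 avg_bw(u64 window_bytes, u64 window_interval)
{
	u64 b = window_bytes * (u64)NSEC_PER_SEC;

	/* do_div(b, window_interval) would truncate the divisor to u32 */
	return div64_u64(b, window_interval);
}
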
index b1c7e726ce5d1ae139f765c5b92dfdaea9bee258..a5a295477eccd52952e26e2ce121315341dddd0f 100644 (file)
@@ -301,6 +301,9 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
+                               if (q->rate_enable)
+                                       smp_store_release(&sk->sk_pacing_status,
+                                                         SK_PACING_FQ);
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
@@ -322,8 +325,12 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 
        fq_flow_set_detached(f);
        f->sk = sk;
-       if (skb->sk == sk)
+       if (skb->sk == sk) {
                f->socket_hash = sk->sk_hash;
+               if (q->rate_enable)
+                       smp_store_release(&sk->sk_pacing_status,
+                                         SK_PACING_FQ);
+       }
        f->credit = q->initial_quantum;
 
        rb_link_node(&f->fq_node, parent, p);
@@ -428,17 +435,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        f->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
-               struct sock *sk = skb->sk;
-
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
-               if (sk && q->rate_enable) {
-                       if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
-                                    SK_PACING_FQ))
-                               smp_store_release(&sk->sk_pacing_status,
-                                                 SK_PACING_FQ);
-               }
                q->inactive_flows--;
        }
 
@@ -787,10 +786,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
 
-               if (quantum > 0)
+               if (quantum > 0 && quantum <= (1 << 20)) {
                        q->quantum = quantum;
-               else
+               } else {
+                       NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
                        err = -EINVAL;
+               }
        }
 
        if (tb[TCA_FQ_INITIAL_QUANTUM])
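
Besides bounding quantum at 1 MiB, the rewrite reports the reason through netlink extended acks rather than a bare -EINVAL. The validation pattern, sketched with a hypothetical TCA_FOO_QUANTUM attribute:

	if (tb[TCA_FOO_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FOO_QUANTUM]);

		if (quantum == 0 || quantum > (1 << 20)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
			return -EINVAL;
		}
		q->quantum = quantum;
	}
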
index 18b884cfdfe8de12223dfa3f8058b8c31726a885..647941702f9fcaa47242382c8489a8ee179064f4 100644 (file)
@@ -292,8 +292,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
        struct tc_prio_qopt_offload graft_offload;
        unsigned long band = arg - 1;
 
-       if (new == NULL)
-               new = &noop_qdisc;
+       if (!new) {
+               new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                       TC_H_MAKE(sch->handle, arg), extack);
+               if (!new)
+                       new = &noop_qdisc;
+               else
+                       qdisc_hash_add(new, true);
+       }
 
        *old = qdisc_replace(sch, new, &q->queues[band]);
 
index fbbf19128c2d752d05cf7694a54912a6d54bc69f..78af2fcf90cc50cdfca6e7c91bd63dd841eb5ec2 100644 (file)
@@ -227,6 +227,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
                sa->sin_port = sh->dest;
                sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
        }
+       memset(sa->sin_zero, 0, sizeof(sa->sin_zero));
 }
 
 /* Initialize an sctp_addr from a socket. */
@@ -235,6 +236,7 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
        addr->v4.sin_family = AF_INET;
        addr->v4.sin_port = 0;
        addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
+       memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
 }
 
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -257,6 +259,7 @@ static void sctp_v4_from_addr_param(union sctp_addr *addr,
        addr->v4.sin_family = AF_INET;
        addr->v4.sin_port = port;
        addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
+       memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
 }
 
 /* Initialize an address parameter from a sctp_addr and return the length
@@ -281,6 +284,7 @@ static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
        saddr->v4.sin_family = AF_INET;
        saddr->v4.sin_port = port;
        saddr->v4.sin_addr.s_addr = fl4->saddr;
+       memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero));
 }
 
 /* Compare two addresses exactly. */
@@ -303,6 +307,7 @@ static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
        addr->v4.sin_family = AF_INET;
        addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
        addr->v4.sin_port = port;
+       memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
 }
 
 /* Is this a wildcard address? */
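
Each of the sctp_v4 helpers above fills in a struct sockaddr_in that can eventually be copied to userspace; since sockaddr_in is shorter than the generic sockaddr, it carries an 8-byte sin_zero pad that must be cleared or stale stack bytes leak out. The shape of every one of these fixes:

	struct sockaddr_in sa;

	sa.sin_family = AF_INET;
	sa.sin_port = port;
	sa.sin_addr.s_addr = saddr;
	memset(sa.sin_zero, 0, sizeof(sa.sin_zero));	/* no stack bytes escape */
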
index acd737d4c0e0a938895ed10fc8aa39e3f908b64f..834e9f82afedc47e99fab3cd140d1d82795e93ac 100644 (file)
@@ -1363,8 +1363,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        /* Generate an INIT ACK chunk.  */
                        new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
                                                     0);
-                       if (!new_obj)
-                               goto nomem;
+                       if (!new_obj) {
+                               error = -ENOMEM;
+                               break;
+                       }
 
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(new_obj));
@@ -1386,7 +1388,8 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        if (!new_obj) {
                                if (cmd->obj.chunk)
                                        sctp_chunk_free(cmd->obj.chunk);
-                               goto nomem;
+                               error = -ENOMEM;
+                               break;
                        }
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(new_obj));
@@ -1433,8 +1436,10 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
 
                        /* Generate a SHUTDOWN chunk.  */
                        new_obj = sctp_make_shutdown(asoc, chunk);
-                       if (!new_obj)
-                               goto nomem;
+                       if (!new_obj) {
+                               error = -ENOMEM;
+                               break;
+                       }
                        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                        SCTP_CHUNK(new_obj));
                        break;
@@ -1770,11 +1775,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        break;
                }
 
-               if (error)
+               if (error) {
+                       cmd = sctp_next_cmd(commands);
+                       while (cmd) {
+                               if (cmd->verb == SCTP_CMD_REPLY)
+                                       sctp_chunk_free(cmd->obj.chunk);
+                               cmd = sctp_next_cmd(commands);
+                       }
                        break;
+               }
        }
 
-out:
        /* If this is in response to a received chunk, wait until
         * we are done with the packet to open the queue so that we don't
         * send multiple packets in response to a single request.
@@ -1789,7 +1800,4 @@ out:
                sp->data_ready_signalled = 0;
 
        return error;
-nomem:
-       error = -ENOMEM;
-       goto out;
 }
index e83cdaa2ab765c1ce20392b5ba2d91573f39ff14..c1a100d2fed39c2d831487e05fcbf5e8d507d470 100644 (file)
@@ -119,7 +119,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
         * a new one with new outcnt to save memory if needed.
         */
        if (outcnt == stream->outcnt)
-               goto in;
+               goto handle_in;
 
        /* Filter out chunks queued on streams that won't exist anymore */
        sched->unsched_all(stream);
@@ -128,24 +128,28 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 
        ret = sctp_stream_alloc_out(stream, outcnt, gfp);
        if (ret)
-               goto out;
+               goto out_err;
 
        for (i = 0; i < stream->outcnt; i++)
                SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
 
-in:
+handle_in:
        sctp_stream_interleave_init(stream);
        if (!incnt)
                goto out;
 
        ret = sctp_stream_alloc_in(stream, incnt, gfp);
-       if (ret) {
-               sched->free(stream);
-               genradix_free(&stream->out);
-               stream->outcnt = 0;
-               goto out;
-       }
+       if (ret)
+               goto in_err;
 
+       goto out;
+
+in_err:
+       sched->free(stream);
+       genradix_free(&stream->in);
+out_err:
+       genradix_free(&stream->out);
+       stream->outcnt = 0;
 out:
        return ret;
 }
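
The reworked sctp_stream_init() error handling follows the standard kernel unwind ladder: one label per resource, entered in reverse allocation order, so a failure releases only what was already set up. Schematically (helper names hypothetical):

static int stream_init(struct stream *s)
{
	int ret;

	ret = alloc_out(s);		/* hypothetical */
	if (ret)
		goto out;

	ret = alloc_in(s);		/* hypothetical */
	if (ret)
		goto err_out;		/* undo only the earlier step */
	return 0;

err_out:
	free_out(s);
	s->outcnt = 0;
out:
	return ret;
}
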
index 7235a60326712186060ffc918b8cd14c596c2a3c..3bbe1a58ec876ec594256166c948ade2d9b4948b 100644 (file)
@@ -263,7 +263,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 
                pf->af->from_sk(&addr, sk);
                pf->to_sk_daddr(&t->ipaddr, sk);
-               dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+               dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
                pf->to_sk_daddr(&addr, sk);
 
                dst = sctp_transport_dst_check(t);
index b997072c72e5092fb955ad69957fc79942889033..cee5bf4a9bb95a517861ddebc0c4deaf603da245 100644 (file)
@@ -857,6 +857,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
                goto out;
 
        sock_hold(&smc->sk); /* sock put in passive closing */
+       if (smc->use_fallback)
+               goto out;
        if (flags & O_NONBLOCK) {
                if (schedule_work(&smc->connect_work))
                        smc->connect_nonblock = 1;
@@ -1721,8 +1723,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
                sk->sk_err = smc->clcsock->sk->sk_err;
                sk->sk_error_report(sk);
        }
-       if (rc)
-               return rc;
 
        if (optlen < sizeof(int))
                return -EINVAL;
@@ -1730,6 +1730,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
                return -EFAULT;
 
        lock_sock(sk);
+       if (rc || smc->use_fallback)
+               goto out;
        switch (optname) {
        case TCP_ULP:
        case TCP_FASTOPEN:
@@ -1741,15 +1743,14 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
                        smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
-                       if (!smc->use_fallback)
-                               rc = -EINVAL;
+                       rc = -EINVAL;
                }
                break;
        case TCP_NODELAY:
                if (sk->sk_state != SMC_INIT &&
                    sk->sk_state != SMC_LISTEN &&
                    sk->sk_state != SMC_CLOSED) {
-                       if (val && !smc->use_fallback)
+                       if (val)
                                mod_delayed_work(system_wq, &smc->conn.tx_work,
                                                 0);
                }
@@ -1758,7 +1759,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
                if (sk->sk_state != SMC_INIT &&
                    sk->sk_state != SMC_LISTEN &&
                    sk->sk_state != SMC_CLOSED) {
-                       if (!val && !smc->use_fallback)
+                       if (!val)
                                mod_delayed_work(system_wq, &smc->conn.tx_work,
                                                 0);
                }
@@ -1769,6 +1770,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
        default:
                break;
        }
+out:
        release_sock(sk);
 
        return rc;
index bb92c7c6214c30d32dfaa38f3e97ae743c384d2c..e419ff277e55856d2a0d01f6af590611e69fa6b7 100644 (file)
@@ -1287,7 +1287,7 @@ static int smc_core_reboot_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
 {
        smc_lgrs_shutdown();
-
+       smc_ib_unregister_client();
        return 0;
 }
 
index 4d38d49d6ad91508d4d6cccbc79318c6a5f3f73e..50623218747f067c0ecf33a51d8d6b61e39ce139 100644 (file)
@@ -957,7 +957,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
                             .msg_iocb = iocb};
        ssize_t res;
 
-       if (file->f_flags & O_NONBLOCK)
+       if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
                msg.msg_flags = MSG_DONTWAIT;
 
        if (iocb->ki_pos != 0)
@@ -982,7 +982,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (iocb->ki_pos != 0)
                return -ESPIPE;
 
-       if (file->f_flags & O_NONBLOCK)
+       if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
                msg.msg_flags = MSG_DONTWAIT;
 
        if (sock->type == SOCK_SEQPACKET)
index 77c7dd7f05e8be8a9793eccee4ba77d4aa6ca15b..fda3889993cbbef28c909cbd334d2820ce0ff3a4 100644 (file)
@@ -77,7 +77,7 @@
 static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc);
 static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
-static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
+static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
 static struct rpcrdma_regbuf *
@@ -244,6 +244,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                        ia->ri_id->device->name,
                        rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
 #endif
+               init_completion(&ia->ri_remove_done);
                set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
                ep->rep_connected = -ENODEV;
                xprt_force_disconnect(xprt);
@@ -297,7 +298,6 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
        int rc;
 
        init_completion(&ia->ri_done);
-       init_completion(&ia->ri_remove_done);
 
        id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
                            xprt, RDMA_PS_TCP, IB_QPT_RC);
@@ -421,7 +421,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
        /* The ULP is responsible for ensuring all DMA
         * mappings and MRs are gone.
         */
-       rpcrdma_reps_destroy(buf);
+       rpcrdma_reps_unmap(r_xprt);
        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
                rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
@@ -599,6 +599,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                                    struct ib_qp_init_attr *qp_init_attr)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+       struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        int rc, err;
 
        trace_xprtrdma_reinsert(r_xprt);
@@ -613,6 +614,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
                goto out2;
        }
+       memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr));
 
        rc = -ENETUNREACH;
        err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
@@ -1090,6 +1092,7 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
        rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        rep->rr_recv_wr.num_sge = 1;
        rep->rr_temp = temp;
+       list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
        return rep;
 
 out_free:
@@ -1100,6 +1103,7 @@ out:
 
 static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
 {
+       list_del(&rep->rr_all);
        rpcrdma_regbuf_free(rep->rr_rdmabuf);
        kfree(rep);
 }
@@ -1118,10 +1122,16 @@ static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
 static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
                            struct rpcrdma_rep *rep)
 {
-       if (!rep->rr_temp)
-               llist_add(&rep->rr_node, &buf->rb_free_reps);
-       else
-               rpcrdma_rep_destroy(rep);
+       llist_add(&rep->rr_node, &buf->rb_free_reps);
+}
+
+static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
+{
+       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+       struct rpcrdma_rep *rep;
+
+       list_for_each_entry(rep, &buf->rb_all_reps, rr_all)
+               rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
 }
 
 static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
@@ -1152,6 +1162,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 
        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);
+       INIT_LIST_HEAD(&buf->rb_all_reps);
 
        rc = -ENOMEM;
        for (i = 0; i < buf->rb_max_requests; i++) {
@@ -1504,6 +1515,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
        wr = NULL;
        while (needed) {
                rep = rpcrdma_rep_get_locked(buf);
+               if (rep && rep->rr_temp) {
+                       rpcrdma_rep_destroy(rep);
+                       continue;
+               }
                if (!rep)
                        rep = rpcrdma_rep_create(r_xprt, temp);
                if (!rep)
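The hunks above switch device removal from destroying reps to merely DMA-unmapping them, which requires a list (rb_all_reps) that tracks every rep for its whole lifetime. A stripped-down sketch of that registry pattern, with illustrative names only:

#include <linux/list.h>
#include <linux/slab.h>

struct rep {
        struct list_head rr_all;        /* on the registry from create to destroy */
};

static void rep_create(struct rep *rep, struct list_head *all_reps)
{
        list_add(&rep->rr_all, all_reps);
}

static void rep_destroy(struct rep *rep)
{
        list_del(&rep->rr_all);         /* mirrors rpcrdma_rep_destroy() above */
        kfree(rep);
}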
index 5d15140a026601819f851fa5f3b98682e7898167..d796d68609edbf9c0515b26f6488e095ba2c9983 100644 (file)
@@ -203,6 +203,7 @@ struct rpcrdma_rep {
        struct xdr_stream       rr_stream;
        struct llist_node       rr_node;
        struct ib_recv_wr       rr_recv_wr;
+       struct list_head        rr_all;
 };
 
 /* To reduce the rate at which a transport invokes ib_post_recv
@@ -368,6 +369,7 @@ struct rpcrdma_buffer {
 
        struct list_head        rb_allreqs;
        struct list_head        rb_all_mrs;
+       struct list_head        rb_all_reps;
 
        struct llist_head       rb_free_reps;
 
index 11255e970dd45b489450f1b746ec63e481853f49..ee49a9f1dd4fe071edf7e02aa6371f3fd3442ef0 100644 (file)
@@ -9,7 +9,7 @@ tipc-y  += addr.o bcast.o bearer.o \
           core.o link.o discover.o msg.o  \
           name_distr.o  subscr.o monitor.o name_table.o net.o  \
           netlink.o netlink_compat.o node.o socket.o eth_media.o \
-          topsrv.o socket.o group.o trace.o
+          topsrv.o group.o trace.o
 
 CFLAGS_trace.o += -I$(src)
 
@@ -20,5 +20,3 @@ tipc-$(CONFIG_TIPC_CRYPTO)    += crypto.o
 
 
 obj-$(CONFIG_TIPC_DIAG)        += diag.o
-
-tipc_diag-y    := diag.o
index 55aeba681cf4ede019470b5bf1728a78664daa65..656ebc79c64ebf396dc9687de12f3bb07654857c 100644 (file)
@@ -305,17 +305,17 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
  * @skb: socket buffer to copy
  * @method: send method to be used
  * @dests: destination nodes for message.
- * @cong_link_cnt: returns number of encountered congested destination links
  * Returns 0 on success, otherwise errno
  */
 static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
                                struct tipc_mc_method *method,
-                               struct tipc_nlist *dests,
-                               u16 *cong_link_cnt)
+                               struct tipc_nlist *dests)
 {
        struct tipc_msg *hdr, *_hdr;
        struct sk_buff_head tmpq;
        struct sk_buff *_skb;
+       u16 cong_link_cnt;
+       int rc = 0;
 
        /* Does the cluster support the new capabilities? */
        if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL))
@@ -343,18 +343,19 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
        _hdr = buf_msg(_skb);
        msg_set_size(_hdr, MCAST_H_SIZE);
        msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));
+       msg_set_errcode(_hdr, TIPC_ERR_NO_PORT);
 
        __skb_queue_head_init(&tmpq);
        __skb_queue_tail(&tmpq, _skb);
        if (method->rcast)
-               tipc_bcast_xmit(net, &tmpq, cong_link_cnt);
+               rc = tipc_bcast_xmit(net, &tmpq, &cong_link_cnt);
        else
-               tipc_rcast_xmit(net, &tmpq, dests, cong_link_cnt);
+               rc = tipc_rcast_xmit(net, &tmpq, dests, &cong_link_cnt);
 
        /* This queue should normally be empty by now */
        __skb_queue_purge(&tmpq);
 
-       return 0;
+       return rc;
 }
 
 /* tipc_mcast_xmit - deliver message to indicated destination nodes
@@ -396,9 +397,14 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
                msg_set_is_rcast(hdr, method->rcast);
 
                /* Switch method ? */
-               if (rcast != method->rcast)
-                       tipc_mcast_send_sync(net, skb, method,
-                                            dests, cong_link_cnt);
+               if (rcast != method->rcast) {
+                       rc = tipc_mcast_send_sync(net, skb, method, dests);
+                       if (unlikely(rc)) {
+                               pr_err("Unable to send SYN: method %d, rc %d\n",
+                                      rcast, rc);
+                               goto exit;
+                       }
+               }
 
                if (method->rcast)
                        rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
index 990a872cec46ebce6280b4cab6b6b44bd052da36..c8c47fc7265361229c9e19f7e46eeb840089d93d 100644 (file)
@@ -257,9 +257,6 @@ static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
 #define tipc_aead_rcu_ptr(rcu_ptr, lock)                               \
        rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))
 
-#define tipc_aead_rcu_swap(rcu_ptr, ptr, lock)                         \
-       rcu_swap_protected((rcu_ptr), (ptr), lockdep_is_held(lock))
-
 #define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)                      \
 do {                                                                   \
        typeof(rcu_ptr) __tmp = rcu_dereference_protected((rcu_ptr),    \
@@ -1189,7 +1186,7 @@ static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
 
        /* Move passive key if any */
        if (key.passive) {
-               tipc_aead_rcu_swap(rx->aead[key.passive], tmp2, &rx->lock);
+               tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock));
                x = (key.passive - key.pending + new_pending) % KEY_MAX;
                new_passive = (x <= 0) ? x + KEY_MAX : x;
        }
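rcu_replace_pointer() is the direct replacement for the removed rcu_swap_protected(): it publishes the new pointer and returns the old one. A minimal usage sketch under an assumed spinlock (struct obj and its fields are illustrative, not from the patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
        struct rcu_head rcu;
        int val;
};

static void replace_obj(struct obj __rcu **gp, struct obj *new,
                        spinlock_t *lock)
{
        struct obj *old;

        spin_lock(lock);
        old = rcu_replace_pointer(*gp, new, lockdep_is_held(lock));
        spin_unlock(lock);

        if (old)
                kfree_rcu(old, rcu);    /* reclaim after a grace period */
}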
index b043e8c6397a8d19b5f6ed40df09f5a561f0ee23..bfe43da127c04a702a5c191a7a699e4fb154c718 100644 (file)
@@ -194,6 +194,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
 {
        struct tipc_net *tn = tipc_net(net);
        struct tipc_msg *hdr = buf_msg(skb);
+       u32 pnet_hash = msg_peer_net_hash(hdr);
        u16 caps = msg_node_capabilities(hdr);
        bool legacy = tn->legacy_addr_format;
        u32 sugg = msg_sugg_node_addr(hdr);
@@ -242,9 +243,8 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
                return;
        if (!tipc_in_scope(legacy, b->domain, src))
                return;
-       tipc_node_check_dest(net, src, peer_id, b, caps, signature,
-                            msg_peer_net_hash(hdr), &maddr, &respond,
-                            &dupl_addr);
+       tipc_node_check_dest(net, src, peer_id, b, caps, signature, pnet_hash,
+                            &maddr, &respond, &dupl_addr);
        if (dupl_addr)
                disc_dupl_alert(b, src, &maddr);
        if (!respond)
index 92d04dc2a44b7a3e68e425035316a3191fef46f4..359b2bc888cfcc78dd391ee7b824881e24ab13cb 100644 (file)
@@ -36,6 +36,7 @@
 
 #include <net/sock.h>
 #include <linux/list_sort.h>
+#include <linux/rbtree_augmented.h>
 #include "core.h"
 #include "netlink.h"
 #include "name_table.h"
@@ -51,6 +52,7 @@
  * @lower: service range lower bound
  * @upper: service range upper bound
  * @tree_node: member of service range RB tree
+ * @max: largest 'upper' of all ranges in this node's subtree
  * @local_publ: list of identical publications made from this node
  *   Used by closest_first lookup and multicast lookup algorithm
  * @all_publ: all publications identical to this one, whatever node and scope
@@ -60,6 +62,7 @@ struct service_range {
        u32 lower;
        u32 upper;
        struct rb_node tree_node;
+       u32 max;
        struct list_head local_publ;
        struct list_head all_publ;
 };
@@ -84,6 +87,130 @@ struct tipc_service {
        struct rcu_head rcu;
 };
 
+#define service_range_upper(sr) ((sr)->upper)
+RB_DECLARE_CALLBACKS_MAX(static, sr_callbacks,
+                        struct service_range, tree_node, u32, max,
+                        service_range_upper)
+
+#define service_range_entry(rbtree_node)                               \
+       (container_of(rbtree_node, struct service_range, tree_node))
+
+#define service_range_overlap(sr, start, end)                          \
+       ((sr)->lower <= (end) && (sr)->upper >= (start))
+
+/**
+ * service_range_foreach_match - iterate over tipc service rbtree for each
+ *                               range match
+ * @sr: the service range pointer as a loop cursor
+ * @sc: the pointer to tipc service which holds the service range rbtree
+ * @start, end: the range (end >= start) for matching
+ */
+#define service_range_foreach_match(sr, sc, start, end)                        \
+       for (sr = service_range_match_first((sc)->ranges.rb_node,       \
+                                           start,                      \
+                                           end);                       \
+            sr;                                                        \
+            sr = service_range_match_next(&(sr)->tree_node,            \
+                                          start,                       \
+                                          end))
+
+/**
+ * service_range_match_first - find first service range matching a range
+ * @n: the root node of service range rbtree for searching
+ * @start, end: the range (end >= start) for matching
+ *
+ * Return: the leftmost service range node in the rbtree that overlaps the
+ * specific range if any. Otherwise, returns NULL.
+ */
+static struct service_range *service_range_match_first(struct rb_node *n,
+                                                      u32 start, u32 end)
+{
+       struct service_range *sr;
+       struct rb_node *l, *r;
+
+       /* No overlaps in the tree at all? */
+       if (!n || service_range_entry(n)->max < start)
+               return NULL;
+
+       while (n) {
+               l = n->rb_left;
+               if (l && service_range_entry(l)->max >= start) {
+                       /* A leftmost overlapping node must be in the left
+                        * subtree. If not, its lower > end, and nodes on
+                        * the right side cannot satisfy the condition
+                        * either.
+                        */
+                       n = l;
+                       continue;
+               }
+
+               /* Nothing in the left subtree can match; return this node
+                * if it overlaps, i.e. it is the leftmost match.
+                */
+               sr = service_range_entry(n);
+               if (service_range_overlap(sr, start, end))
+                       return sr;
+
+               /* Ok, try to lookup on the right side */
+               r = n->rb_right;
+               if (sr->lower <= end &&
+                   r && service_range_entry(r)->max >= start) {
+                       n = r;
+                       continue;
+               }
+               break;
+       }
+
+       return NULL;
+}
+
+/**
+ * service_range_match_next - find next service range matching a range
+ * @n: a node in service range rbtree from which the searching starts
+ * @start, end: the range (end >= start) for matching
+ *
+ * Return: the next service range node to the given node in the rbtree that
+ * overlaps the specific range if any. Otherwise, returns NULL.
+ */
+static struct service_range *service_range_match_next(struct rb_node *n,
+                                                     u32 start, u32 end)
+{
+       struct service_range *sr;
+       struct rb_node *p, *r;
+
+       while (n) {
+               r = n->rb_right;
+               if (r && service_range_entry(r)->max >= start)
+                       /* The next overlapping node, if any, must be in the
+                        * right subtree. If not, its lower > end, and no
+                        * later successor (i.e. an ancestor) of this node
+                        * can satisfy the condition either.
+                        */
+                       return service_range_match_first(r, start, end);
+
+               /* Nothing in the right subtree can match, so go up to the
+                * first ancestor reached through a left-hand child.
+                */
+               while ((p = rb_parent(n)) && n == p->rb_right)
+                       n = p;
+               if (!p)
+                       break;
+
+               /* Return if this ancestor is an overlap */
+               sr = service_range_entry(p);
+               if (service_range_overlap(sr, start, end))
+                       return sr;
+
+               /* Ok, try to lookup more from this ancestor */
+               if (sr->lower <= end) {
+                       n = p;
+                       continue;
+               }
+               break;
+       }
+
+       return NULL;
+}
+
 static int hash(int x)
 {
        return x & (TIPC_NAMETBL_SIZE - 1);
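The helpers above are a hand-rolled interval-tree query over an rbtree sorted by 'lower' and augmented with 'max' (the largest 'upper' in each subtree); tipc_service_create_range() below raises 'max' on each node along the descent before rb_insert_augmented() repairs the rest. The two predicates that drive the pruning, in isolation:

/* [lower, upper] overlaps [start, end] iff both ends reach across */
static bool range_overlap(u32 lower, u32 upper, u32 start, u32 end)
{
        return lower <= end && upper >= start;
}

/* a subtree whose cached 'max' is below 'start' cannot contain a match */
static bool subtree_may_overlap(u32 subtree_max, u32 start)
{
        return subtree_max >= start;
}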
@@ -139,84 +266,51 @@ static struct tipc_service *tipc_service_create(u32 type, struct hlist_head *hd)
        return service;
 }
 
-/**
- * tipc_service_first_range - find first service range in tree matching instance
- *
- * Very time-critical, so binary search through range rb tree
- */
-static struct service_range *tipc_service_first_range(struct tipc_service *sc,
-                                                     u32 instance)
-{
-       struct rb_node *n = sc->ranges.rb_node;
-       struct service_range *sr;
-
-       while (n) {
-               sr = container_of(n, struct service_range, tree_node);
-               if (sr->lower > instance)
-                       n = n->rb_left;
-               else if (sr->upper < instance)
-                       n = n->rb_right;
-               else
-                       return sr;
-       }
-       return NULL;
-}
-
 /*  tipc_service_find_range - find service range matching publication parameters
  */
 static struct service_range *tipc_service_find_range(struct tipc_service *sc,
                                                     u32 lower, u32 upper)
 {
-       struct rb_node *n = sc->ranges.rb_node;
        struct service_range *sr;
 
-       sr = tipc_service_first_range(sc, lower);
-       if (!sr)
-               return NULL;
-
-       /* Look for exact match */
-       for (n = &sr->tree_node; n; n = rb_next(n)) {
-               sr = container_of(n, struct service_range, tree_node);
-               if (sr->upper == upper)
-                       break;
+       service_range_foreach_match(sr, sc, lower, upper) {
+               /* Look for exact match */
+               if (sr->lower == lower && sr->upper == upper)
+                       return sr;
        }
-       if (!n || sr->lower != lower || sr->upper != upper)
-               return NULL;
 
-       return sr;
+       return NULL;
 }
 
 static struct service_range *tipc_service_create_range(struct tipc_service *sc,
                                                       u32 lower, u32 upper)
 {
        struct rb_node **n, *parent = NULL;
-       struct service_range *sr, *tmp;
+       struct service_range *sr;
 
        n = &sc->ranges.rb_node;
        while (*n) {
-               tmp = container_of(*n, struct service_range, tree_node);
                parent = *n;
-               tmp = container_of(parent, struct service_range, tree_node);
-               if (lower < tmp->lower)
-                       n = &(*n)->rb_left;
-               else if (lower > tmp->lower)
-                       n = &(*n)->rb_right;
-               else if (upper < tmp->upper)
-                       n = &(*n)->rb_left;
-               else if (upper > tmp->upper)
-                       n = &(*n)->rb_right;
+               sr = service_range_entry(parent);
+               if (lower == sr->lower && upper == sr->upper)
+                       return sr;
+               if (sr->max < upper)
+                       sr->max = upper;
+               if (lower <= sr->lower)
+                       n = &parent->rb_left;
                else
-                       return tmp;
+                       n = &parent->rb_right;
        }
        sr = kzalloc(sizeof(*sr), GFP_ATOMIC);
        if (!sr)
                return NULL;
        sr->lower = lower;
        sr->upper = upper;
+       sr->max = upper;
        INIT_LIST_HEAD(&sr->local_publ);
        INIT_LIST_HEAD(&sr->all_publ);
        rb_link_node(&sr->tree_node, parent, n);
-       rb_insert_color(&sr->tree_node, &sc->ranges);
+       rb_insert_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
        return sr;
 }
 
@@ -310,7 +404,6 @@ static void tipc_service_subscribe(struct tipc_service *service,
        struct list_head publ_list;
        struct service_range *sr;
        struct tipc_name_seq ns;
-       struct rb_node *n;
        u32 filter;
 
        ns.type = tipc_sub_read(sb, seq.type);
@@ -325,13 +418,7 @@ static void tipc_service_subscribe(struct tipc_service *service,
                return;
 
        INIT_LIST_HEAD(&publ_list);
-       for (n = rb_first(&service->ranges); n; n = rb_next(n)) {
-               sr = container_of(n, struct service_range, tree_node);
-               if (sr->lower > ns.upper)
-                       break;
-               if (!tipc_sub_check_overlap(&ns, sr->lower, sr->upper))
-                       continue;
-
+       service_range_foreach_match(sr, service, ns.lower, ns.upper) {
                first = NULL;
                list_for_each_entry(p, &sr->all_publ, all_publ) {
                        if (filter & TIPC_SUB_PORTS)
@@ -425,7 +512,7 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
 
        /* Remove service range item if this was its last publication */
        if (list_empty(&sr->all_publ)) {
-               rb_erase(&sr->tree_node, &sc->ranges);
+               rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
                kfree(sr);
        }
 
@@ -473,34 +560,39 @@ u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *dnode)
        rcu_read_lock();
        sc = tipc_service_find(net, type);
        if (unlikely(!sc))
-               goto not_found;
+               goto exit;
 
        spin_lock_bh(&sc->lock);
-       sr = tipc_service_first_range(sc, instance);
-       if (unlikely(!sr))
-               goto no_match;
-
-       /* Select lookup algorithm: local, closest-first or round-robin */
-       if (*dnode == self) {
-               list = &sr->local_publ;
-               if (list_empty(list))
-                       goto no_match;
-               p = list_first_entry(list, struct publication, local_publ);
-               list_move_tail(&p->local_publ, &sr->local_publ);
-       } else if (legacy && !*dnode && !list_empty(&sr->local_publ)) {
-               list = &sr->local_publ;
-               p = list_first_entry(list, struct publication, local_publ);
-               list_move_tail(&p->local_publ, &sr->local_publ);
-       } else {
-               list = &sr->all_publ;
-               p = list_first_entry(list, struct publication, all_publ);
-               list_move_tail(&p->all_publ, &sr->all_publ);
+       service_range_foreach_match(sr, sc, instance, instance) {
+               /* Select lookup algo: local, closest-first or round-robin */
+               if (*dnode == self) {
+                       list = &sr->local_publ;
+                       if (list_empty(list))
+                               continue;
+                       p = list_first_entry(list, struct publication,
+                                            local_publ);
+                       list_move_tail(&p->local_publ, &sr->local_publ);
+               } else if (legacy && !*dnode && !list_empty(&sr->local_publ)) {
+                       list = &sr->local_publ;
+                       p = list_first_entry(list, struct publication,
+                                            local_publ);
+                       list_move_tail(&p->local_publ, &sr->local_publ);
+               } else {
+                       list = &sr->all_publ;
+                       p = list_first_entry(list, struct publication,
+                                            all_publ);
+                       list_move_tail(&p->all_publ, &sr->all_publ);
+               }
+               port = p->port;
+               node = p->node;
+               /* Todo: as for legacy, pick the first matching range only;
+                * a "true" round-robin will be performed as needed.
+                */
+               break;
        }
-       port = p->port;
-       node = p->node;
-no_match:
        spin_unlock_bh(&sc->lock);
-not_found:
+
+exit:
        rcu_read_unlock();
        *dnode = node;
        return port;
@@ -523,7 +615,8 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope,
 
        spin_lock_bh(&sc->lock);
 
-       sr = tipc_service_first_range(sc, instance);
+       /* Todo: do a full search, i.e. service_range_foreach_match(), instead? */
+       sr = service_range_match_first(sc->ranges.rb_node, instance, instance);
        if (!sr)
                goto no_match;
 
@@ -552,7 +645,6 @@ void tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
        struct service_range *sr;
        struct tipc_service *sc;
        struct publication *p;
-       struct rb_node *n;
 
        rcu_read_lock();
        sc = tipc_service_find(net, type);
@@ -560,13 +652,7 @@ void tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
                goto exit;
 
        spin_lock_bh(&sc->lock);
-
-       for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
-               sr = container_of(n, struct service_range, tree_node);
-               if (sr->upper < lower)
-                       continue;
-               if (sr->lower > upper)
-                       break;
+       service_range_foreach_match(sr, sc, lower, upper) {
                list_for_each_entry(p, &sr->local_publ, local_publ) {
                        if (p->scope == scope || (!exact && p->scope < scope))
                                tipc_dest_push(dports, 0, p->port);
@@ -587,7 +673,6 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
        struct service_range *sr;
        struct tipc_service *sc;
        struct publication *p;
-       struct rb_node *n;
 
        rcu_read_lock();
        sc = tipc_service_find(net, type);
@@ -595,13 +680,7 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
                goto exit;
 
        spin_lock_bh(&sc->lock);
-
-       for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
-               sr = container_of(n, struct service_range, tree_node);
-               if (sr->upper < lower)
-                       continue;
-               if (sr->lower > upper)
-                       break;
+       service_range_foreach_match(sr, sc, lower, upper) {
                list_for_each_entry(p, &sr->all_publ, all_publ) {
                        tipc_nlist_add(nodes, p->node);
                }
@@ -799,7 +878,7 @@ static void tipc_service_delete(struct net *net, struct tipc_service *sc)
                        tipc_service_remove_publ(sr, p->node, p->key);
                        kfree_rcu(p, rcu);
                }
-               rb_erase(&sr->tree_node, &sc->ranges);
+               rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
                kfree(sr);
        }
        hlist_del_init_rcu(&sc->service_list);
index 0254bb7e418bebe772bf46c7ccd997cf95528cb1..217516357ef26223a309be055cb8f6bbc7a78007 100644 (file)
@@ -204,8 +204,8 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                return -ENOMEM;
        }
 
-       attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
-                               sizeof(struct nlattr *), GFP_KERNEL);
+       attrbuf = kcalloc(tipc_genl_family.maxattr + 1,
+                         sizeof(struct nlattr *), GFP_KERNEL);
        if (!attrbuf) {
                err = -ENOMEM;
                goto err_out;
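kcalloc() differs from kmalloc_array() only in returning zeroed memory (both overflow-check the n * size multiplication); the zeroing matters because attribute slots the parser never fills must read back as NULL. A hedged sketch of the distinction:

#include <linux/slab.h>
#include <net/netlink.h>

static struct nlattr **alloc_attr_table(int maxattr)
{
        /* zeroed, so tb[i] for any unparsed attribute is NULL */
        return kcalloc(maxattr + 1, sizeof(struct nlattr *), GFP_KERNEL);
}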
index 41688da233aba37f92416d01a59a9e6b68a74e31..f9b4fb92c0b1c98f373bc2346e5d5e77c86d48f8 100644 (file)
@@ -287,12 +287,12 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
  *
  * Caller must hold socket lock
  */
-static void tsk_rej_rx_queue(struct sock *sk)
+static void tsk_rej_rx_queue(struct sock *sk, int error)
 {
        struct sk_buff *skb;
 
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
-               tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
+               tipc_sk_respond(sk, skb, error);
 }
 
 static bool tipc_sk_connected(struct sock *sk)
@@ -545,34 +545,45 @@ static void __tipc_shutdown(struct socket *sock, int error)
        /* Remove pending SYN */
        __skb_queue_purge(&sk->sk_write_queue);
 
-       /* Reject all unreceived messages, except on an active connection
-        * (which disconnects locally & sends a 'FIN+' to peer).
-        */
-       while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-               if (TIPC_SKB_CB(skb)->bytes_read) {
-                       kfree_skb(skb);
-                       continue;
-               }
-               if (!tipc_sk_type_connectionless(sk) &&
-                   sk->sk_state != TIPC_DISCONNECTING) {
-                       tipc_set_sk_state(sk, TIPC_DISCONNECTING);
-                       tipc_node_remove_conn(net, dnode, tsk->portid);
-               }
-               tipc_sk_respond(sk, skb, error);
+       /* Remove partially received buffer if any */
+       skb = skb_peek(&sk->sk_receive_queue);
+       if (skb && TIPC_SKB_CB(skb)->bytes_read) {
+               __skb_unlink(skb, &sk->sk_receive_queue);
+               kfree_skb(skb);
        }
 
-       if (tipc_sk_type_connectionless(sk))
+       /* Reject all unreceived messages if connectionless */
+       if (tipc_sk_type_connectionless(sk)) {
+               tsk_rej_rx_queue(sk, error);
                return;
+       }
 
-       if (sk->sk_state != TIPC_DISCONNECTING) {
+       switch (sk->sk_state) {
+       case TIPC_CONNECTING:
+       case TIPC_ESTABLISHED:
+               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+               tipc_node_remove_conn(net, dnode, tsk->portid);
+               /* Send a FIN+/- to its peer */
+               skb = __skb_dequeue(&sk->sk_receive_queue);
+               if (skb) {
+                       __skb_queue_purge(&sk->sk_receive_queue);
+                       tipc_sk_respond(sk, skb, error);
+                       break;
+               }
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
                                      tsk_own_node(tsk), tsk_peer_port(tsk),
                                      tsk->portid, error);
                if (skb)
                        tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
-               tipc_node_remove_conn(net, dnode, tsk->portid);
-               tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+               break;
+       case TIPC_LISTEN:
+               /* Reject all SYN messages */
+               tsk_rej_rx_queue(sk, error);
+               break;
+       default:
+               __skb_queue_purge(&sk->sk_receive_queue);
+               break;
        }
 }
 
@@ -1364,8 +1375,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
        struct tipc_msg *hdr = &tsk->phdr;
        struct tipc_name_seq *seq;
        struct sk_buff_head pkts;
-       u32 dport, dnode = 0;
-       u32 type, inst;
+       u32 dport = 0, dnode = 0;
+       u32 type = 0, inst = 0;
        int mtu, rc;
 
        if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
@@ -1418,23 +1429,11 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
                type = dest->addr.name.name.type;
                inst = dest->addr.name.name.instance;
                dnode = dest->addr.name.domain;
-               msg_set_type(hdr, TIPC_NAMED_MSG);
-               msg_set_hdr_sz(hdr, NAMED_H_SIZE);
-               msg_set_nametype(hdr, type);
-               msg_set_nameinst(hdr, inst);
-               msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
                dport = tipc_nametbl_translate(net, type, inst, &dnode);
-               msg_set_destnode(hdr, dnode);
-               msg_set_destport(hdr, dport);
                if (unlikely(!dport && !dnode))
                        return -EHOSTUNREACH;
        } else if (dest->addrtype == TIPC_ADDR_ID) {
                dnode = dest->addr.id.node;
-               msg_set_type(hdr, TIPC_DIRECT_MSG);
-               msg_set_lookup_scope(hdr, 0);
-               msg_set_destnode(hdr, dnode);
-               msg_set_destport(hdr, dest->addr.id.ref);
-               msg_set_hdr_sz(hdr, BASIC_H_SIZE);
        } else {
                return -EINVAL;
        }
@@ -1445,6 +1444,22 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
        if (unlikely(rc))
                return rc;
 
+       if (dest->addrtype == TIPC_ADDR_NAME) {
+               msg_set_type(hdr, TIPC_NAMED_MSG);
+               msg_set_hdr_sz(hdr, NAMED_H_SIZE);
+               msg_set_nametype(hdr, type);
+               msg_set_nameinst(hdr, inst);
+               msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
+               msg_set_destnode(hdr, dnode);
+               msg_set_destport(hdr, dport);
+       } else { /* TIPC_ADDR_ID */
+               msg_set_type(hdr, TIPC_DIRECT_MSG);
+               msg_set_lookup_scope(hdr, 0);
+               msg_set_destnode(hdr, dnode);
+               msg_set_destport(hdr, dest->addr.id.ref);
+               msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+       }
+
        __skb_queue_head_init(&pkts);
        mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
@@ -2428,8 +2443,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
                        return sock_intr_errno(*timeo_p);
 
                add_wait_queue(sk_sleep(sk), &wait);
-               done = sk_wait_event(sk, timeo_p,
-                                    sk->sk_state != TIPC_CONNECTING, &wait);
+               done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
+                                    &wait);
                remove_wait_queue(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
@@ -2639,7 +2654,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
         * Reject any stray messages received by new socket
         * before the socket lock was taken (very, very unlikely)
         */
-       tsk_rej_rx_queue(new_sk);
+       tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
 
        /* Connect new socket to it's peer */
        tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
index dac24c7aa7d4bb45b3188dae3fb1f6030cd3c299..94774c0e5ff32706dc1a36ebc3133dadde218501 100644 (file)
@@ -732,15 +732,19 @@ out:
        return rc;
 }
 
-static void tls_update(struct sock *sk, struct proto *p)
+static void tls_update(struct sock *sk, struct proto *p,
+                      void (*write_space)(struct sock *sk))
 {
        struct tls_context *ctx;
 
        ctx = tls_get_ctx(sk);
-       if (likely(ctx))
+       if (likely(ctx)) {
+               ctx->sk_write_space = write_space;
                ctx->sk_proto = p;
-       else
+       } else {
                sk->sk_prot = p;
+               sk->sk_write_space = write_space;
+       }
 }
 
 static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
index c6803a82b769b15a117fc444ad98861a25c16f28..c98e602a1a2ded6f90743723445dd4b4a6a3a4c2 100644 (file)
@@ -256,8 +256,6 @@ static int tls_do_decryption(struct sock *sk,
                        return ret;
 
                ret = crypto_wait_req(ret, &ctx->async_wait);
-       } else if (ret == -EBADMSG) {
-               TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
        }
 
        if (async)
@@ -682,12 +680,32 @@ static int tls_push_record(struct sock *sk, int flags,
 
        split_point = msg_pl->apply_bytes;
        split = split_point && split_point < msg_pl->sg.size;
+       if (unlikely((!split &&
+                     msg_pl->sg.size +
+                     prot->overhead_size > msg_en->sg.size) ||
+                    (split &&
+                     split_point +
+                     prot->overhead_size > msg_en->sg.size))) {
+               split = true;
+               split_point = msg_en->sg.size;
+       }
        if (split) {
                rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
                                           split_point, prot->overhead_size,
                                           &orig_end);
                if (rc < 0)
                        return rc;
+               /* This can happen if the above tls_split_open_record()
+                * allocated a single large encryption buffer instead of
+                * two smaller ones. In that case adjust the pointers and
+                * continue without a split.
+                */
+               if (!msg_pl->sg.size) {
+                       tls_merge_open_record(sk, rec, tmp, orig_end);
+                       msg_pl = &rec->msg_plaintext;
+                       msg_en = &rec->msg_encrypted;
+                       split = false;
+               }
                sk_msg_trim(sk, msg_en, msg_pl->sg.size +
                            prot->overhead_size);
        }
@@ -709,6 +727,12 @@ static int tls_push_record(struct sock *sk, int flags,
                sg_mark_end(sk_msg_elem(msg_pl, i));
        }
 
+       if (msg_pl->sg.end < msg_pl->sg.start) {
+               sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
+                        MAX_SKB_FRAGS - msg_pl->sg.start + 1,
+                        msg_pl->sg.data);
+       }
+
        i = msg_pl->sg.start;
        sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
 
@@ -772,7 +796,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
        psock = sk_psock_get(sk);
        if (!psock || !policy) {
                err = tls_push_record(sk, flags, record_type);
-               if (err) {
+               if (err && err != -EINPROGRESS) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                }
@@ -783,10 +807,7 @@ more_data:
        if (psock->eval == __SK_NONE) {
                delta = msg->sg.size;
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);
-               if (delta < msg->sg.size)
-                       delta -= msg->sg.size;
-               else
-                       delta = 0;
+               delta -= msg->sg.size;
        }
        if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
            !enospc && !full_record) {
@@ -801,7 +822,7 @@ more_data:
        switch (psock->eval) {
        case __SK_PASS:
                err = tls_push_record(sk, flags, record_type);
-               if (err < 0) {
+               if (err && err != -EINPROGRESS) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                        goto out_err;
@@ -1515,7 +1536,9 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
                                if (err == -EINPROGRESS)
                                        tls_advance_record_sn(sk, prot,
                                                              &tls_ctx->rx);
-
+                               else if (err == -EBADMSG)
+                                       TLS_INC_STATS(sock_net(sk),
+                                                     LINUX_MIB_TLSDECRYPTERROR);
                                return err;
                        }
                } else {
index 7cfdce10de36b70104229e122d76aea272d80a05..774babbee045ff2649a078d212487c92c0932c1a 100644 (file)
@@ -2865,7 +2865,7 @@ static int __init af_unix_init(void)
 {
        int rc = -1;
 
-       BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
+       BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
 
        rc = proto_register(&unix_proto, 1);
        if (rc != 0) {
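sizeof_field() is the tree-wide rename of FIELD_SIZEOF(); it yields the size of a struct member without needing an instance. A small sketch (struct example is illustrative):

#include <linux/build_bug.h>
#include <linux/stddef.h>

struct example {
        char cb[48];
};

/* compile-time check, same shape as the BUILD_BUG_ON() above */
static_assert(sizeof_field(struct example, cb) == 48);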
index b3bdae74c2435e2445a8badd7f6d3d3561be8908..3492c021925f4b2163ff5aff46d334ae64bbdc41 100644 (file)
@@ -138,28 +138,15 @@ struct hvsock {
  ****************************************************************************
  * The only valid Service GUIDs, from the perspectives of both the host and *
  * Linux VM, that can be connected by the other end, must conform to this   *
- * format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in    *
- * this range [0, 0x7FFFFFFF].                                              *
+ * format: <port>-facb-11e6-bd58-64006a7986d3.                              *
  ****************************************************************************
  *
  * When we write apps on the host to connect(), the GUID ServiceID is used.
  * When we write apps in Linux VM to connect(), we only need to specify the
  * port and the driver will form the GUID and use that to request the host.
  *
- * From the perspective of Linux VM:
- * 1. the local ephemeral port (i.e. the local auto-bound port when we call
- * connect() without explicit bind()) is generated by __vsock_bind_stream(),
- * and the range is [1024, 0xFFFFFFFF).
- * 2. the remote ephemeral port (i.e. the auto-generated remote port for
- * a connect request initiated by the host's connect()) is generated by
- * hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF).
  */
 
-#define MAX_LISTEN_PORT                        ((u32)0x7FFFFFFF)
-#define MAX_VM_LISTEN_PORT             MAX_LISTEN_PORT
-#define MAX_HOST_LISTEN_PORT           MAX_LISTEN_PORT
-#define MIN_HOST_EPHEMERAL_PORT                (MAX_HOST_LISTEN_PORT + 1)
-
 /* 00000000-facb-11e6-bd58-64006a7986d3 */
 static const guid_t srv_id_template =
        GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
@@ -184,34 +171,6 @@ static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id)
        vsock_addr_init(addr, VMADDR_CID_ANY, port);
 }
 
-static void hvs_remote_addr_init(struct sockaddr_vm *remote,
-                                struct sockaddr_vm *local)
-{
-       static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
-       struct sock *sk;
-
-       /* Remote peer is always the host */
-       vsock_addr_init(remote, VMADDR_CID_HOST, VMADDR_PORT_ANY);
-
-       while (1) {
-               /* Wrap around ? */
-               if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT ||
-                   host_ephemeral_port == VMADDR_PORT_ANY)
-                       host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
-
-               remote->svm_port = host_ephemeral_port++;
-
-               sk = vsock_find_connected_socket(remote, local);
-               if (!sk) {
-                       /* Found an available ephemeral port */
-                       return;
-               }
-
-               /* Release refcnt got in vsock_find_connected_socket */
-               sock_put(sk);
-       }
-}
-
 static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
 {
        set_channel_pending_send_size(chan,
@@ -341,12 +300,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
        if_type = &chan->offermsg.offer.if_type;
        if_instance = &chan->offermsg.offer.if_instance;
        conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
-
-       /* The host or the VM should only listen on a port in
-        * [0, MAX_LISTEN_PORT]
-        */
-       if (!is_valid_srv_id(if_type) ||
-           get_port_by_srv_id(if_type) > MAX_LISTEN_PORT)
+       if (!is_valid_srv_id(if_type))
                return;
 
        hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
@@ -371,8 +325,11 @@ static void hvs_open_connection(struct vmbus_channel *chan)
                vnew = vsock_sk(new);
 
                hvs_addr_init(&vnew->local_addr, if_type);
-               hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
 
+               /* Remote peer is always the host */
+               vsock_addr_init(&vnew->remote_addr,
+                               VMADDR_CID_HOST, VMADDR_PORT_ANY);
+               vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
                ret = vsock_assign_transport(vnew, vsock_sk(sk));
                /* The assigned transport (chosen from remote_addr) must be
                 * the same one on which we received the request.
@@ -766,16 +723,6 @@ static bool hvs_stream_is_active(struct vsock_sock *vsk)
 
 static bool hvs_stream_allow(u32 cid, u32 port)
 {
-       /* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is
-        * reserved as ephemeral ports, which are used as the host's ports
-        * when the host initiates connections.
-        *
-        * Perform this check in the guest so an immediate error is produced
-        * instead of a timeout.
-        */
-       if (port > MAX_HOST_LISTEN_PORT)
-               return false;
-
        if (cid == VMADDR_CID_HOST)
                return true;
 
index e5ea29c6bca7eb782c47398ebc1e3c262257238c..6abec3fc81d1c6d59ce8337b818a720e13234c21 100644 (file)
@@ -34,6 +34,9 @@ virtio_transport_get_ops(struct vsock_sock *vsk)
 {
        const struct vsock_transport *t = vsock_core_get_transport(vsk);
 
+       if (WARN_ON(!t))
+               return NULL;
+
        return container_of(t, struct virtio_transport, transport);
 }
 
@@ -161,15 +164,25 @@ void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
 }
 EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
 
+/* This function can only be used on connecting/connected sockets,
+ * since a socket assigned to a transport is required.
+ *
+ * Do not use on listener sockets!
+ */
 static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
                                          struct virtio_vsock_pkt_info *info)
 {
        u32 src_cid, src_port, dst_cid, dst_port;
+       const struct virtio_transport *t_ops;
        struct virtio_vsock_sock *vvs;
        struct virtio_vsock_pkt *pkt;
        u32 pkt_len = info->pkt_len;
 
-       src_cid = virtio_transport_get_ops(vsk)->transport.get_local_cid();
+       t_ops = virtio_transport_get_ops(vsk);
+       if (unlikely(!t_ops))
+               return -EFAULT;
+
+       src_cid = t_ops->transport.get_local_cid();
        src_port = vsk->local_addr.svm_port;
        if (!info->remote_cid) {
                dst_cid = vsk->remote_addr.svm_cid;
@@ -202,7 +215,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
 
        virtio_transport_inc_tx_pkt(vvs, pkt);
 
-       return virtio_transport_get_ops(vsk)->send_pkt(pkt);
+       return t_ops->send_pkt(pkt);
 }
 
 static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
@@ -1021,18 +1034,18 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
        int ret;
 
        if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
-               virtio_transport_reset(vsk, pkt);
+               virtio_transport_reset_no_sock(t, pkt);
                return -EINVAL;
        }
 
        if (sk_acceptq_is_full(sk)) {
-               virtio_transport_reset(vsk, pkt);
+               virtio_transport_reset_no_sock(t, pkt);
                return -ENOMEM;
        }
 
        child = vsock_create_connected(sk);
        if (!child) {
-               virtio_transport_reset(vsk, pkt);
+               virtio_transport_reset_no_sock(t, pkt);
                return -ENOMEM;
        }
 
@@ -1054,7 +1067,7 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
         */
        if (ret || vchild->transport != &t->transport) {
                release_sock(child);
-               virtio_transport_reset(vsk, pkt);
+               virtio_transport_reset_no_sock(t, pkt);
                sock_put(child);
                return ret;
        }
index 350513744575a4387d283f3533c23a3f746e999f..3e25229a059de83505860e9a4c9ca9428c57153b 100644 (file)
@@ -1102,6 +1102,7 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync)
 
 #ifdef CONFIG_CFG80211_WEXT
        kzfree(wdev->wext.keys);
+       wdev->wext.keys = NULL;
 #endif
        /* only initialized if we have a netdev */
        if (wdev->netdev)
index da5262b2298bda73837facb23781a3dfb8f26ca3..1e97ac5435b236fdaa6b087a579e7e950a2bf669 100644 (file)
@@ -10843,6 +10843,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
                if (err)
                        return err;
 
+               cfg80211_sinfo_release_content(&sinfo);
                if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
                        wdev->cqm_config->last_rssi_event_value =
                                (s8) sinfo.rx_beacon_signal_avg;
@@ -13796,6 +13797,8 @@ static int nl80211_probe_mesh_link(struct sk_buff *skb, struct genl_info *info)
        if (err)
                return err;
 
+       cfg80211_sinfo_release_content(&sinfo);
+
        return rdev_probe_mesh_link(rdev, dev, dest, buf, len);
 }
 
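Both hunks plug the same leak: rdev_get_station() may allocate per-TID statistics hanging off the station_info, so any path that does not pass the sinfo on must release its content. A hedged sketch of the rule (rdev/dev/mac_addr stand in for the surrounding functions' locals):

        struct station_info sinfo = {};

        if (!rdev_get_station(rdev, dev, mac_addr, &sinfo)) {
                /* ... read the fields we need ... */
                cfg80211_sinfo_release_content(&sinfo); /* frees sinfo.pertid */
        }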
index e853a4fe6f97f2cd716cdd5253a4d47f364b9d8a..e0d34f796d0b3ab934cf7f8945bb0b86440c0285 100644 (file)
@@ -538,6 +538,10 @@ static inline int
 rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
 {
        int ret;
+
+       if (!rdev->ops->set_wiphy_params)
+               return -EOPNOTSUPP;
+
        trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
        ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
        trace_rdev_return_int(&rdev->wiphy, ret);
@@ -1167,6 +1171,16 @@ rdev_start_radar_detection(struct cfg80211_registered_device *rdev,
        return ret;
 }
 
+static inline void
+rdev_end_cac(struct cfg80211_registered_device *rdev,
+            struct net_device *dev)
+{
+       trace_rdev_end_cac(&rdev->wiphy, dev);
+       if (rdev->ops->end_cac)
+               rdev->ops->end_cac(&rdev->wiphy, dev);
+       trace_rdev_return_void(&rdev->wiphy);
+}
+
 static inline int
 rdev_set_mcast_rate(struct cfg80211_registered_device *rdev,
                    struct net_device *dev,
index 446c76d44e65a04878ceeaabda4f428e59e5b0f9..fff9a74891fc433e5ac05970b23aa7f13adaad78 100644 (file)
@@ -2261,14 +2261,15 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
 
 static void handle_channel_custom(struct wiphy *wiphy,
                                  struct ieee80211_channel *chan,
-                                 const struct ieee80211_regdomain *regd)
+                                 const struct ieee80211_regdomain *regd,
+                                 u32 min_bw)
 {
        u32 bw_flags = 0;
        const struct ieee80211_reg_rule *reg_rule = NULL;
        const struct ieee80211_power_rule *power_rule = NULL;
        u32 bw;
 
-       for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+       for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
                reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(chan->center_freq),
                                              regd, bw);
                if (!IS_ERR(reg_rule))
@@ -2324,8 +2325,14 @@ static void handle_band_custom(struct wiphy *wiphy,
        if (!sband)
                return;
 
+       /*
+        * We currently assume that you always want at least 20 MHz,
+        * otherwise channel 12 might get enabled if this rule is
+        * compatible with the US regdomain, which permits 2402 - 2472 MHz.
+        */
        for (i = 0; i < sband->n_channels; i++)
-               handle_channel_custom(wiphy, &sband->channels[i], regd);
+               handle_channel_custom(wiphy, &sband->channels[i], regd,
+                                     MHZ_TO_KHZ(20));
 }
 
 /* Used by drivers prior to wiphy registration */
@@ -3885,6 +3892,25 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(regulatory_pre_cac_allowed);
 
+static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
+{
+       struct wireless_dev *wdev;
+
+       /* If we finished CAC or received radar, we should end any
+        * CAC running on the same channels.
+        * The check !cfg80211_chandef_dfs_usable covers two cases:
+        * either all channels are available - meaning the CAC_FINISHED
+        * event has affected another wdev's state, or a channel in
+        * the wdev's chandef is unavailable - meaning the RADAR_DETECTED
+        * event has affected another wdev's state.
+        * In both cases we should end the CAC on that wdev.
+        */
+       list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+               if (wdev->cac_started &&
+                   !cfg80211_chandef_dfs_usable(&rdev->wiphy, &wdev->chandef))
+                       rdev_end_cac(rdev, wdev->netdev);
+       }
+}
+
 void regulatory_propagate_dfs_state(struct wiphy *wiphy,
                                    struct cfg80211_chan_def *chandef,
                                    enum nl80211_dfs_state dfs_state,
@@ -3911,8 +3937,10 @@ void regulatory_propagate_dfs_state(struct wiphy *wiphy,
                cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state);
 
                if (event == NL80211_RADAR_DETECTED ||
-                   event == NL80211_RADAR_CAC_FINISHED)
+                   event == NL80211_RADAR_CAC_FINISHED) {
                        cfg80211_sched_dfs_chan_update(rdev);
+                       cfg80211_check_and_end_cac(rdev);
+               }
 
                nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL);
        }
index 7a6c38ddc65adbaf60855749a22f120a86f05a2b..d32a2ec4d96ace3b26b53bc8e895ab305961ae4b 100644 (file)
@@ -1307,14 +1307,14 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
        if (wdev->conn_owner_nlportid) {
                switch (wdev->iftype) {
                case NL80211_IFTYPE_ADHOC:
-                       cfg80211_leave_ibss(rdev, wdev->netdev, false);
+                       __cfg80211_leave_ibss(rdev, wdev->netdev, false);
                        break;
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_P2P_GO:
-                       cfg80211_stop_ap(rdev, wdev->netdev, false);
+                       __cfg80211_stop_ap(rdev, wdev->netdev, false);
                        break;
                case NL80211_IFTYPE_MESH_POINT:
-                       cfg80211_leave_mesh(rdev, wdev->netdev);
+                       __cfg80211_leave_mesh(rdev, wdev->netdev);
                        break;
                case NL80211_IFTYPE_STATION:
                case NL80211_IFTYPE_P2P_CLIENT:
index d98ad2b3143b04dc1e75f69995a13f725c739984..3ef1679b0e667916fece6df7bdc55f0fa368b72a 100644 (file)
@@ -646,6 +646,11 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa,
        TP_ARGS(wiphy, netdev)
 );
 
+DEFINE_EVENT(wiphy_netdev_evt, rdev_end_cac,
+            TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+            TP_ARGS(wiphy, netdev)
+);
+
 DECLARE_EVENT_CLASS(station_add_change,
        TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
                 struct station_parameters *params),
@@ -2009,7 +2014,7 @@ TRACE_EVENT(rdev_start_nan,
                WIPHY_ENTRY
                WDEV_ENTRY
                __field(u8, master_pref)
-               __field(u8, bands);
+               __field(u8, bands)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
@@ -2031,8 +2036,8 @@ TRACE_EVENT(rdev_nan_change_conf,
                WIPHY_ENTRY
                WDEV_ENTRY
                __field(u8, master_pref)
-               __field(u8, bands);
-               __field(u32, changes);
+               __field(u8, bands)
+               __field(u32, changes)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
index 5b4ed5bbc542555a4ebd4e8fcfbb6dccf10e9f3f..8481e9ac33da5c71652186e8110b92e025eedae4 100644 (file)
@@ -564,7 +564,7 @@ __frame_add_frag(struct sk_buff *skb, struct page *page,
        struct skb_shared_info *sh = skb_shinfo(skb);
        int page_offset;
 
-       page_ref_inc(page);
+       get_page(page);
        page_offset = ptr - page_address(page);
        skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size);
 }
index 5e677dac2a0ceaa94b6f2d1548133b0fc33da8dc..69102fda9ebd46343a0444d7206799223e24a0fd 100644 (file)
@@ -657,7 +657,8 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
        return NULL;
 }
 
-static int iw_handler_get_iwstats(struct net_device *          dev,
+/* noinline to avoid a bogus warning with -O3 */
+static noinline int iw_handler_get_iwstats(struct net_device * dev,
                                  struct iw_request_info *      info,
                                  union iwreq_data *            wrqu,
                                  char *                        extra)
index c34f7d0776046f2c15b668b66f020be8008ea731..d5b09bbff3754ff4138b6c6effe1c1e52901b266 100644 (file)
@@ -659,6 +659,12 @@ static int x25_release(struct socket *sock)
                        sock_set_flag(sk, SOCK_DEAD);
                        sock_set_flag(sk, SOCK_DESTROY);
                        break;
+
+               case X25_STATE_5:
+                       x25_write_internal(sk, X25_CLEAR_REQUEST);
+                       x25_disconnect(sk, 0, 0, 0);
+                       __x25_destroy_socket(sk);
+                       goto out;
        }
 
        sock_orphan(sk);
@@ -760,6 +766,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
        if (sk->sk_state == TCP_ESTABLISHED)
                goto out;
 
+       rc = -EALREADY; /* Do nothing if call is already in progress */
+       if (sk->sk_state == TCP_SYN_SENT)
+               goto out;
+
        sk->sk_state   = TCP_CLOSE;
        sock->state = SS_UNCONNECTED;
 
@@ -806,7 +816,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
        /* Now the loop */
        rc = -EINPROGRESS;
        if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
-               goto out_put_neigh;
+               goto out;
 
        rc = x25_wait_for_connection_establishment(sk);
        if (rc)
@@ -1054,6 +1064,8 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
        if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) {
                x25_write_internal(make, X25_CALL_ACCEPTED);
                makex25->state = X25_STATE_3;
+       } else {
+               makex25->state = X25_STATE_5;
        }
 
        /*
index f97c43344e95dd31fb5efa3fd7f83192ab7ad333..4d3bb46aaae0d9b0d5e4c17168b6886b48677d41 100644 (file)
@@ -382,6 +382,35 @@ out_clear:
        return 0;
 }
 
+/*
+ * State machine for state 5, Call Accepted / Call Connected pending (X25_ACCPT_APPRV_FLAG).
+ * The handling of the timer(s) is in file x25_timer.c.
+ * Handling of state 0 and connection release is in af_x25.c.
+ */
+static int x25_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
+{
+       struct x25_sock *x25 = x25_sk(sk);
+
+       switch (frametype) {
+               case X25_CLEAR_REQUEST:
+                       if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) {
+                               x25_write_internal(sk, X25_CLEAR_REQUEST);
+                               x25->state = X25_STATE_2;
+                               x25_start_t23timer(sk);
+                               return 0;
+                       }
+
+                       x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+                       x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+                       break;
+
+               default:
+                       break;
+       }
+
+       return 0;
+}
+
 /* Higher level upcall for a LAPB frame */
 int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
 {
@@ -406,6 +435,9 @@ int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
        case X25_STATE_4:
                queued = x25_state4_machine(sk, skb, frametype);
                break;
+       case X25_STATE_5:
+               queued = x25_state5_machine(sk, skb, frametype);
+               break;
        }
 
        x25_kick(sk);
index 956793893c9dec2752b8e6a3140cc44c0e633df3..328f661b83b2ec5c86440e310515d9244aad6b5a 100644 (file)
@@ -334,12 +334,21 @@ out:
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx);
 
-static int xsk_zc_xmit(struct xdp_sock *xs)
+static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
 {
        struct net_device *dev = xs->dev;
+       int err;
+
+       rcu_read_lock();
+       err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
+       rcu_read_unlock();
+
+       return err;
+}
 
-       return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
-                                              XDP_WAKEUP_TX);
+static int xsk_zc_xmit(struct xdp_sock *xs)
+{
+       return xsk_wakeup(xs, XDP_WAKEUP_TX);
 }
 
 static void xsk_destruct_skb(struct sk_buff *skb)
@@ -453,19 +462,16 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
        __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
-       struct net_device *dev;
        struct xdp_umem *umem;
 
        if (unlikely(!xsk_is_bound(xs)))
                return mask;
 
-       dev = xs->dev;
        umem = xs->umem;
 
        if (umem->need_wakeup) {
-               if (dev->netdev_ops->ndo_xsk_wakeup)
-                       dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
-                                                       umem->need_wakeup);
+               if (xs->zc)
+                       xsk_wakeup(xs, umem->need_wakeup);
                else
                        /* Poll needs to drive Tx also in copy mode */
                        __xsk_sendmsg(sk);
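Funneling every ndo_xsk_wakeup() call through xsk_wakeup() wraps the driver op in an RCU read-side section, so a driver tearing down its XDP state can use synchronize_net() to wait for any wakeup still executing before it frees resources. The teardown half of that contract, sketched with hypothetical driver helpers:

        /* driver detach path, sketch */
        unpublish_wakeup_state(dev);    /* hypothetical: stop new wakeups */
        synchronize_net();              /* waits out xsk_wakeup() readers */
        free_xdp_resources(dev);        /* hypothetical: now safe to free */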
index 7ac1542feaf835af0f166788bef8931d3edd53bb..dc651a628dcf07df229ff85878c744a257f01f49 100644 (file)
@@ -268,9 +268,6 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        int err = -1;
        int mtu;
 
-       if (!dst)
-               goto tx_err_link_failure;
-
        dst_hold(dst);
        dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
        if (IS_ERR(dst)) {
@@ -297,7 +294,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
        mtu = dst_mtu(dst);
        if (!skb->ignore_df && skb->len > mtu) {
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        if (mtu < IPV6_MIN_MTU)
@@ -343,6 +340,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xfrm_if *xi = netdev_priv(dev);
        struct net_device_stats *stats = &xi->dev->stats;
+       struct dst_entry *dst = skb_dst(skb);
        struct flowi fl;
        int ret;
 
@@ -352,10 +350,33 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
        case htons(ETH_P_IPV6):
                xfrm_decode_session(skb, &fl, AF_INET6);
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+               if (!dst) {
+                       fl.u.ip6.flowi6_oif = dev->ifindex;
+                       fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                       dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
+                       if (dst->error) {
+                               dst_release(dst);
+                               stats->tx_carrier_errors++;
+                               goto tx_err;
+                       }
+                       skb_dst_set(skb, dst);
+               }
                break;
        case htons(ETH_P_IP):
                xfrm_decode_session(skb, &fl, AF_INET);
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+               if (!dst) {
+                       struct rtable *rt;
+
+                       fl.u.ip4.flowi4_oif = dev->ifindex;
+                       fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                       rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
+                       if (IS_ERR(rt)) {
+                               stats->tx_carrier_errors++;
+                               goto tx_err;
+                       }
+                       skb_dst_set(skb, &rt->dst);
+               }
                break;
        default:
                goto tx_err;
@@ -563,12 +584,9 @@ static void xfrmi_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &xfrmi_netdev_ops;
        dev->type               = ARPHRD_NONE;
-       dev->hard_header_len    = ETH_HLEN;
-       dev->min_header_len     = ETH_HLEN;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
-       dev->max_mtu            = ETH_DATA_LEN;
-       dev->addr_len           = ETH_ALEN;
+       dev->max_mtu            = IP_MAX_MTU;
        dev->flags              = IFF_NOARP;
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = xfrmi_dev_free;
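The route lookups added to xfrmi_xmit() handle packets that arrive with no dst attached, which is exactly what a TC-level bpf_redirect() delivers. A sketch of a classifier that exercises this path; the ifindex is a hypothetical xfrm interface and SEC() comes from libbpf's bpf_helpers.h:

        SEC("classifier")
        int redirect_to_xfrmi(struct __sk_buff *skb)
        {
                return bpf_redirect(42 /* hypothetical xfrmi ifindex */, 0);
        }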
index 1d78819ffef19075bfc29adfa903c16b6d05e55e..630ce8c4d5a23c25406de1724b0ba53fb5867259 100644 (file)
@@ -47,13 +47,27 @@ static __always_inline void count(void *map)
 SEC("tracepoint/syscalls/sys_enter_open")
 int trace_enter_open(struct syscalls_enter_open_args *ctx)
 {
-       count((void *)&enter_open_map);
+       count(&enter_open_map);
+       return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_openat")
+int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
+{
+       count(&enter_open_map);
        return 0;
 }
 
 SEC("tracepoint/syscalls/sys_exit_open")
 int trace_enter_exit(struct syscalls_exit_open_args *ctx)
 {
-       count((void *)&exit_open_map);
+       count(&exit_open_map);
+       return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_openat")
+int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
+{
+       count(&exit_open_map);
        return 0;
 }
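The new openat handlers matter because modern C libraries typically implement open() on top of openat(2), so a tracer attached only to sys_enter_open observes almost no traffic. In the userspace sketch below, both calls would normally enter the kernel through the openat tracepoints:

        /* userspace, sketch: both typically raise sys_enter_openat */
        int a = open("/tmp/f", O_RDONLY);
        int b = openat(AT_FDCWD, "/tmp/f", O_RDONLY);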
index 16a16eadd509859c3640dac0507618d958c77b83..749a50f2f9f32cc608e290179d0ac543598b08b0 100644 (file)
@@ -37,9 +37,9 @@ static void print_ksym(__u64 addr)
        }
 
        printf("%s;", sym->name);
-       if (!strcmp(sym->name, "sys_read"))
+       if (!strstr(sym->name, "sys_read"))
                sys_read_seen = true;
-       else if (!strcmp(sym->name, "sys_write"))
+       else if (!strstr(sym->name, "sys_write"))
                sys_write_seen = true;
 }
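The strcmp() to strstr() switch tracks the syscall wrapper rework: on current kernels the kallsyms entry for read(2) is an arch-prefixed wrapper such as __x64_sys_read, which an exact compare can never match. Substring matching in isolation, a neutral sketch (strstr() returns a pointer to the first occurrence, or NULL):

        /* does this kallsyms name refer to the given syscall? (sketch) */
        static int sym_names_syscall(const char *name, const char *syscall)
        {
                return strstr(name, syscall) != NULL;   /* prefix-tolerant */
        }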
 
index e89ca4546114e9bee9105a91f1aa9ee554dd1427..918ce17b43fda2d732075a6720f1655fbd22f577 100644 (file)
@@ -52,17 +52,21 @@ struct dummy {
  */
 static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
 {
-       void **shadow_leak = shadow_data;
-       void *leak = ctor_data;
+       int **shadow_leak = shadow_data;
+       int **leak = ctor_data;
 
-       *shadow_leak = leak;
+       if (!ctor_data)
+               return -EINVAL;
+
+       *shadow_leak = *leak;
        return 0;
 }
 
 static struct dummy *livepatch_fix1_dummy_alloc(void)
 {
        struct dummy *d;
-       void *leak;
+       int *leak;
+       int **shadow_leak;
 
        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
@@ -76,25 +80,34 @@ static struct dummy *livepatch_fix1_dummy_alloc(void)
         * variable.  A patched dummy_free routine can later fetch this
         * pointer to handle resource release.
         */
-       leak = kzalloc(sizeof(int), GFP_KERNEL);
-       if (!leak) {
-               kfree(d);
-               return NULL;
+       leak = kzalloc(sizeof(*leak), GFP_KERNEL);
+       if (!leak)
+               goto err_leak;
+
+       shadow_leak = klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
+                                      shadow_leak_ctor, &leak);
+       if (!shadow_leak) {
+               pr_err("%s: failed to allocate shadow variable for the leaking pointer: dummy @ %p, leak @ %p\n",
+                      __func__, d, leak);
+               goto err_shadow;
        }
 
-       klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
-                        shadow_leak_ctor, leak);
-
        pr_info("%s: dummy @ %p, expires @ %lx\n",
                __func__, d, d->jiffies_expire);
 
        return d;
+
+err_shadow:
+       kfree(leak);
+err_leak:
+       kfree(d);
+       return NULL;
 }
 
 static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data)
 {
        void *d = obj;
-       void **shadow_leak = shadow_data;
+       int **shadow_leak = shadow_data;
 
        kfree(*shadow_leak);
        pr_info("%s: dummy @ %p, prevented leak @ %p\n",
@@ -103,7 +116,7 @@ static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data)
 
 static void livepatch_fix1_dummy_free(struct dummy *d)
 {
-       void **shadow_leak;
+       int **shadow_leak;
 
        /*
         * Patch: fetch the saved SV_LEAK shadow variable, detach and
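Two things change in the fix1 sample: klp_shadow_alloc() now receives &leak rather than leak, because the ctor is handed ctor_data and must copy the pointed-to pointer into the shadow variable, and the allocation's return value is finally checked, unwound through the usual goto labels. The extra level of indirection in miniature (sketch):

        int *leak = kzalloc(sizeof(*leak), GFP_KERNEL);
        void *ctor_data = &leak;        /* what klp_shadow_alloc() forwards */

        int **src = ctor_data;          /* inside the ctor */
        int *stored = *src;             /* == leak; stored in shadow_data */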
index 50d223b82e8b5643cb8ced9b04c6baa8df7d511b..29fe5cd420472ad2e9e81188132d0ce47b6a55b8 100644 (file)
@@ -59,7 +59,7 @@ static bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies)
 static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data)
 {
        void *d = obj;
-       void **shadow_leak = shadow_data;
+       int **shadow_leak = shadow_data;
 
        kfree(*shadow_leak);
        pr_info("%s: dummy @ %p, prevented leak @ %p\n",
@@ -68,7 +68,7 @@ static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data)
 
 static void livepatch_fix2_dummy_free(struct dummy *d)
 {
-       void **shadow_leak;
+       int **shadow_leak;
        int *shadow_count;
 
        /* Patch: copy the memory leak patch from the fix1 module. */
index ecfe83a943a79d4178de48121bf5467a2561d43e..7e753b0d2fa611524c9e2adbe02c8fa3e9b6015e 100644 (file)
@@ -95,7 +95,7 @@ struct dummy {
 static __used noinline struct dummy *dummy_alloc(void)
 {
        struct dummy *d;
-       void *leak;
+       int *leak;
 
        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
@@ -105,7 +105,7 @@ static __used noinline struct dummy *dummy_alloc(void)
                msecs_to_jiffies(1000 * EXPIRE_PERIOD);
 
        /* Oops, forgot to save leak! */
-       leak = kzalloc(sizeof(int), GFP_KERNEL);
+       leak = kzalloc(sizeof(*leak), GFP_KERNEL);
        if (!leak) {
                kfree(d);
                return NULL;
index 6d0125ca8af714e712babe7a425eab4695efed04..20291ec6489f31e4d3380c6d102711fc8e2c9692 100644 (file)
@@ -298,14 +298,14 @@ int main(void)
                req = malloc(sizes.seccomp_notif);
                if (!req)
                        goto out_close;
-               memset(req, 0, sizeof(*req));
 
                resp = malloc(sizes.seccomp_notif_resp);
                if (!resp)
                        goto out_req;
-               memset(resp, 0, sizeof(*resp));
+               memset(resp, 0, sizes.seccomp_notif_resp);
 
                while (1) {
+                       memset(req, 0, sizes.seccomp_notif);
                        if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) {
                                perror("ioctl recv");
                                goto out_resp;
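The seccomp test changes follow two kernel-side rules: struct sizes must come from SECCOMP_GET_NOTIF_SIZES, since a newer kernel's notification structs can be larger than the ones these headers were compiled against, and the receive buffer should be zeroed before every SECCOMP_IOCTL_NOTIF_RECV, as recent kernels reject buffers with stale bytes. The allocation side, sketched with a raw syscall because libc may lack a seccomp() wrapper:

        struct seccomp_notif_sizes sizes;
        struct seccomp_notif *req;

        syscall(__NR_seccomp, SECCOMP_GET_NOTIF_SIZES, 0, &sizes);
        req = malloc(sizes.seccomp_notif);      /* the kernel's size */
        memset(req, 0, sizes.seccomp_notif);    /* not sizeof(*req) */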
index 7affc3b50b61dd5d293e7b62cfe5675d0a632825..cfc1595802632c252241f5d6001d05c37a758268 100644 (file)
@@ -36,6 +36,7 @@ static int __init trace_printk_init(void)
 
        /* Kick off printing in irq context */
        irq_work_queue(&irqwork);
+       irq_work_sync(&irqwork);
 
        trace_printk("This is a %s that will use trace_bprintk()\n",
                     "static string");
index 4aa1806c59c2a1cdcfcea105df3899d0e5109646..306054ef340f73466c17b945bc8bdb038bd2218b 100644 (file)
@@ -6,7 +6,7 @@ conmakehash
 kallsyms
 unifdef
 recordmcount
-sortextable
+sorttable
 asn1_compiler
 extract-cert
 sign-file
index d4adfbe426903e5aa0a0fd7d58554ce930be08e0..9d07e59cbdf7811fbc19785e1fe3ebfdeb1c1cd9 100644 (file)
@@ -31,6 +31,10 @@ cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /de
 # Return y if the linker supports <flag>, n otherwise
 ld-option = $(success,$(LD) -v $(1))
 
+# $(as-instr,<instr>)
+# Return y if the assembler supports <instr>, n otherwise
+as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
+
 # check if $(CC) and $(LD) exist
 $(error-if,$(failure,command -v $(CC)),compiler '$(CC)' not found)
 $(error-if,$(failure,command -v $(LD)),linker '$(LD)' not found)
index 00c47901cb0691802cee2d41519856e6d71d68d8..b0e962611d5062f7ee2a0e240f48d0674b28201d 100644 (file)
@@ -13,17 +13,26 @@ hostprogs-$(CONFIG_BUILD_BIN2C)  += bin2c
 hostprogs-$(CONFIG_KALLSYMS)     += kallsyms
 hostprogs-$(CONFIG_VT)           += conmakehash
 hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
-hostprogs-$(CONFIG_BUILDTIME_EXTABLE_SORT) += sortextable
+hostprogs-$(CONFIG_BUILDTIME_TABLE_SORT) += sorttable
 hostprogs-$(CONFIG_ASN1)        += asn1_compiler
 hostprogs-$(CONFIG_MODULE_SIG_FORMAT) += sign-file
 hostprogs-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += extract-cert
 hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
 
-HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include
+HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
 HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
 HOSTLDLIBS_sign-file = -lcrypto
 HOSTLDLIBS_extract-cert = -lcrypto
 
+ifdef CONFIG_UNWINDER_ORC
+ifeq ($(ARCH),x86_64)
+ARCH := x86
+endif
+HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/x86/include
+HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED
+HOSTLDLIBS_sorttable = -lpthread
+endif
+
 always         := $(hostprogs-y) $(hostprogs-m)
 
 # The following hostprogs-y programs are only build on demand
index 7cbe6e72e363b79e8d464c68a73f4892a8382feb..a63380c6b0d20f8f2fd9c73b715343177f607bb7 100755 (executable)
@@ -4125,15 +4125,6 @@ sub process {
                             "Prefer [subsystem eg: netdev]_$level2([subsystem]dev, ... then dev_$level2(dev, ... then pr_$level(...  to printk(KERN_$orig ...\n" . $herecurr);
                }
 
-               if ($line =~ /\bpr_warning\s*\(/) {
-                       if (WARN("PREFER_PR_LEVEL",
-                                "Prefer pr_warn(... to pr_warning(...\n" . $herecurr) &&
-                           $fix) {
-                               $fixed[$fixlinenr] =~
-                                   s/\bpr_warning\b/pr_warn/;
-                       }
-               }
-
                if ($line =~ /\bdev_printk\s*\(\s*KERN_([A-Z]+)/) {
                        my $orig = $1;
                        my $level = lc($orig);
index 441799b5359b59dab4fd684ef6b88f8afca7b5c4..9330d4294b74c764f12643a9dab644e068da2c8e 100644 (file)
@@ -51,8 +51,6 @@ expression x;
  x = devm_request_irq(...)
 |
  x = devm_ioremap(...)
-|
- x = devm_ioremap_nocache(...)
 |
  x = devm_ioport_map(...)
 )
@@ -84,8 +82,6 @@ position p;
  x = request_irq(...)
 |
  x = ioremap(...)
-|
- x = ioremap_nocache(...)
 |
  x = ioport_map(...)
 )
index 0e60e1113a1d7f9e6cff2fb7a4b11e4dc8d6cc71..63b81d0c97b61df2bf02c8f9e8f9c30e254f6957 100644 (file)
@@ -23,7 +23,7 @@ int ret;
 position p1,p2,p3;
 @@
 
-e = \(ioremap@p1\|ioremap_nocache@p1\)(...)
+e = \(ioremap@p1\)(...)
 ... when != iounmap(e)
 if (<+...e...+>) S
 ... when any
index d33de0b9f4f5f6268460136611756673373e905c..e3569543bdac213f337b420b726daec4a1470009 100644 (file)
@@ -14,8 +14,8 @@ config HAVE_GCC_PLUGINS
          An arch should select this symbol if it supports building with
          GCC plugins.
 
-config GCC_PLUGINS
-       bool
+menuconfig GCC_PLUGINS
+       bool "GCC plugins"
        depends on HAVE_GCC_PLUGINS
        depends on PLUGIN_HOSTCC != ""
        default y
@@ -25,8 +25,7 @@ config GCC_PLUGINS
 
          See Documentation/core-api/gcc-plugins.rst for details.
 
-menu "GCC plugins"
-       depends on GCC_PLUGINS
+if GCC_PLUGINS
 
 config GCC_PLUGIN_CYC_COMPLEXITY
        bool "Compute the cyclomatic complexity of a function" if EXPERT
@@ -113,4 +112,4 @@ config GCC_PLUGIN_ARM_SSP_PER_TASK
        bool
        depends on GCC_PLUGINS && ARM
 
-endmenu
+endif
index fb55f262f42d6cee3f14475637b29d44028df97c..94153732ec00188b03968781ce908792c5ed7f5c 100644 (file)
@@ -310,6 +310,15 @@ static void output_label(const char *label)
        printf("%s:\n", label);
 }
 
+/* Keep symbols relocatable by emitting their addresses relative to '_text'. */
+static void output_address(unsigned long long addr)
+{
+       if (_text <= addr)
+               printf("\tPTR\t_text + %#llx\n", addr - _text);
+       else
+               printf("\tPTR\t_text - %#llx\n", _text - addr);
+}
+
 /* uncompress a compressed symbol. When this function is called, the best table
  * might still be compressed itself, so the function needs to be recursive */
 static int expand_symbol(const unsigned char *data, int len, char *result)
@@ -360,19 +369,6 @@ static void write_src(void)
 
        printf("\t.section .rodata, \"a\"\n");
 
-       /* Provide proper symbols relocatability by their relativeness
-        * to a fixed anchor point in the runtime image, either '_text'
-        * for absolute address tables, in which case the linker will
-        * emit the final addresses at build time. Otherwise, use the
-        * offset relative to the lowest value encountered of all relative
-        * symbols, and emit non-relocatable fixed offsets that will be fixed
-        * up at runtime.
-        *
-        * The symbol names cannot be used to construct normal symbol
-        * references as the list of symbols contains symbols that are
-        * declared static and are private to their .o files.  This prevents
-        * .tmp_kallsyms.o or any other object from referencing them.
-        */
        if (!base_relative)
                output_label("kallsyms_addresses");
        else
@@ -380,6 +376,13 @@ static void write_src(void)
 
        for (i = 0; i < table_cnt; i++) {
                if (base_relative) {
+                       /*
+                        * Use the offset relative to the lowest value
+                        * encountered of all relative symbols, and emit
+                        * non-relocatable fixed offsets that will be fixed
+                        * up at runtime.
+                        */
+
                        long long offset;
                        int overflow;
 
@@ -402,12 +405,7 @@ static void write_src(void)
                        }
                        printf("\t.long\t%#x\n", (int)offset);
                } else if (!symbol_absolute(&table[i])) {
-                       if (_text <= table[i].addr)
-                               printf("\tPTR\t_text + %#llx\n",
-                                       table[i].addr - _text);
-                       else
-                               printf("\tPTR\t_text - %#llx\n",
-                                       _text - table[i].addr);
+                       output_address(table[i].addr);
                } else {
                        printf("\tPTR\t%#llx\n", table[i].addr);
                }
@@ -416,7 +414,7 @@ static void write_src(void)
 
        if (base_relative) {
                output_label("kallsyms_relative_base");
-               printf("\tPTR\t_text - %#llx\n", _text - relative_base);
+               output_address(relative_base);
                printf("\n");
        }
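Factoring out output_address() means every absolute table entry, and now the kallsyms_relative_base anchor as well, is emitted as a link-time expression against _text, so the table stays correct when KASLR relocates the image. Sample emitted assembly, derived from the format strings above (offsets hypothetical):

        PTR     _text + 0x10            /* symbol 16 bytes above _text */
        PTR     _text - 0x2000          /* e.g. a percpu symbol below _text */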
 
index 77ffff3a053ccb844a0f368ae59661a01bc8444c..9f1de58e9f0c995fba0a0a0cf076be4a18e79ca1 100644 (file)
@@ -254,6 +254,13 @@ static int expr_eq(struct expr *e1, struct expr *e2)
 {
        int res, old_count;
 
+       /*
+        * A NULL expr is taken to be yes, but there's also a different way to
+        * represent yes. expr_is_yes() checks for either representation.
+        */
+       if (!e1 || !e2)
+               return expr_is_yes(e1) && expr_is_yes(e2);
+
        if (e1->type != e2->type)
                return 0;
        switch (e1->type) {
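In kconfig's expression trees a NULL pointer is one spelling of the constant 'y' and the literal yes symbol is the other, so the added guard keeps expr_eq() from dereferencing NULL while still treating the two spellings as equal. expr_is_yes() amounts to the following, paraphrased as a sketch:

        static bool is_yes(const struct expr *e)        /* paraphrase */
        {
                return !e ||
                       (e->type == E_SYMBOL && e->left.sym == &symbol_yes);
        }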
index 436379940356130eb1cd4b2a20ded511e4a8b4c8..c287ad9b3a677ec9c34bbed6aec25108dadd9297 100755 (executable)
@@ -180,9 +180,9 @@ mksysmap()
        ${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2}
 }
 
-sortextable()
+sorttable()
 {
-       ${objtree}/scripts/sortextable ${1}
+       ${objtree}/scripts/sorttable ${1}
 }
 
 # Delete output files in case of error
@@ -304,9 +304,12 @@ fi
 
 vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o}
 
-if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then
-       info SORTEX vmlinux
-       sortextable vmlinux
+if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then
+       info SORTTAB vmlinux
+       if ! sorttable vmlinux; then
+               echo >&2 Failed to sort kernel tables
+               exit 1
+       fi
 fi
 
 info SYSMAP System.map
index d1d757c6edf4f63bdf66ef9bb4d9bf0f2601d0aa..3a5a4b210c8688f4ddcede7196a2e753d8bf05ed 100755 (executable)
@@ -55,12 +55,10 @@ CONFIG_FLAGS=""
 if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
 if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
 if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi
-UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
 
 # Truncate to maximum length
-
 UTS_LEN=64
-UTS_TRUNCATE="cut -b -$UTS_LEN"
+UTS_VERSION="$(echo $UTS_VERSION $CONFIG_FLAGS $TIMESTAMP | cut -b -$UTS_LEN)"
 
 # Generate a temporary compile.h
 
@@ -69,10 +67,10 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
 
   echo \#define UTS_MACHINE \"$ARCH\"
 
-  echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\"
+  echo \#define UTS_VERSION \"$UTS_VERSION\"
 
-  echo \#define LINUX_COMPILE_BY \"`echo $LINUX_COMPILE_BY | $UTS_TRUNCATE`\"
-  echo \#define LINUX_COMPILE_HOST \"`echo $LINUX_COMPILE_HOST | $UTS_TRUNCATE`\"
+  printf '#define LINUX_COMPILE_BY "%s"\n' "$LINUX_COMPILE_BY"
+  echo \#define LINUX_COMPILE_HOST \"$LINUX_COMPILE_HOST\"
 
   echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | grep ' version ' | sed 's/[[:space:]]*$//'`\"
 } > .tmpcompile
index e0750b70453f2e19e14175b176dc3cb7110844c1..357dc56bcf30dccc34318766c775c966273cb2ec 100755 (executable)
@@ -136,7 +136,7 @@ mkdir -p debian/source/
 echo "1.0" > debian/source/format
 
 echo $debarch > debian/arch
-extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)"
+extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
 extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
 
 # Generate a simple changelog template
@@ -174,7 +174,7 @@ Source: $sourcename
 Section: kernel
 Priority: optional
 Maintainer: $maintainer
-Build-Depends: bc, kmod, cpio, bison, flex | flex:native $extra_build_depends
+Build-Depends: bc, rsync, kmod, cpio, bison, flex | flex:native $extra_build_depends
 Homepage: http://www.kernel.org/
 
 Package: $packagename
index 612268eabef463e783a75bd61252ef6c6e122496..7225107a9aafeb1488d4fb5beb6ed27b853d67a4 100644 (file)
 #define R_AARCH64_ABS64        257
 #endif
 
+#define R_ARM_PC24             1
+#define R_ARM_THM_CALL         10
+#define R_ARM_CALL             28
+
 static int fd_map;     /* File descriptor for file being modified. */
 static int mmap_failed; /* Boolean flag. */
 static char gpfx;      /* prefix for global symbol name (sometimes '_') */
@@ -418,6 +422,18 @@ static char const *already_has_rel_mcount = "success"; /* our work here is done!
 #define RECORD_MCOUNT_64
 #include "recordmcount.h"
 
+static int arm_is_fake_mcount(Elf32_Rel const *rp)
+{
+       switch (ELF32_R_TYPE(w(rp->r_info))) {
+       case R_ARM_THM_CALL:
+       case R_ARM_CALL:
+       case R_ARM_PC24:
+               return 0;
+       }
+
+       return 1;
+}
+
 /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
  * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
  * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
@@ -523,6 +539,7 @@ static int do_file(char const *const fname)
                altmcount = "__gnu_mcount_nc";
                make_nop = make_nop_arm;
                rel_type_nop = R_ARM_NONE;
+               is_fake_mcount32 = arm_is_fake_mcount;
                gpfx = 0;
                break;
        case EM_AARCH64:
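arm_is_fake_mcount() filters the relocations recordmcount harvests into __mcount_loc: on 32-bit ARM only R_ARM_CALL, R_ARM_PC24 and R_ARM_THM_CALL mark genuine calls to the mcount stub, and any other relocation type that happens to reference the symbol is not a patchable call site. The type test in isolation (sketch; ELF32_R_TYPE() keeps the low byte of r_info, and w() byte-swaps as in the surrounding file):

        unsigned int type = ELF32_R_TYPE(w(rp->r_info));
        int real_call = (type == R_ARM_CALL ||
                         type == R_ARM_PC24 ||
                         type == R_ARM_THM_CALL);       /* otherwise fake */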
diff --git a/scripts/sortextable.h b/scripts/sortextable.h
deleted file mode 100644 (file)
index d4b3f6c..0000000
--- a/scripts/sortextable.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sortextable.h
- *
- * Copyright 2011 - 2012 Cavium, Inc.
- *
- * Some of this code was taken out of recordmcount.h written by:
- *
- * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>.  All rights reserved.
- * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
- */
-
-#undef extable_ent_size
-#undef compare_extable
-#undef do_func
-#undef Elf_Addr
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Rel
-#undef Elf_Rela
-#undef Elf_Sym
-#undef ELF_R_SYM
-#undef Elf_r_sym
-#undef ELF_R_INFO
-#undef Elf_r_info
-#undef ELF_ST_BIND
-#undef ELF_ST_TYPE
-#undef fn_ELF_R_SYM
-#undef fn_ELF_R_INFO
-#undef uint_t
-#undef _r
-#undef _w
-
-#ifdef SORTEXTABLE_64
-# define extable_ent_size      16
-# define compare_extable       compare_extable_64
-# define do_func               do64
-# define Elf_Addr              Elf64_Addr
-# define Elf_Ehdr              Elf64_Ehdr
-# define Elf_Shdr              Elf64_Shdr
-# define Elf_Rel               Elf64_Rel
-# define Elf_Rela              Elf64_Rela
-# define Elf_Sym               Elf64_Sym
-# define ELF_R_SYM             ELF64_R_SYM
-# define Elf_r_sym             Elf64_r_sym
-# define ELF_R_INFO            ELF64_R_INFO
-# define Elf_r_info            Elf64_r_info
-# define ELF_ST_BIND           ELF64_ST_BIND
-# define ELF_ST_TYPE           ELF64_ST_TYPE
-# define fn_ELF_R_SYM          fn_ELF64_R_SYM
-# define fn_ELF_R_INFO         fn_ELF64_R_INFO
-# define uint_t                        uint64_t
-# define _r                    r8
-# define _w                    w8
-#else
-# define extable_ent_size      8
-# define compare_extable       compare_extable_32
-# define do_func               do32
-# define Elf_Addr              Elf32_Addr
-# define Elf_Ehdr              Elf32_Ehdr
-# define Elf_Shdr              Elf32_Shdr
-# define Elf_Rel               Elf32_Rel
-# define Elf_Rela              Elf32_Rela
-# define Elf_Sym               Elf32_Sym
-# define ELF_R_SYM             ELF32_R_SYM
-# define Elf_r_sym             Elf32_r_sym
-# define ELF_R_INFO            ELF32_R_INFO
-# define Elf_r_info            Elf32_r_info
-# define ELF_ST_BIND           ELF32_ST_BIND
-# define ELF_ST_TYPE           ELF32_ST_TYPE
-# define fn_ELF_R_SYM          fn_ELF32_R_SYM
-# define fn_ELF_R_INFO         fn_ELF32_R_INFO
-# define uint_t                        uint32_t
-# define _r                    r
-# define _w                    w
-#endif
-
-static int compare_extable(const void *a, const void *b)
-{
-       Elf_Addr av = _r(a);
-       Elf_Addr bv = _r(b);
-
-       if (av < bv)
-               return -1;
-       if (av > bv)
-               return 1;
-       return 0;
-}
-
-static void
-do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort)
-{
-       Elf_Shdr *shdr;
-       Elf_Shdr *shstrtab_sec;
-       Elf_Shdr *strtab_sec = NULL;
-       Elf_Shdr *symtab_sec = NULL;
-       Elf_Shdr *extab_sec = NULL;
-       Elf_Sym *sym;
-       const Elf_Sym *symtab;
-       Elf32_Word *symtab_shndx_start = NULL;
-       Elf_Sym *sort_needed_sym;
-       Elf_Shdr *sort_needed_sec;
-       Elf_Rel *relocs = NULL;
-       int relocs_size = 0;
-       uint32_t *sort_done_location;
-       const char *secstrtab;
-       const char *strtab;
-       char *extab_image;
-       int extab_index = 0;
-       int i;
-       int idx;
-       unsigned int num_sections;
-       unsigned int secindex_strings;
-
-       shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
-
-       num_sections = r2(&ehdr->e_shnum);
-       if (num_sections == SHN_UNDEF)
-               num_sections = _r(&shdr[0].sh_size);
-
-       secindex_strings = r2(&ehdr->e_shstrndx);
-       if (secindex_strings == SHN_XINDEX)
-               secindex_strings = r(&shdr[0].sh_link);
-
-       shstrtab_sec = shdr + secindex_strings;
-       secstrtab = (const char *)ehdr + _r(&shstrtab_sec->sh_offset);
-       for (i = 0; i < num_sections; i++) {
-               idx = r(&shdr[i].sh_name);
-               if (strcmp(secstrtab + idx, "__ex_table") == 0) {
-                       extab_sec = shdr + i;
-                       extab_index = i;
-               }
-               if ((r(&shdr[i].sh_type) == SHT_REL ||
-                    r(&shdr[i].sh_type) == SHT_RELA) &&
-                   r(&shdr[i].sh_info) == extab_index) {
-                       relocs = (void *)ehdr + _r(&shdr[i].sh_offset);
-                       relocs_size = _r(&shdr[i].sh_size);
-               }
-               if (strcmp(secstrtab + idx, ".symtab") == 0)
-                       symtab_sec = shdr + i;
-               if (strcmp(secstrtab + idx, ".strtab") == 0)
-                       strtab_sec = shdr + i;
-               if (r(&shdr[i].sh_type) == SHT_SYMTAB_SHNDX)
-                       symtab_shndx_start = (Elf32_Word *)(
-                               (const char *)ehdr + _r(&shdr[i].sh_offset));
-       }
-       if (strtab_sec == NULL) {
-               fprintf(stderr, "no .strtab in  file: %s\n", fname);
-               fail_file();
-       }
-       if (symtab_sec == NULL) {
-               fprintf(stderr, "no .symtab in  file: %s\n", fname);
-               fail_file();
-       }
-       symtab = (const Elf_Sym *)((const char *)ehdr +
-                                  _r(&symtab_sec->sh_offset));
-       if (extab_sec == NULL) {
-               fprintf(stderr, "no __ex_table in  file: %s\n", fname);
-               fail_file();
-       }
-       strtab = (const char *)ehdr + _r(&strtab_sec->sh_offset);
-
-       extab_image = (void *)ehdr + _r(&extab_sec->sh_offset);
-
-       if (custom_sort) {
-               custom_sort(extab_image, _r(&extab_sec->sh_size));
-       } else {
-               int num_entries = _r(&extab_sec->sh_size) / extable_ent_size;
-               qsort(extab_image, num_entries,
-                     extable_ent_size, compare_extable);
-       }
-       /* If there were relocations, we no longer need them. */
-       if (relocs)
-               memset(relocs, 0, relocs_size);
-
-       /* find main_extable_sort_needed */
-       sort_needed_sym = NULL;
-       for (i = 0; i < _r(&symtab_sec->sh_size) / sizeof(Elf_Sym); i++) {
-               sym = (void *)ehdr + _r(&symtab_sec->sh_offset);
-               sym += i;
-               if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
-                       continue;
-               idx = r(&sym->st_name);
-               if (strcmp(strtab + idx, "main_extable_sort_needed") == 0) {
-                       sort_needed_sym = sym;
-                       break;
-               }
-       }
-       if (sort_needed_sym == NULL) {
-               fprintf(stderr,
-                       "no main_extable_sort_needed symbol in  file: %s\n",
-                       fname);
-               fail_file();
-       }
-       sort_needed_sec = &shdr[get_secindex(r2(&sym->st_shndx),
-                                            sort_needed_sym - symtab,
-                                            symtab_shndx_start)];
-       sort_done_location = (void *)ehdr +
-               _r(&sort_needed_sec->sh_offset) +
-               _r(&sort_needed_sym->st_value) -
-               _r(&sort_needed_sec->sh_addr);
-
-#if 0
-       printf("sort done marker at %lx\n",
-              (unsigned long)((char *)sort_done_location - (char *)ehdr));
-#endif
-       /* We sorted it, clear the flag. */
-       w(0, sort_done_location);
-}
similarity index 67%
rename from scripts/sortextable.c
rename to scripts/sorttable.c
index 55768654e3c6a3ba4686bb0b81147a06b2741503..ec6b5e81eba190b01e258feb64ebd341d0f1c4ed 100644 (file)
@@ -1,6 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * sortextable.c: Sort the kernel's exception table
+ * sorttable.c: Sort the kernel's table
+ *
+ * Added ORC unwind tables sort support and other updates:
+ * Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
+ * Shile Zhang <shile.zhang@linux.alibaba.com>
  *
  * Copyright 2011 - 2012 Cavium, Inc.
  *
@@ -9,7 +13,7 @@
  * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>.  All rights reserved.
  *
  * Restructured to fit Linux format, as well as other updates:
- *  Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+ * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
  */
 
 /*
@@ -22,7 +26,6 @@
 #include <getopt.h>
 #include <elf.h>
 #include <fcntl.h>
-#include <setjmp.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #define EM_ARCV2       195
 #endif
 
-static int fd_map;     /* File descriptor for file being modified. */
-static int mmap_failed; /* Boolean flag. */
-static void *ehdr_curr; /* current ElfXX_Ehdr *  for resource cleanup */
-static struct stat sb; /* Remember .st_size, etc. */
-static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */
-
-/* setjmp() return values */
-enum {
-       SJ_SETJMP = 0,  /* hardwired first return */
-       SJ_FAIL,
-       SJ_SUCCEED
-};
-
-/* Per-file resource cleanup when multiple files. */
-static void
-cleanup(void)
-{
-       if (!mmap_failed)
-               munmap(ehdr_curr, sb.st_size);
-       close(fd_map);
-}
-
-static void __attribute__((noreturn))
-fail_file(void)
-{
-       cleanup();
-       longjmp(jmpenv, SJ_FAIL);
-}
+static uint32_t (*r)(const uint32_t *);
+static uint16_t (*r2)(const uint16_t *);
+static uint64_t (*r8)(const uint64_t *);
+static void (*w)(uint32_t, uint32_t *);
+static void (*w2)(uint16_t, uint16_t *);
+static void (*w8)(uint64_t, uint64_t *);
+typedef void (*table_sort_t)(char *, int);
 
 /*
  * Get the whole file as a programming convenience in order to avoid
@@ -86,87 +68,98 @@ fail_file(void)
  * avoids copying unused pieces; else just read the whole file.
  * Open for both read and write.
  */
-static void *mmap_file(char const *fname)
+static void *mmap_file(char const *fname, size_t *size)
 {
-       void *addr;
+       int fd;
+       struct stat sb;
+       void *addr = NULL;
 
-       fd_map = open(fname, O_RDWR);
-       if (fd_map < 0 || fstat(fd_map, &sb) < 0) {
+       fd = open(fname, O_RDWR);
+       if (fd < 0) {
                perror(fname);
-               fail_file();
+               return NULL;
+       }
+       if (fstat(fd, &sb) < 0) {
+               perror(fname);
+               goto out;
        }
        if (!S_ISREG(sb.st_mode)) {
                fprintf(stderr, "not a regular file: %s\n", fname);
-               fail_file();
+               goto out;
        }
-       addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED,
-                   fd_map, 0);
+
+       addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) {
-               mmap_failed = 1;
                fprintf(stderr, "Could not mmap file: %s\n", fname);
-               fail_file();
+               goto out;
        }
+
+       *size = sb.st_size;
+
+out:
+       close(fd);
        return addr;
 }
 
-static uint64_t r8be(const uint64_t *x)
-{
-       return get_unaligned_be64(x);
-}
 static uint32_t rbe(const uint32_t *x)
 {
        return get_unaligned_be32(x);
 }
+
 static uint16_t r2be(const uint16_t *x)
 {
        return get_unaligned_be16(x);
 }
-static uint64_t r8le(const uint64_t *x)
+
+static uint64_t r8be(const uint64_t *x)
 {
-       return get_unaligned_le64(x);
+       return get_unaligned_be64(x);
 }
+
 static uint32_t rle(const uint32_t *x)
 {
        return get_unaligned_le32(x);
 }
+
 static uint16_t r2le(const uint16_t *x)
 {
        return get_unaligned_le16(x);
 }
 
-static void w8be(uint64_t val, uint64_t *x)
+static uint64_t r8le(const uint64_t *x)
 {
-       put_unaligned_be64(val, x);
+       return get_unaligned_le64(x);
 }
+
 static void wbe(uint32_t val, uint32_t *x)
 {
        put_unaligned_be32(val, x);
 }
+
 static void w2be(uint16_t val, uint16_t *x)
 {
        put_unaligned_be16(val, x);
 }
-static void w8le(uint64_t val, uint64_t *x)
+
+static void w8be(uint64_t val, uint64_t *x)
 {
-       put_unaligned_le64(val, x);
+       put_unaligned_be64(val, x);
 }
+
 static void wle(uint32_t val, uint32_t *x)
 {
        put_unaligned_le32(val, x);
 }
+
 static void w2le(uint16_t val, uint16_t *x)
 {
        put_unaligned_le16(val, x);
 }
 
-static uint64_t (*r8)(const uint64_t *);
-static uint32_t (*r)(const uint32_t *);
-static uint16_t (*r2)(const uint16_t *);
-static void (*w8)(uint64_t, uint64_t *);
-static void (*w)(uint32_t, uint32_t *);
-static void (*w2)(uint16_t, uint16_t *);
-
-typedef void (*table_sort_t)(char *, int);
+static void w8le(uint64_t val, uint64_t *x)
+{
+       put_unaligned_le64(val, x);
+}
 
 /*
  * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
@@ -193,9 +186,9 @@ static inline unsigned int get_secindex(unsigned int shndx,
 }
 
 /* 32 bit and 64 bit are very similar */
-#include "sortextable.h"
-#define SORTEXTABLE_64
-#include "sortextable.h"
+#include "sorttable.h"
+#define SORTTABLE_64
+#include "sorttable.h"
 
 static int compare_relative_table(const void *a, const void *b)
 {
@@ -209,110 +202,100 @@ static int compare_relative_table(const void *a, const void *b)
        return 0;
 }
 
-static void x86_sort_relative_table(char *extab_image, int image_size)
+static void sort_relative_table(char *extab_image, int image_size)
 {
-       int i;
+       int i = 0;
 
-       i = 0;
+       /*
+        * Do the same thing the runtime sort does, first normalize to
+        * being relative to the start of the section.
+        */
        while (i < image_size) {
                uint32_t *loc = (uint32_t *)(extab_image + i);
-
                w(r(loc) + i, loc);
-               w(r(loc + 1) + i + 4, loc + 1);
-               w(r(loc + 2) + i + 8, loc + 2);
-
-               i += sizeof(uint32_t) * 3;
+               i += 4;
        }
 
-       qsort(extab_image, image_size / 12, 12, compare_relative_table);
+       qsort(extab_image, image_size / 8, 8, compare_relative_table);
 
+       /* Now denormalize. */
        i = 0;
        while (i < image_size) {
                uint32_t *loc = (uint32_t *)(extab_image + i);
-
                w(r(loc) - i, loc);
-               w(r(loc + 1) - (i + 4), loc + 1);
-               w(r(loc + 2) - (i + 8), loc + 2);
-
-               i += sizeof(uint32_t) * 3;
+               i += 4;
        }
 }
 
-static void sort_relative_table(char *extab_image, int image_size)
+static void x86_sort_relative_table(char *extab_image, int image_size)
 {
-       int i;
+       int i = 0;
 
-       /*
-        * Do the same thing the runtime sort does, first normalize to
-        * being relative to the start of the section.
-        */
-       i = 0;
        while (i < image_size) {
                uint32_t *loc = (uint32_t *)(extab_image + i);
+
                w(r(loc) + i, loc);
-               i += 4;
+               w(r(loc + 1) + i + 4, loc + 1);
+               w(r(loc + 2) + i + 8, loc + 2);
+
+               i += sizeof(uint32_t) * 3;
        }
 
-       qsort(extab_image, image_size / 8, 8, compare_relative_table);
+       qsort(extab_image, image_size / 12, 12, compare_relative_table);
 
-       /* Now denormalize. */
        i = 0;
        while (i < image_size) {
                uint32_t *loc = (uint32_t *)(extab_image + i);
+
                w(r(loc) - i, loc);
-               i += 4;
+               w(r(loc + 1) - (i + 4), loc + 1);
+               w(r(loc + 2) - (i + 8), loc + 2);
+
+               i += sizeof(uint32_t) * 3;
        }
 }
 
-static void
-do_file(char const *const fname)
+static int do_file(char const *const fname, void *addr)
 {
-       table_sort_t custom_sort;
-       Elf32_Ehdr *ehdr = mmap_file(fname);
+       int rc = -1;
+       Elf32_Ehdr *ehdr = addr;
+       table_sort_t custom_sort = NULL;
 
-       ehdr_curr = ehdr;
        switch (ehdr->e_ident[EI_DATA]) {
-       default:
-               fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
-                       ehdr->e_ident[EI_DATA], fname);
-               fail_file();
-               break;
        case ELFDATA2LSB:
-               r = rle;
-               r2 = r2le;
-               r8 = r8le;
-               w = wle;
-               w2 = w2le;
-               w8 = w8le;
+               r       = rle;
+               r2      = r2le;
+               r8      = r8le;
+               w       = wle;
+               w2      = w2le;
+               w8      = w8le;
                break;
        case ELFDATA2MSB:
-               r = rbe;
-               r2 = r2be;
-               r8 = r8be;
-               w = wbe;
-               w2 = w2be;
-               w8 = w8be;
+               r       = rbe;
+               r2      = r2be;
+               r8      = r8be;
+               w       = wbe;
+               w2      = w2be;
+               w8      = w8be;
                break;
-       }  /* end switch */
-       if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
-       ||  (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN)
-       ||  ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
+       default:
+               fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
+                       ehdr->e_ident[EI_DATA], fname);
+               return -1;
+       }
+
+       if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
+           (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
+           ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
                fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
-               fail_file();
+               return -1;
        }
 
-       custom_sort = NULL;
        switch (r2(&ehdr->e_machine)) {
-       default:
-               fprintf(stderr, "unrecognized e_machine %d %s\n",
-                       r2(&ehdr->e_machine), fname);
-               fail_file();
-               break;
        case EM_386:
        case EM_X86_64:
                custom_sort = x86_sort_relative_table;
                break;
-
        case EM_S390:
        case EM_AARCH64:
        case EM_PARISC:
@@ -327,74 +310,68 @@ do_file(char const *const fname)
        case EM_MIPS:
        case EM_XTENSA:
                break;
-       }  /* end switch */
+       default:
+               fprintf(stderr, "unrecognized e_machine %d %s\n",
+                       r2(&ehdr->e_machine), fname);
+               return -1;
+       }
 
        switch (ehdr->e_ident[EI_CLASS]) {
-       default:
-               fprintf(stderr, "unrecognized ELF class %d %s\n",
-                       ehdr->e_ident[EI_CLASS], fname);
-               fail_file();
-               break;
        case ELFCLASS32:
-               if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
-               ||  r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
+               if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
+                   r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
                        fprintf(stderr,
                                "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
-                       fail_file();
+                       break;
                }
-               do32(ehdr, fname, custom_sort);
+               rc = do_sort_32(ehdr, fname, custom_sort);
                break;
-       case ELFCLASS64: {
+       case ELFCLASS64:
+               {
                Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
-               if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
-               ||  r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
+               if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
+                   r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
                        fprintf(stderr,
-                               "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
-                       fail_file();
+                               "unrecognized ET_EXEC/ET_DYN file: %s\n",
+                               fname);
+                       break;
+               }
+               rc = do_sort_64(ghdr, fname, custom_sort);
                }
-               do64(ghdr, fname, custom_sort);
+               break;
+       default:
+               fprintf(stderr, "unrecognized ELF class %d %s\n",
+                       ehdr->e_ident[EI_CLASS], fname);
                break;
        }
-       }  /* end switch */
 
-       cleanup();
+       return rc;
 }
 
-int
-main(int argc, char *argv[])
+int main(int argc, char *argv[])
 {
-       int n_error = 0;  /* gcc-4.3.0 false positive complaint */
-       int i;
+       int i, n_error = 0;  /* gcc-4.3.0 false positive complaint */
+       size_t size = 0;
+       void *addr = NULL;
 
        if (argc < 2) {
-               fprintf(stderr, "usage: sortextable vmlinux...\n");
+               fprintf(stderr, "usage: sorttable vmlinux...\n");
                return 0;
        }
 
        /* Process each file in turn, allowing deep failure. */
        for (i = 1; i < argc; i++) {
-               char *file = argv[i];
-               int const sjval = setjmp(jmpenv);
+               addr = mmap_file(argv[i], &size);
+               if (!addr) {
+                       ++n_error;
+                       continue;
+               }
 
-               switch (sjval) {
-               default:
-                       fprintf(stderr, "internal error: %s\n", file);
-                       exit(1);
-                       break;
-               case SJ_SETJMP:    /* normal sequence */
-                       /* Avoid problems if early cleanup() */
-                       fd_map = -1;
-                       ehdr_curr = NULL;
-                       mmap_failed = 1;
-                       do_file(file);
-                       break;
-               case SJ_FAIL:    /* error in do_file or below */
+               if (do_file(argv[i], addr))
                        ++n_error;
-                       break;
-               case SJ_SUCCEED:    /* premature success */
-                       /* do nothing */
-                       break;
-               }  /* end switch */
+
+               munmap(addr, size);
        }
+
        return !!n_error;
 }
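The two '#include "sorttable.h"' lines above are the C template trick this tool is built on: the header deliberately has no include guard, and each inclusion re-expands the same function bodies under a different set of #defines, producing do_sort_32() and do_sort_64() from one source. The idiom in miniature, with a hypothetical sum.h (stdint.h assumed):

        /* sum.h, deliberately no include guard */
        #ifdef WIDE
        # define sum_t  uint64_t
        # define do_sum do_sum_64
        #else
        # define sum_t  uint32_t
        # define do_sum do_sum_32
        #endif
        static sum_t do_sum(const sum_t *v, int n)
        {
                sum_t s = 0;
                while (n--)
                        s += v[n];
                return s;
        }
        #undef sum_t
        #undef do_sum

        /* user.c */
        #include "sum.h"        /* instantiates do_sum_32() */
        #define WIDE
        #include "sum.h"        /* instantiates do_sum_64() */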
diff --git a/scripts/sorttable.h b/scripts/sorttable.h
new file mode 100644 (file)
index 0000000..a2baa2f
--- /dev/null
+++ b/scripts/sorttable.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * sorttable.h
+ *
+ * Added ORC unwind tables sort support and other updates:
+ * Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
+ * Shile Zhang <shile.zhang@linux.alibaba.com>
+ *
+ * Copyright 2011 - 2012 Cavium, Inc.
+ *
+ * Some of this code was taken out of arch/x86/kernel/unwind_orc.c, written by:
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * Some of this code was taken out of recordmcount.h written by:
+ *
+ * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
+ * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+ */
+
+#undef extable_ent_size
+#undef compare_extable
+#undef do_sort
+#undef Elf_Addr
+#undef Elf_Ehdr
+#undef Elf_Shdr
+#undef Elf_Rel
+#undef Elf_Rela
+#undef Elf_Sym
+#undef ELF_R_SYM
+#undef Elf_r_sym
+#undef ELF_R_INFO
+#undef Elf_r_info
+#undef ELF_ST_BIND
+#undef ELF_ST_TYPE
+#undef fn_ELF_R_SYM
+#undef fn_ELF_R_INFO
+#undef uint_t
+#undef _r
+#undef _w
+
+#ifdef SORTTABLE_64
+# define extable_ent_size      16
+# define compare_extable       compare_extable_64
+# define do_sort               do_sort_64
+# define Elf_Addr              Elf64_Addr
+# define Elf_Ehdr              Elf64_Ehdr
+# define Elf_Shdr              Elf64_Shdr
+# define Elf_Rel               Elf64_Rel
+# define Elf_Rela              Elf64_Rela
+# define Elf_Sym               Elf64_Sym
+# define ELF_R_SYM             ELF64_R_SYM
+# define Elf_r_sym             Elf64_r_sym
+# define ELF_R_INFO            ELF64_R_INFO
+# define Elf_r_info            Elf64_r_info
+# define ELF_ST_BIND           ELF64_ST_BIND
+# define ELF_ST_TYPE           ELF64_ST_TYPE
+# define fn_ELF_R_SYM          fn_ELF64_R_SYM
+# define fn_ELF_R_INFO         fn_ELF64_R_INFO
+# define uint_t                        uint64_t
+# define _r                    r8
+# define _w                    w8
+#else
+# define extable_ent_size      8
+# define compare_extable       compare_extable_32
+# define do_sort               do_sort_32
+# define Elf_Addr              Elf32_Addr
+# define Elf_Ehdr              Elf32_Ehdr
+# define Elf_Shdr              Elf32_Shdr
+# define Elf_Rel               Elf32_Rel
+# define Elf_Rela              Elf32_Rela
+# define Elf_Sym               Elf32_Sym
+# define ELF_R_SYM             ELF32_R_SYM
+# define Elf_r_sym             Elf32_r_sym
+# define ELF_R_INFO            ELF32_R_INFO
+# define Elf_r_info            Elf32_r_info
+# define ELF_ST_BIND           ELF32_ST_BIND
+# define ELF_ST_TYPE           ELF32_ST_TYPE
+# define fn_ELF_R_SYM          fn_ELF32_R_SYM
+# define fn_ELF_R_INFO         fn_ELF32_R_INFO
+# define uint_t                        uint32_t
+# define _r                    r
+# define _w                    w
+#endif
+
+#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
+/* The ORC unwinder only supports x86_64 */
+#include <errno.h>
+#include <pthread.h>
+#include <asm/orc_types.h>
+
+#define ERRSTR_MAXSZ   256
+
+char g_err[ERRSTR_MAXSZ];
+int *g_orc_ip_table;
+struct orc_entry *g_orc_table;
+
+pthread_t orc_sort_thread;
+
+static inline unsigned long orc_ip(const int *ip)
+{
+       return (unsigned long)ip + *ip;
+}
+
+static int orc_sort_cmp(const void *_a, const void *_b)
+{
+       struct orc_entry *orc_a;
+       const int *a = g_orc_ip_table + *(int *)_a;
+       const int *b = g_orc_ip_table + *(int *)_b;
+       unsigned long a_val = orc_ip(a);
+       unsigned long b_val = orc_ip(b);
+
+       if (a_val > b_val)
+               return 1;
+       if (a_val < b_val)
+               return -1;
+
+       /*
+        * The "weak" section terminator entries need to always be on the left
+        * to ensure the lookup code skips them in favor of real entries.
+        * These terminator entries exist to handle any gaps created by
+        * whitelisted .o files which didn't get objtool generation.
+        */
+       orc_a = g_orc_table + (a - g_orc_ip_table);
+       return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
+}
+
+static void *sort_orctable(void *arg)
+{
+       int i;
+       int *idxs = NULL;
+       int *tmp_orc_ip_table = NULL;
+       struct orc_entry *tmp_orc_table = NULL;
+       unsigned int *orc_ip_size = (unsigned int *)arg;
+       unsigned int num_entries = *orc_ip_size / sizeof(int);
+       unsigned int orc_size = num_entries * sizeof(struct orc_entry);
+
+       idxs = (int *)malloc(*orc_ip_size);
+       if (!idxs) {
+               snprintf(g_err, ERRSTR_MAXSZ, "malloc idxs: %s",
+                        strerror(errno));
+               pthread_exit(g_err);
+       }
+
+       tmp_orc_ip_table = (int *)malloc(*orc_ip_size);
+       if (!tmp_orc_ip_table) {
+               snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_ip_table: %s",
+                        strerror(errno));
+               pthread_exit(g_err);
+       }
+
+       tmp_orc_table = (struct orc_entry *)malloc(orc_size);
+       if (!tmp_orc_table) {
+               snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_table: %s",
+                        strerror(errno));
+               pthread_exit(g_err);
+       }
+
+       /* initialize indices array, convert ip_table to absolute address */
+       for (i = 0; i < num_entries; i++) {
+               idxs[i] = i;
+               tmp_orc_ip_table[i] = g_orc_ip_table[i] + i * sizeof(int);
+       }
+       memcpy(tmp_orc_table, g_orc_table, orc_size);
+
+       qsort(idxs, num_entries, sizeof(int), orc_sort_cmp);
+
+       for (i = 0; i < num_entries; i++) {
+               if (idxs[i] == i)
+                       continue;
+
+               /* convert back to relative address */
+               g_orc_ip_table[i] = tmp_orc_ip_table[idxs[i]] - i * sizeof(int);
+               g_orc_table[i] = tmp_orc_table[idxs[i]];
+       }
+
+       free(idxs);
+       free(tmp_orc_ip_table);
+       free(tmp_orc_table);
+       pthread_exit(NULL);
+}
+#endif
+
+static int compare_extable(const void *a, const void *b)
+{
+       Elf_Addr av = _r(a);
+       Elf_Addr bv = _r(b);
+
+       if (av < bv)
+               return -1;
+       if (av > bv)
+               return 1;
+       return 0;
+}
+
+static int do_sort(Elf_Ehdr *ehdr,
+                  char const *const fname,
+                  table_sort_t custom_sort)
+{
+       int rc = -1;
+       Elf_Shdr *s, *shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
+       Elf_Shdr *strtab_sec = NULL;
+       Elf_Shdr *symtab_sec = NULL;
+       Elf_Shdr *extab_sec = NULL;
+       Elf_Sym *sym;
+       const Elf_Sym *symtab;
+       Elf32_Word *symtab_shndx = NULL;
+       Elf_Sym *sort_needed_sym = NULL;
+       Elf_Shdr *sort_needed_sec;
+       Elf_Rel *relocs = NULL;
+       int relocs_size = 0;
+       uint32_t *sort_needed_loc;
+       const char *secstrings;
+       const char *strtab;
+       char *extab_image;
+       int extab_index = 0;
+       int i;
+       int idx;
+       unsigned int shnum;
+       unsigned int shstrndx;
+#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
+       unsigned int orc_ip_size = 0;
+       unsigned int orc_size = 0;
+       unsigned int orc_num_entries = 0;
+#endif
+
+       shstrndx = r2(&ehdr->e_shstrndx);
+       if (shstrndx == SHN_XINDEX)
+               shstrndx = r(&shdr[0].sh_link);
+       secstrings = (const char *)ehdr + _r(&shdr[shstrndx].sh_offset);
+
+       shnum = r2(&ehdr->e_shnum);
+       if (shnum == SHN_UNDEF)
+               shnum = _r(&shdr[0].sh_size);
+
+       for (i = 0, s = shdr; s < shdr + shnum; i++, s++) {
+               idx = r(&s->sh_name);
+               if (!strcmp(secstrings + idx, "__ex_table")) {
+                       extab_sec = s;
+                       extab_index = i;
+               }
+               if (!strcmp(secstrings + idx, ".symtab"))
+                       symtab_sec = s;
+               if (!strcmp(secstrings + idx, ".strtab"))
+                       strtab_sec = s;
+
+               if ((r(&s->sh_type) == SHT_REL ||
+                    r(&s->sh_type) == SHT_RELA) &&
+                   r(&s->sh_info) == extab_index) {
+                       relocs = (void *)ehdr + _r(&s->sh_offset);
+                       relocs_size = _r(&s->sh_size);
+               }
+               if (r(&s->sh_type) == SHT_SYMTAB_SHNDX)
+                       symtab_shndx = (Elf32_Word *)((const char *)ehdr +
+                                                     _r(&s->sh_offset));
+
+#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
+               /* locate the ORC unwind tables */
+               if (!strcmp(secstrings + idx, ".orc_unwind_ip")) {
+                       orc_ip_size = s->sh_size;
+                       g_orc_ip_table = (int *)((void *)ehdr +
+                                                  s->sh_offset);
+               }
+               if (!strcmp(secstrings + idx, ".orc_unwind")) {
+                       orc_size = s->sh_size;
+                       g_orc_table = (struct orc_entry *)((void *)ehdr +
+                                                            s->sh_offset);
+               }
+#endif
+       } /* for loop */
+
+#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
+       if (!g_orc_ip_table || !g_orc_table) {
+               fprintf(stderr,
+                       "incomplete ORC unwind tables in file: %s\n", fname);
+               goto out;
+       }
+
+       orc_num_entries = orc_ip_size / sizeof(int);
+       if (orc_ip_size % sizeof(int) != 0 ||
+           orc_size % sizeof(struct orc_entry) != 0 ||
+           orc_num_entries != orc_size / sizeof(struct orc_entry)) {
+               fprintf(stderr,
+                       "inconsistent ORC unwind table entries in file: %s\n",
+                       fname);
+               goto out;
+       }
+
+       /* create thread to sort ORC unwind tables concurrently */
+       if (pthread_create(&orc_sort_thread, NULL,
+                          sort_orctable, &orc_ip_size)) {
+               fprintf(stderr,
+                       "pthread_create orc_sort_thread failed '%s': %s\n",
+                       strerror(errno), fname);
+               goto out;
+       }
+#endif
+       if (!extab_sec) {
+               fprintf(stderr, "no __ex_table in file: %s\n", fname);
+               goto out;
+       }
+
+       if (!symtab_sec) {
+               fprintf(stderr, "no .symtab in file: %s\n", fname);
+               goto out;
+       }
+
+       if (!strtab_sec) {
+               fprintf(stderr, "no .strtab in file: %s\n", fname);
+               goto out;
+       }
+
+       extab_image = (void *)ehdr + _r(&extab_sec->sh_offset);
+       strtab = (const char *)ehdr + _r(&strtab_sec->sh_offset);
+       symtab = (const Elf_Sym *)((const char *)ehdr +
+                                                 _r(&symtab_sec->sh_offset));
+
+       if (custom_sort) {
+               custom_sort(extab_image, _r(&extab_sec->sh_size));
+       } else {
+               int num_entries = _r(&extab_sec->sh_size) / extable_ent_size;
+               qsort(extab_image, num_entries,
+                     extable_ent_size, compare_extable);
+       }
+
+       /* If there were relocations, we no longer need them. */
+       if (relocs)
+               memset(relocs, 0, relocs_size);
+
+       /* find the flag main_extable_sort_needed */
+       for (sym = (void *)ehdr + _r(&symtab_sec->sh_offset);
+            sym < symtab + _r(&symtab_sec->sh_size) / sizeof(Elf_Sym);
+            sym++) {
+               if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
+                       continue;
+               if (!strcmp(strtab + r(&sym->st_name),
+                           "main_extable_sort_needed")) {
+                       sort_needed_sym = sym;
+                       break;
+               }
+       }
+
+       if (!sort_needed_sym) {
+               fprintf(stderr,
+                       "no main_extable_sort_needed symbol in file: %s\n",
+                       fname);
+               goto out;
+       }
+
+       sort_needed_sec = &shdr[get_secindex(r2(&sort_needed_sym->st_shndx),
+                                            sort_needed_sym - symtab,
+                                            symtab_shndx)];
+       sort_needed_loc = (void *)ehdr +
+               _r(&sort_needed_sec->sh_offset) +
+               _r(&sort_needed_sym->st_value) -
+               _r(&sort_needed_sec->sh_addr);
+
+       /* extable has been sorted, clear the flag */
+       w(0, sort_needed_loc);
+       rc = 0;
+
+out:
+#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
+       if (orc_sort_thread) {
+               void *retval = NULL;
+
+               /* wait for the ORC table sort to finish; pthread_join()
+                * returns an error number and does not set errno
+                */
+               ret = pthread_join(orc_sort_thread, &retval);
+               if (ret) {
+                       rc = -1;
+                       fprintf(stderr,
+                               "pthread_join failed '%s': %s\n",
+                               strerror(ret), fname);
+               } else if (retval) {
+                       rc = -1;
+                       fprintf(stderr,
+                               "failed to sort ORC tables '%s': %s\n",
+                               (char *)retval, fname);
+               }
+       }
+#endif
+       return rc;
+}
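The function above overlaps the two sorts: the main thread qsort()s the exception table while a helper thread sorts the ORC data, and both meet again at the out: label. Below is a minimal userspace sketch of that pattern; it is not the kernel's sort_orctable() (which must keep .orc_unwind_ip and .orc_unwind in step while sorting), and cmp_int, sort_worker and struct sort_job are invented for the illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_int(const void *a, const void *b)
{
        int x = *(const int *)a, y = *(const int *)b;

        return (x > y) - (x < y);
}

struct sort_job {
        int *table;
        size_t nr;
};

static void *sort_worker(void *arg)
{
        struct sort_job *job = arg;

        qsort(job->table, job->nr, sizeof(int), cmp_int);
        return NULL;    /* a non-NULL return would carry an error string */
}

int main(void)
{
        int extab[] = { 3, 1, 2 }, orc[] = { 9, 7, 8 };
        struct sort_job job = { orc, 3 };
        pthread_t tid;
        void *retval;
        int ret;

        ret = pthread_create(&tid, NULL, sort_worker, &job);
        if (ret) {
                fprintf(stderr, "pthread_create: %s\n", strerror(ret));
                return 1;
        }
        qsort(extab, 3, sizeof(int), cmp_int); /* main thread's own work */
        ret = pthread_join(tid, &retval);
        if (ret || retval)
                return 1;
        printf("%d %d %d / %d %d %d\n", extab[0], extab[1], extab[2],
               orc[0], orc[1], orc[2]);
        return 0;
}

Returning a non-NULL pointer from the worker is how the real code reports a sort failure back through pthread_join().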
index be1dd9d2cb2fb90d3969902e456144b6ecc3ea89..74643849902950fd7c0dd286aec84e2b57ce4be3 100644 (file)
@@ -22,7 +22,7 @@ obj-$(CONFIG_SECURITY)                        += security.o
 obj-$(CONFIG_SECURITYFS)               += inode.o
 obj-$(CONFIG_SECURITY_SELINUX)         += selinux/
 obj-$(CONFIG_SECURITY_SMACK)           += smack/
-obj-$(CONFIG_AUDIT)                    += lsm_audit.o
+obj-$(CONFIG_SECURITY)                 += lsm_audit.o
 obj-$(CONFIG_SECURITY_TOMOYO)          += tomoyo/
 obj-$(CONFIG_SECURITY_APPARMOR)                += apparmor/
 obj-$(CONFIG_SECURITY_YAMA)            += yama/
index 09996f2552ee45b1503d3fae321032a90e03cc14..47aff8700547d2434e2b4beeffa89dc59f8b1e96 100644 (file)
@@ -623,7 +623,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
 
 void __aa_bump_ns_revision(struct aa_ns *ns)
 {
-       ns->revision++;
+       WRITE_ONCE(ns->revision, ns->revision + 1);
        wake_up_interruptible(&ns->wait);
 }
 
index 9be7ccb8379edff7b2261ed87c0ec90a4ebe2670..6ceb74e0f7895548c5faaa6f95b7eb8c11367e87 100644 (file)
@@ -317,6 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
 
        if (!bprm || !profile->xattr_count)
                return 0;
+       might_sleep();
 
        /* transition from exec match to xattr set */
        state = aa_dfa_null_transition(profile->xmatch, state);
@@ -361,10 +362,11 @@ out:
 }
 
 /**
- * __attach_match_ - find an attachment match
+ * find_attach - do attachment search for unconfined processes
  * @bprm - binprm structure of transitioning task
- * @name - to match against  (NOT NULL)
+ * @ns: the current namespace  (NOT NULL)
  * @head - profile list to walk  (NOT NULL)
+ * @name - to match against  (NOT NULL)
  * @info - info message if there was an error (NOT NULL)
  *
  * Do a linear search on the profiles in the list.  There is a matching
@@ -374,12 +376,11 @@ out:
  *
  * Requires: @head not be shared or have appropriate locks held
  *
- * Returns: profile or NULL if no match found
+ * Returns: label or NULL if no match found
  */
-static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
-                                        const char *name,
-                                        struct list_head *head,
-                                        const char **info)
+static struct aa_label *find_attach(const struct linux_binprm *bprm,
+                                   struct aa_ns *ns, struct list_head *head,
+                                   const char *name, const char **info)
 {
        int candidate_len = 0, candidate_xattrs = 0;
        bool conflict = false;
@@ -388,6 +389,8 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
        AA_BUG(!name);
        AA_BUG(!head);
 
+       rcu_read_lock();
+restart:
        list_for_each_entry_rcu(profile, head, base.list) {
                if (profile->label.flags & FLAG_NULL &&
                    &profile->label == ns_unconfined(profile->ns))
@@ -413,16 +416,32 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
                        perm = dfa_user_allow(profile->xmatch, state);
                        /* any accepting state means a valid match. */
                        if (perm & MAY_EXEC) {
-                               int ret;
+                               int ret = 0;
 
                                if (count < candidate_len)
                                        continue;
 
-                               ret = aa_xattrs_match(bprm, profile, state);
-                               /* Fail matching if the xattrs don't match */
-                               if (ret < 0)
-                                       continue;
-
+                               if (bprm && profile->xattr_count) {
+                                       long rev = READ_ONCE(ns->revision);
+
+                                       if (!aa_get_profile_not0(profile))
+                                               goto restart;
+                                       rcu_read_unlock();
+                                       ret = aa_xattrs_match(bprm, profile,
+                                                             state);
+                                       rcu_read_lock();
+                                       aa_put_profile(profile);
+                                       if (rev != READ_ONCE(ns->revision))
+                                               /* policy changed */
+                                               goto restart;
+                                       /*
+                                        * Fail matching if the xattrs don't
+                                        * match
+                                        */
+                                       if (ret < 0)
+                                               continue;
+                               }
                                /*
                                 * TODO: allow for more flexible best match
                                 *
@@ -445,43 +464,28 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
                                candidate_xattrs = ret;
                                conflict = false;
                        }
-               } else if (!strcmp(profile->base.name, name))
+               } else if (!strcmp(profile->base.name, name)) {
                        /*
                         * old exact non-re match, without conditionals such
                         * as xattrs. no more searching required
                         */
-                       return profile;
+                       candidate = profile;
+                       goto out;
+               }
        }
 
-       if (conflict) {
-               *info = "conflicting profile attachments";
+       if (!candidate || conflict) {
+               if (conflict)
+                       *info = "conflicting profile attachments";
+               rcu_read_unlock();
                return NULL;
        }
 
-       return candidate;
-}
-
-/**
- * find_attach - do attachment search for unconfined processes
- * @bprm - binprm structure of transitioning task
- * @ns: the current namespace  (NOT NULL)
- * @list: list to search  (NOT NULL)
- * @name: the executable name to match against  (NOT NULL)
- * @info: info message if there was an error
- *
- * Returns: label or NULL if no match found
- */
-static struct aa_label *find_attach(const struct linux_binprm *bprm,
-                                   struct aa_ns *ns, struct list_head *list,
-                                   const char *name, const char **info)
-{
-       struct aa_profile *profile;
-
-       rcu_read_lock();
-       profile = aa_get_profile(__attach_match(bprm, name, list, info));
+out:
+       candidate = aa_get_newest_profile(candidate);
        rcu_read_unlock();
 
-       return profile ? &profile->label : NULL;
+       return &candidate->label;
 }
 
 static const char *next_name(int xtype, const char *name)
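The restart logic above is worth spelling out: find_attach() samples ns->revision with READ_ONCE(), drops the RCU read lock so aa_xattrs_match() may sleep, retakes it, and redoes the whole walk if a concurrent policy change bumped the revision. A rough userspace analogue of that sample/validate/retry shape, using C11 atomics in place of READ_ONCE()/WRITE_ONCE() (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic long ns_revision;        /* bumped on every policy change */

static void bump_revision(void)         /* cf. __aa_bump_ns_revision() */
{
        atomic_fetch_add_explicit(&ns_revision, 1, memory_order_release);
}

static bool match_with_retry(bool (*slow_match)(void))
{
        long rev;
        bool ok;

restart:
        rev = atomic_load_explicit(&ns_revision, memory_order_acquire);
        /* ... list walk happens here (an RCU read section in-kernel) ... */
        ok = slow_match();      /* may sleep; kernel drops rcu_read_lock() */
        if (rev != atomic_load_explicit(&ns_revision, memory_order_acquire))
                goto restart;   /* policy changed underneath us: walk again */
        return ok;
}

static bool dummy_match(void)
{
        return true;
}

int main(void)
{
        bump_revision();
        printf("match=%d\n", match_with_retry(dummy_match));
        return 0;
}

As in the kernel, the retry loop is only livelock-free because policy changes are rare compared to lookups.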
index fe2ebe5e865efcb14a4b84352e006b247f5ec2f2..f1caf3674e1c1cedf39c01fb800d40397218753a 100644 (file)
@@ -618,8 +618,7 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
        fctx = file_ctx(file);
 
        rcu_read_lock();
-       flabel  = aa_get_newest_label(rcu_dereference(fctx->label));
-       rcu_read_unlock();
+       flabel  = rcu_dereference(fctx->label);
        AA_BUG(!flabel);
 
        /* revalidate access, if task is unconfined, or the cached cred
@@ -631,9 +630,13 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
         */
        denied = request & ~fctx->allow;
        if (unconfined(label) || unconfined(flabel) ||
-           (!denied && aa_label_is_subset(flabel, label)))
+           (!denied && aa_label_is_subset(flabel, label))) {
+               rcu_read_unlock();
                goto done;
+       }
 
+       flabel  = aa_get_newest_label(flabel);
+       rcu_read_unlock();
        /* TODO: label cross check */
 
        if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
@@ -643,8 +646,9 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
        else if (S_ISSOCK(file_inode(file)->i_mode))
                error = __file_sock_perm(op, label, flabel, file, request,
                                         denied);
-done:
        aa_put_label(flabel);
+
+done:
        return error;
 }
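The effect of the reordering above is that the common case stays reference-free: the file's label is only inspected under the RCU read lock, and a counted reference via aa_get_newest_label() is taken only when the slow path has to keep using the label after rcu_read_unlock(). A hedged sketch of that fast/slow split with a C11 atomic refcount (the struct and helpers are invented for the demo):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct label {
        _Atomic int refcnt;
};

static struct label the_label = { 1 };

static bool fast_path_ok(const struct label *l)
{
        /* stands in for the unconfined/cached-permission checks */
        return l != NULL;
}

int main(void)
{
        struct label *l = &the_label;   /* rcu_dereference() in-kernel */

        if (fast_path_ok(l)) {
                /* common case: finished before "unlock", no ref taken */
                printf("fast path, refcnt=%d\n", atomic_load(&l->refcnt));
                return 0;
        }
        atomic_fetch_add(&l->refcnt, 1);        /* cf. aa_get_newest_label() */
        /* ... blocking mediation runs here, after rcu_read_unlock() ... */
        atomic_fetch_sub(&l->refcnt, 1);        /* cf. aa_put_label() */
        return 0;
}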
 
index 4ed6688f9d404c1d73d6da7134b572db760ebb21..e0828ee7a34573edb6519836363d5ab4e462197e 100644 (file)
@@ -442,7 +442,7 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
        buffer = aa_get_buffer(false);
        old_buffer = aa_get_buffer(false);
        error = -ENOMEM;
-       if (!buffer || old_buffer)
+       if (!buffer || !old_buffer)
                goto out;
 
        error = fn_for_each_confined(label, profile,
index 03104830c9132a38574407a32a2c87536dbea8f6..269f2f53c0b115405eda0046249151b605d1d116 100644 (file)
@@ -1125,8 +1125,8 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
                mutex_lock_nested(&ns->parent->lock, ns->level);
-               __aa_remove_ns(ns);
                __aa_bump_ns_revision(ns);
+               __aa_remove_ns(ns);
                mutex_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
@@ -1138,9 +1138,9 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
                        goto fail_ns_lock;
                }
                name = profile->base.hname;
+               __aa_bump_ns_revision(ns);
                __remove_profile(profile);
                __aa_labelset_update_subtree(ns);
-               __aa_bump_ns_revision(ns);
                mutex_unlock(&ns->lock);
        }
 
index f19a895ad7cdf6c9bf24e05ffc6ee394ab3363ab..ef8dfd47c7e391328e2b1e7886b74c1ea87a231b 100644 (file)
@@ -45,7 +45,7 @@
 #define DONT_HASH      0x0200
 
 #define INVALID_PCR(a) (((a) < 0) || \
-       (a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8))
+       (a) >= (sizeof_field(struct integrity_iint_cache, measured_pcrs) * 8))
 
 int ima_policy_flag;
 static int temp_ima_appraise;
@@ -274,7 +274,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
         * lsm rules can change
         */
        memcpy(nentry, entry, sizeof(*nentry));
-       memset(nentry->lsm, 0, FIELD_SIZEOF(struct ima_rule_entry, lsm));
+       memset(nentry->lsm, 0, sizeof_field(struct ima_rule_entry, lsm));
 
        for (i = 0; i < MAX_LSM_RULES; i++) {
                if (!entry->lsm[i].rule)
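For reference, sizeof_field() (the replacement for FIELD_SIZEOF()) yields a member's size without needing an instance of the struct. The macro below mirrors the definition in include/linux/stddef.h (quoted from memory, so check the tree); the struct is a stand-in so the demo is self-contained:

#include <stdio.h>

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct iint_demo {                      /* stand-in for the real struct */
        unsigned char measured_pcrs[8];
};

int main(void)
{
        /* same shape as the INVALID_PCR() bound above: bytes * 8 bits */
        printf("%zu\n", sizeof_field(struct iint_demo, measured_pcrs) * 8);
        return 0;       /* prints 64 */
}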
index dd313438fecf9763eb5179f966620aaaef5d4ccc..47c041563d41c5fe16e71d70f7e001f48ff1b846 100644 (file)
@@ -21,10 +21,6 @@ config KEYS
 
          If you are unsure as to whether this is required, answer N.
 
-config KEYS_COMPAT
-       def_bool y
-       depends on COMPAT && KEYS
-
 config KEYS_REQUEST_CACHE
        bool "Enable temporary caching of the last request_key() result"
        depends on KEYS
index 074f27538f55968e1a3ee3d2187d89bfb99d8f44..5f40807f05b3d1d27cc21d875302af37959ef758 100644 (file)
@@ -17,7 +17,7 @@ obj-y := \
        request_key_auth.o \
        user_defined.o
 compat-obj-$(CONFIG_KEY_DH_OPERATIONS) += compat_dh.o
-obj-$(CONFIG_KEYS_COMPAT) += compat.o $(compat-obj-y)
+obj-$(CONFIG_COMPAT) += compat.o $(compat-obj-y)
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSCTL) += sysctl.o
 obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
index 9bcc404131aa0d30c86045d2b24a03856e939398..b975f8f11124b7f79112a3bced90673c9f5d7f5a 100644 (file)
@@ -46,11 +46,6 @@ static long compat_keyctl_instantiate_key_iov(
 
 /*
  * The key control system call, 32-bit compatibility version for 64-bit archs
- *
- * This should only be called if the 64-bit arch uses weird pointers in 32-bit
- * mode or doesn't guarantee that the top 32-bits of the argument registers on
- * taking a 32-bit syscall are zero.  If you can, you should call sys_keyctl()
- * directly.
  */
 COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
                       u32, arg2, u32, arg3, u32, arg4, u32, arg5)
index c039373488bd940d2a1c6dac919e46e586cd3002..ba3e2da14ceff60636d688cb1e4f78a7cb62b210 100644 (file)
@@ -264,7 +264,7 @@ extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
                              size_t, struct keyctl_kdf_params __user *);
 extern long __keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
                                size_t, struct keyctl_kdf_params *);
-#ifdef CONFIG_KEYS_COMPAT
+#ifdef CONFIG_COMPAT
 extern long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params,
                                char __user *buffer, size_t buflen,
                                struct compat_keyctl_kdf_params __user *kdf);
@@ -279,7 +279,7 @@ static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
        return -EOPNOTSUPP;
 }
 
-#ifdef CONFIG_KEYS_COMPAT
+#ifdef CONFIG_COMPAT
 static inline long compat_keyctl_dh_compute(
                                struct keyctl_dh_params __user *params,
                                char __user *buffer, size_t buflen,
index a9810ac2776f6c2fb75e598945b7dbace69f5d14..08ec7f48f01d09d643f2f0b69021686a88adad7e 100644 (file)
@@ -309,6 +309,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
                return rc;
 
        rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
+       tpm2_flush_context(chip, blob_handle);
 
        return rc;
 }
index b2f87015d6e900ca2bc2593262b2c4904daf4721..5a952617a0eba388a99345206de469b666770122 100644 (file)
 
 static enum lockdown_reason kernel_locked_down;
 
-static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
-       [LOCKDOWN_NONE] = "none",
-       [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
-       [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
-       [LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
-       [LOCKDOWN_KEXEC] = "kexec of unsigned images",
-       [LOCKDOWN_HIBERNATION] = "hibernation",
-       [LOCKDOWN_PCI_ACCESS] = "direct PCI access",
-       [LOCKDOWN_IOPORT] = "raw io port access",
-       [LOCKDOWN_MSR] = "raw MSR access",
-       [LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
-       [LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
-       [LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
-       [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
-       [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
-       [LOCKDOWN_DEBUGFS] = "debugfs access",
-       [LOCKDOWN_XMON_WR] = "xmon write access",
-       [LOCKDOWN_INTEGRITY_MAX] = "integrity",
-       [LOCKDOWN_KCORE] = "/proc/kcore access",
-       [LOCKDOWN_KPROBES] = "use of kprobes",
-       [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
-       [LOCKDOWN_PERF] = "unsafe use of perf",
-       [LOCKDOWN_TRACEFS] = "use of tracefs",
-       [LOCKDOWN_XMON_RW] = "xmon read and write access",
-       [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
-};
-
 static const enum lockdown_reason lockdown_levels[] = {LOCKDOWN_NONE,
                                                 LOCKDOWN_INTEGRITY_MAX,
                                                 LOCKDOWN_CONFIDENTIALITY_MAX};
index e40874373f2b4a72b30648bb58a09f82fbd58a52..2d2bf49016f4f98a6135693131028d32b4f523e7 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/dccp.h>
 #include <linux/sctp.h>
 #include <linux/lsm_audit.h>
+#include <linux/security.h>
 
 /**
  * ipv4_skb_to_auditdata : fill auditdata from skb
@@ -425,6 +426,10 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                                 a->u.ibendport->dev_name,
                                 a->u.ibendport->port);
                break;
+       case LSM_AUDIT_DATA_LOCKDOWN:
+               audit_log_format(ab, " lockdown_reason=");
+               audit_log_string(ab, lockdown_reasons[a->u.reason]);
+               break;
        } /* switch (a->type) */
 }
 
index cd2d18d2d279c87ae0b44454c410a3995166d999..2b5473d924160b76a73ef5dc4111caab58370b45 100644 (file)
 #define LSM_COUNT (__end_lsm_info - __start_lsm_info)
 #define EARLY_LSM_COUNT (__end_early_lsm_info - __start_early_lsm_info)
 
+/*
+ * These are descriptions of the reasons that can be passed to the
+ * security_locked_down() LSM hook. Placing this array here allows
+ * all security modules to use the same descriptions for auditing
+ * purposes.
+ */
+const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+       [LOCKDOWN_NONE] = "none",
+       [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
+       [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
+       [LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
+       [LOCKDOWN_KEXEC] = "kexec of unsigned images",
+       [LOCKDOWN_HIBERNATION] = "hibernation",
+       [LOCKDOWN_PCI_ACCESS] = "direct PCI access",
+       [LOCKDOWN_IOPORT] = "raw io port access",
+       [LOCKDOWN_MSR] = "raw MSR access",
+       [LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
+       [LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
+       [LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
+       [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
+       [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
+       [LOCKDOWN_DEBUGFS] = "debugfs access",
+       [LOCKDOWN_XMON_WR] = "xmon write access",
+       [LOCKDOWN_INTEGRITY_MAX] = "integrity",
+       [LOCKDOWN_KCORE] = "/proc/kcore access",
+       [LOCKDOWN_KPROBES] = "use of kprobes",
+       [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+       [LOCKDOWN_PERF] = "unsafe use of perf",
+       [LOCKDOWN_TRACEFS] = "use of tracefs",
+       [LOCKDOWN_XMON_RW] = "xmon read and write access",
+       [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
+};
+
 struct security_hook_heads security_hook_heads __lsm_ro_after_init;
 static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
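The array above is a designated-initializer string table indexed directly by enum lockdown_reason, which is what lets dump_common_audit_data() print a reason with a single array lookup. A self-contained miniature of the pattern, with a local enum and table invented for the demo:

#include <stdio.h>

enum demo_reason { DEMO_NONE, DEMO_KEXEC, DEMO_HIBERNATION, DEMO_MAX };

static const char *const demo_reasons[DEMO_MAX + 1] = {
        [DEMO_NONE]        = "none",
        [DEMO_KEXEC]       = "kexec of unsigned images",
        [DEMO_HIBERNATION] = "hibernation",
        [DEMO_MAX]         = "max",
};

int main(void)
{
        enum demo_reason what = DEMO_KEXEC;

        /* same lookup shape as the LSM_AUDIT_DATA_LOCKDOWN case above */
        printf("lockdown_reason=%s\n", demo_reasons[what]);
        return 0;
}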
 
index 5711689deb6a1b62ff7c1c4b9bc5dd5ddf7e59e3..1014cb0ee956cd575d5c32453cbae020d2a5cb8f 100644 (file)
@@ -42,6 +42,9 @@ config SECURITY_SELINUX_DISABLE
          using the selinux=0 boot parameter instead of enabling this
          option.
 
+         WARNING: this option is deprecated and will be removed in a future
+         kernel release.
+
          If you are unsure how to answer this question, answer N.
 
 config SECURITY_SELINUX_DEVELOP
@@ -55,7 +58,8 @@ config SECURITY_SELINUX_DEVELOP
          kernel will start in permissive mode (log everything, deny nothing)
          unless you specify enforcing=1 on the kernel command line.  You
          can interactively toggle the kernel between enforcing mode and
-         permissive mode (if permitted by the policy) via /selinux/enforce.
+         permissive mode (if permitted by the policy) via
+         /sys/fs/selinux/enforce.
 
 config SECURITY_SELINUX_AVC_STATS
        bool "NSA SELinux AVC Statistics"
@@ -63,7 +67,7 @@ config SECURITY_SELINUX_AVC_STATS
        default y
        help
          This option collects access vector cache statistics to
-         /selinux/avc/cache_stats, which may be monitored via
+         /sys/fs/selinux/avc/cache_stats, which may be monitored via
          tools such as avcstat.
 
 config SECURITY_SELINUX_CHECKREQPROT_VALUE
@@ -82,6 +86,29 @@ config SECURITY_SELINUX_CHECKREQPROT_VALUE
          default to checking the protection requested by the application.
          The checkreqprot flag may be changed from the default via the
          'checkreqprot=' boot parameter.  It may also be changed at runtime
-         via /selinux/checkreqprot if authorized by policy.
+         via /sys/fs/selinux/checkreqprot if authorized by policy.
 
          If you are unsure how to answer this question, answer 0.
+
+config SECURITY_SELINUX_SIDTAB_HASH_BITS
+       int "NSA SELinux sidtab hashtable size"
+       depends on SECURITY_SELINUX
+       range 8 13
+       default 9
+       help
+         This option sets the number of buckets used in the sidtab hashtable
+         to 2^SECURITY_SELINUX_SIDTAB_HASH_BITS. The number of hash
+         collisions may be viewed at /sys/fs/selinux/ss/sidtab_hash_stats. If
+         chain lengths are high (e.g. > 20), then selecting a higher value
+         here will ensure that lookup times are short and stable.
+
+config SECURITY_SELINUX_SID2STR_CACHE_SIZE
+       int "NSA SELinux SID to context string translation cache size"
+       depends on SECURITY_SELINUX
+       default 256
+       help
+         This option defines the size of the internal SID -> context string
+         cache, which improves the performance of SID to context string
+         conversion.  Setting this option to 0 disables the cache completely.
+
+         If unsure, keep the default value.
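To make the sidtab help text above concrete: the table has 2^SECURITY_SELINUX_SIDTAB_HASH_BITS buckets, so the default of 9 gives 512 buckets and the maximum of 13 gives 8192. A one-line check:

#include <stdio.h>

int main(void)
{
        unsigned int hash_bits = 9;     /* the Kconfig default; range 8..13 */

        printf("%u buckets\n", 1u << hash_bits);        /* "512 buckets" */
        return 0;
}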
index ccf95040938419a6980de885f5331380e24a6e9e..2000f95fb197751852a8ccdbfaad65341a586c87 100644 (file)
@@ -6,7 +6,7 @@
 obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
 
 selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
-            netnode.o netport.o ibpkey.o \
+            netnode.o netport.o \
             ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
             ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
 
@@ -14,6 +14,8 @@ selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
 
 selinux-$(CONFIG_NETLABEL) += netlabel.o
 
+selinux-$(CONFIG_SECURITY_INFINIBAND) += ibpkey.o
+
 ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
 
 $(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
index ecd3829996aa4a16ed17f19c48434ae00fe1b09e..d18cb32a242ae1c5c5391fa13b2ba73d8a7f44b1 100644 (file)
@@ -424,7 +424,7 @@ static inline int avc_xperms_audit(struct selinux_state *state,
        if (likely(!audited))
                return 0;
        return slow_avc_audit(state, ssid, tsid, tclass, requested,
-                       audited, denied, result, ad, 0);
+                       audited, denied, result, ad);
 }
 
 static void avc_node_free(struct rcu_head *rhead)
@@ -617,40 +617,37 @@ static struct avc_node *avc_insert(struct selinux_avc *avc,
        struct avc_node *pos, *node = NULL;
        int hvalue;
        unsigned long flag;
+       spinlock_t *lock;
+       struct hlist_head *head;
 
        if (avc_latest_notif_update(avc, avd->seqno, 1))
-               goto out;
+               return NULL;
 
        node = avc_alloc_node(avc);
-       if (node) {
-               struct hlist_head *head;
-               spinlock_t *lock;
-               int rc = 0;
-
-               hvalue = avc_hash(ssid, tsid, tclass);
-               avc_node_populate(node, ssid, tsid, tclass, avd);
-               rc = avc_xperms_populate(node, xp_node);
-               if (rc) {
-                       kmem_cache_free(avc_node_cachep, node);
-                       return NULL;
-               }
-               head = &avc->avc_cache.slots[hvalue];
-               lock = &avc->avc_cache.slots_lock[hvalue];
+       if (!node)
+               return NULL;
 
-               spin_lock_irqsave(lock, flag);
-               hlist_for_each_entry(pos, head, list) {
-                       if (pos->ae.ssid == ssid &&
-                           pos->ae.tsid == tsid &&
-                           pos->ae.tclass == tclass) {
-                               avc_node_replace(avc, node, pos);
-                               goto found;
-                       }
+       avc_node_populate(node, ssid, tsid, tclass, avd);
+       if (avc_xperms_populate(node, xp_node)) {
+               avc_node_kill(avc, node);
+               return NULL;
+       }
+
+       hvalue = avc_hash(ssid, tsid, tclass);
+       head = &avc->avc_cache.slots[hvalue];
+       lock = &avc->avc_cache.slots_lock[hvalue];
+       spin_lock_irqsave(lock, flag);
+       hlist_for_each_entry(pos, head, list) {
+               if (pos->ae.ssid == ssid &&
+                       pos->ae.tsid == tsid &&
+                       pos->ae.tclass == tclass) {
+                       avc_node_replace(avc, node, pos);
+                       goto found;
                }
-               hlist_add_head_rcu(&node->list, head);
-found:
-               spin_unlock_irqrestore(lock, flag);
        }
-out:
+       hlist_add_head_rcu(&node->list, head);
+found:
+       spin_unlock_irqrestore(lock, flag);
        return node;
 }
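The restructured avc_insert() above follows a common lockless-friendly shape: allocate and fully populate the node with no locks held, then take the per-bucket lock only long enough to either replace a matching entry or link the new node. A userspace sketch of that shape, with a singly linked list and a mutex standing in for the hlist and spinlock (the kernel replaces nodes via RCU rather than updating fields in place):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int key;
        int val;
        struct node *next;
};

static struct node *bucket;
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static void insert_or_replace(int key, int val)
{
        struct node *n = malloc(sizeof(*n));    /* allocated outside the lock */
        struct node *pos;

        if (!n)
                return;
        n->key = key;
        n->val = val;

        pthread_mutex_lock(&bucket_lock);
        for (pos = bucket; pos; pos = pos->next) {
                if (pos->key == key) {  /* found: replace in place */
                        pos->val = val; /* the kernel RCU-swaps list nodes */
                        pthread_mutex_unlock(&bucket_lock);
                        free(n);
                        return;
                }
        }
        n->next = bucket;               /* not found: insert at the head */
        bucket = n;
        pthread_mutex_unlock(&bucket_lock);
}

int main(void)
{
        insert_or_replace(1, 10);
        insert_or_replace(1, 20);
        printf("val=%d\n", bucket->val);        /* prints val=20 */
        return 0;
}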
 
@@ -758,8 +755,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
 noinline int slow_avc_audit(struct selinux_state *state,
                            u32 ssid, u32 tsid, u16 tclass,
                            u32 requested, u32 audited, u32 denied, int result,
-                           struct common_audit_data *a,
-                           unsigned int flags)
+                           struct common_audit_data *a)
 {
        struct common_audit_data stack_data;
        struct selinux_audit_data sad;
@@ -772,17 +768,6 @@ noinline int slow_avc_audit(struct selinux_state *state,
                a->type = LSM_AUDIT_DATA_NONE;
        }
 
-       /*
-        * When in a RCU walk do the audit on the RCU retry.  This is because
-        * the collection of the dname in an inode audit message is not RCU
-        * safe.  Note this may drop some audits when the situation changes
-        * during retry. However this is logically just as if the operation
-        * happened a little later.
-        */
-       if ((a->type == LSM_AUDIT_DATA_INODE) &&
-           (flags & MAY_NOT_BLOCK))
-               return -ECHILD;
-
        sad.tclass = tclass;
        sad.requested = requested;
        sad.ssid = ssid;
@@ -855,15 +840,14 @@ static int avc_update_node(struct selinux_avc *avc,
        /*
         * If we are in a non-blocking code path, e.g. VFS RCU walk,
         * then we must not add permissions to a cache entry
-        * because we cannot safely audit the denial.  Otherwise,
+        * because we will not audit the denial.  Otherwise,
         * during the subsequent blocking retry (e.g. VFS ref walk), we
         * will find the permissions already granted in the cache entry
         * and won't audit anything at all, leading to silent denials in
         * permissive mode that only appear when in enforcing mode.
         *
-        * See the corresponding handling in slow_avc_audit(), and the
-        * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
-        * which is transliterated into AVC_NONBLOCKING.
+        * See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
+        * and selinux_inode_permission().
         */
        if (flags & AVC_NONBLOCKING)
                return 0;
@@ -907,7 +891,7 @@ static int avc_update_node(struct selinux_avc *avc,
        if (orig->ae.xp_node) {
                rc = avc_xperms_populate(node, orig->ae.xp_node);
                if (rc) {
-                       kmem_cache_free(avc_node_cachep, node);
+                       avc_node_kill(avc, node);
                        goto out_unlock;
                }
        }
@@ -1205,6 +1189,25 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
        return rc;
 }
 
+int avc_has_perm_flags(struct selinux_state *state,
+                      u32 ssid, u32 tsid, u16 tclass, u32 requested,
+                      struct common_audit_data *auditdata,
+                      int flags)
+{
+       struct av_decision avd;
+       int rc, rc2;
+
+       rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
+                                 (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
+                                 &avd);
+
+       rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
+                       auditdata, flags);
+       if (rc2)
+               return rc2;
+       return rc;
+}
+
 u32 avc_policy_seqno(struct selinux_state *state)
 {
        return state->avc->avc_cache.latest_notif;
index 116b4d644f68935a82083cdbdd232c5243d203cc..d9e8b2131a6501a1233b43490ecd0c4463c649bf 100644 (file)
@@ -109,7 +109,7 @@ struct selinux_state selinux_state;
 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
 
 #ifdef CONFIG_SECURITY_SELINUX_DEVELOP
-static int selinux_enforcing_boot;
+static int selinux_enforcing_boot __initdata;
 
 static int __init enforcing_setup(char *str)
 {
@@ -123,13 +123,13 @@ __setup("enforcing=", enforcing_setup);
 #define selinux_enforcing_boot 1
 #endif
 
-int selinux_enabled __lsm_ro_after_init = 1;
+int selinux_enabled_boot __initdata = 1;
 #ifdef CONFIG_SECURITY_SELINUX_BOOTPARAM
 static int __init selinux_enabled_setup(char *str)
 {
        unsigned long enabled;
        if (!kstrtoul(str, 0, &enabled))
-               selinux_enabled = enabled ? 1 : 0;
+               selinux_enabled_boot = enabled ? 1 : 0;
        return 1;
 }
 __setup("selinux=", selinux_enabled_setup);
@@ -238,24 +238,6 @@ static inline u32 task_sid(const struct task_struct *task)
        return sid;
 }
 
-/* Allocate and free functions for each kind of security blob. */
-
-static int inode_alloc_security(struct inode *inode)
-{
-       struct inode_security_struct *isec = selinux_inode(inode);
-       u32 sid = current_sid();
-
-       spin_lock_init(&isec->lock);
-       INIT_LIST_HEAD(&isec->list);
-       isec->inode = inode;
-       isec->sid = SECINITSID_UNLABELED;
-       isec->sclass = SECCLASS_FILE;
-       isec->task_sid = sid;
-       isec->initialized = LABEL_INVALID;
-
-       return 0;
-}
-
 static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
 
 /*
@@ -272,7 +254,7 @@ static int __inode_security_revalidate(struct inode *inode,
 
        might_sleep_if(may_sleep);
 
-       if (selinux_state.initialized &&
+       if (selinux_initialized(&selinux_state) &&
            isec->initialized != LABEL_INITIALIZED) {
                if (!may_sleep)
                        return -ECHILD;
@@ -354,37 +336,6 @@ static void inode_free_security(struct inode *inode)
        }
 }
 
-static int file_alloc_security(struct file *file)
-{
-       struct file_security_struct *fsec = selinux_file(file);
-       u32 sid = current_sid();
-
-       fsec->sid = sid;
-       fsec->fown_sid = sid;
-
-       return 0;
-}
-
-static int superblock_alloc_security(struct super_block *sb)
-{
-       struct superblock_security_struct *sbsec;
-
-       sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL);
-       if (!sbsec)
-               return -ENOMEM;
-
-       mutex_init(&sbsec->lock);
-       INIT_LIST_HEAD(&sbsec->isec_head);
-       spin_lock_init(&sbsec->isec_lock);
-       sbsec->sb = sb;
-       sbsec->sid = SECINITSID_UNLABELED;
-       sbsec->def_sid = SECINITSID_FILE;
-       sbsec->mntpoint_sid = SECINITSID_UNLABELED;
-       sb->s_security = sbsec;
-
-       return 0;
-}
-
 static void superblock_free_security(struct super_block *sb)
 {
        struct superblock_security_struct *sbsec = sb->s_security;
@@ -406,11 +357,6 @@ static void selinux_free_mnt_opts(void *mnt_opts)
        kfree(opts);
 }
 
-static inline int inode_doinit(struct inode *inode)
-{
-       return inode_doinit_with_dentry(inode, NULL);
-}
-
 enum {
        Opt_error = -1,
        Opt_context = 0,
@@ -598,7 +544,7 @@ static int sb_finish_set_opts(struct super_block *sb)
                inode = igrab(inode);
                if (inode) {
                        if (!IS_PRIVATE(inode))
-                               inode_doinit(inode);
+                               inode_doinit_with_dentry(inode, NULL);
                        iput(inode);
                }
                spin_lock(&sbsec->isec_lock);
@@ -659,7 +605,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
 
        mutex_lock(&sbsec->lock);
 
-       if (!selinux_state.initialized) {
+       if (!selinux_initialized(&selinux_state)) {
                if (!opts) {
                        /* Defer initialization until selinux_complete_init,
                           after the initial policy is loaded and the security
@@ -752,6 +698,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
 
        if (!strcmp(sb->s_type->name, "debugfs") ||
            !strcmp(sb->s_type->name, "tracefs") ||
+           !strcmp(sb->s_type->name, "binderfs") ||
            !strcmp(sb->s_type->name, "pstore"))
                sbsec->flags |= SE_SBGENFS;
 
@@ -928,7 +875,7 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
         * if the parent was able to be mounted it clearly had no special lsm
         * mount options.  thus we can safely deal with this superblock later
         */
-       if (!selinux_state.initialized)
+       if (!selinux_initialized(&selinux_state))
                return 0;
 
        /*
@@ -1103,7 +1050,7 @@ static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb)
        if (!(sbsec->flags & SE_SBINITIALIZED))
                return 0;
 
-       if (!selinux_state.initialized)
+       if (!selinux_initialized(&selinux_state))
                return 0;
 
        if (sbsec->flags & FSCONTEXT_MNT) {
@@ -1833,8 +1780,8 @@ static int may_create(struct inode *dir,
        if (rc)
                return rc;
 
-       rc = selinux_determine_inode_label(selinux_cred(current_cred()), dir,
-                                          &dentry->d_name, tclass, &newsid);
+       rc = selinux_determine_inode_label(tsec, dir, &dentry->d_name, tclass,
+                                          &newsid);
        if (rc)
                return rc;
 
@@ -2592,7 +2539,22 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
 
 static int selinux_sb_alloc_security(struct super_block *sb)
 {
-       return superblock_alloc_security(sb);
+       struct superblock_security_struct *sbsec;
+
+       sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL);
+       if (!sbsec)
+               return -ENOMEM;
+
+       mutex_init(&sbsec->lock);
+       INIT_LIST_HEAD(&sbsec->isec_head);
+       spin_lock_init(&sbsec->isec_lock);
+       sbsec->sb = sb;
+       sbsec->sid = SECINITSID_UNLABELED;
+       sbsec->def_sid = SECINITSID_FILE;
+       sbsec->mntpoint_sid = SECINITSID_UNLABELED;
+       sb->s_security = sbsec;
+
+       return 0;
 }
 
 static void selinux_sb_free_security(struct super_block *sb)
@@ -2762,6 +2724,14 @@ static int selinux_mount(const char *dev_name,
                return path_has_perm(cred, path, FILE__MOUNTON);
 }
 
+static int selinux_move_mount(const struct path *from_path,
+                             const struct path *to_path)
+{
+       const struct cred *cred = current_cred();
+
+       return path_has_perm(cred, to_path, FILE__MOUNTON);
+}
+
 static int selinux_umount(struct vfsmount *mnt, int flags)
 {
        const struct cred *cred = current_cred();
@@ -2844,7 +2814,18 @@ static int selinux_fs_context_parse_param(struct fs_context *fc,
 
 static int selinux_inode_alloc_security(struct inode *inode)
 {
-       return inode_alloc_security(inode);
+       struct inode_security_struct *isec = selinux_inode(inode);
+       u32 sid = current_sid();
+
+       spin_lock_init(&isec->lock);
+       INIT_LIST_HEAD(&isec->list);
+       isec->inode = inode;
+       isec->sid = SECINITSID_UNLABELED;
+       isec->sclass = SECCLASS_FILE;
+       isec->task_sid = sid;
+       isec->initialized = LABEL_INVALID;
+
+       return 0;
 }
 
 static void selinux_inode_free_security(struct inode *inode)
@@ -2906,8 +2887,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
 
        newsid = tsec->create_sid;
 
-       rc = selinux_determine_inode_label(selinux_cred(current_cred()),
-               dir, qstr,
+       rc = selinux_determine_inode_label(tsec, dir, qstr,
                inode_mode_to_security_class(inode->i_mode),
                &newsid);
        if (rc)
@@ -2921,7 +2901,8 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
                isec->initialized = LABEL_INITIALIZED;
        }
 
-       if (!selinux_state.initialized || !(sbsec->flags & SBLABEL_MNT))
+       if (!selinux_initialized(&selinux_state) ||
+           !(sbsec->flags & SBLABEL_MNT))
                return -EOPNOTSUPP;
 
        if (name)
@@ -3004,14 +2985,14 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
        if (IS_ERR(isec))
                return PTR_ERR(isec);
 
-       return avc_has_perm(&selinux_state,
-                           sid, isec->sid, isec->sclass, FILE__READ, &ad);
+       return avc_has_perm_flags(&selinux_state,
+                                 sid, isec->sid, isec->sclass, FILE__READ, &ad,
+                                 rcu ? MAY_NOT_BLOCK : 0);
 }
 
 static noinline int audit_inode_permission(struct inode *inode,
                                           u32 perms, u32 audited, u32 denied,
-                                          int result,
-                                          unsigned flags)
+                                          int result)
 {
        struct common_audit_data ad;
        struct inode_security_struct *isec = selinux_inode(inode);
@@ -3022,7 +3003,7 @@ static noinline int audit_inode_permission(struct inode *inode,
 
        rc = slow_avc_audit(&selinux_state,
                            current_sid(), isec->sid, isec->sclass, perms,
-                           audited, denied, result, &ad, flags);
+                           audited, denied, result, &ad);
        if (rc)
                return rc;
        return 0;
@@ -3033,7 +3014,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
        const struct cred *cred = current_cred();
        u32 perms;
        bool from_access;
-       unsigned flags = mask & MAY_NOT_BLOCK;
+       bool no_block = mask & MAY_NOT_BLOCK;
        struct inode_security_struct *isec;
        u32 sid;
        struct av_decision avd;
@@ -3055,13 +3036,13 @@ static int selinux_inode_permission(struct inode *inode, int mask)
        perms = file_mask_to_av(inode->i_mode, mask);
 
        sid = cred_sid(cred);
-       isec = inode_security_rcu(inode, flags & MAY_NOT_BLOCK);
+       isec = inode_security_rcu(inode, no_block);
        if (IS_ERR(isec))
                return PTR_ERR(isec);
 
        rc = avc_has_perm_noaudit(&selinux_state,
                                  sid, isec->sid, isec->sclass, perms,
-                                 (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
+                                 no_block ? AVC_NONBLOCKING : 0,
                                  &avd);
        audited = avc_audit_required(perms, &avd, rc,
                                     from_access ? FILE__AUDIT_ACCESS : 0,
@@ -3069,7 +3050,11 @@ static int selinux_inode_permission(struct inode *inode, int mask)
        if (likely(!audited))
                return rc;
 
-       rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
+       /* fall back to ref-walk if we have to generate audit */
+       if (no_block)
+               return -ECHILD;
+
+       rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
        if (rc2)
                return rc2;
        return rc;
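The -ECHILD return above is the usual RCU path-walk contract: a non-blocking attempt bails out as soon as it would have to sleep (here, to build the audit record), and the VFS retries the lookup in blocking ref-walk mode, where the audit can be emitted safely. A toy model of that calling convention (function and flag names invented):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int check_permission(bool may_block, bool needs_audit)
{
        if (needs_audit && !may_block)
                return -ECHILD;         /* cannot audit without sleeping */
        if (needs_audit)
                printf("audit record emitted\n");       /* may sleep */
        return 0;
}

int main(void)
{
        /* fast, non-blocking attempt first (RCU walk) */
        int rc = check_permission(false, true);

        if (rc == -ECHILD)              /* fall back and retry (ref walk) */
                rc = check_permission(true, true);
        printf("rc=%d\n", rc);
        return 0;
}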
@@ -3140,7 +3125,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
                return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
        }
 
-       if (!selinux_state.initialized)
+       if (!selinux_initialized(&selinux_state))
                return (inode_owner_or_capable(inode) ? 0 : -EPERM);
 
        sbsec = inode->i_sb->s_security;
@@ -3226,7 +3211,7 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
                return;
        }
 
-       if (!selinux_state.initialized) {
+       if (!selinux_initialized(&selinux_state)) {
                /* If we haven't even been initialized, then we can't validate
                 * against a policy, so leave the label as invalid. It may
                 * resolve to a valid label on the next revalidation try if
@@ -3550,7 +3535,13 @@ static int selinux_file_permission(struct file *file, int mask)
 
 static int selinux_file_alloc_security(struct file *file)
 {
-       return file_alloc_security(file);
+       struct file_security_struct *fsec = selinux_file(file);
+       u32 sid = current_sid();
+
+       fsec->sid = sid;
+       fsec->fown_sid = sid;
+
+       return 0;
 }
 
 /*
@@ -3643,7 +3634,7 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
        return error;
 }
 
-static int default_noexec;
+static int default_noexec __ro_after_init;
 
 static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
 {
@@ -5515,44 +5506,6 @@ static int selinux_tun_dev_open(void *security)
        return 0;
 }
 
-static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
-{
-       int err = 0;
-       u32 perm;
-       struct nlmsghdr *nlh;
-       struct sk_security_struct *sksec = sk->sk_security;
-
-       if (skb->len < NLMSG_HDRLEN) {
-               err = -EINVAL;
-               goto out;
-       }
-       nlh = nlmsg_hdr(skb);
-
-       err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
-       if (err) {
-               if (err == -EINVAL) {
-                       pr_warn_ratelimited("SELinux: unrecognized netlink"
-                              " message: protocol=%hu nlmsg_type=%hu sclass=%s"
-                              " pig=%d comm=%s\n",
-                              sk->sk_protocol, nlh->nlmsg_type,
-                              secclass_map[sksec->sclass - 1].name,
-                              task_pid_nr(current), current->comm);
-                       if (!enforcing_enabled(&selinux_state) ||
-                           security_get_allow_unknown(&selinux_state))
-                               err = 0;
-               }
-
-               /* Ignore */
-               if (err == -ENOENT)
-                       err = 0;
-               goto out;
-       }
-
-       err = sock_has_perm(sk, perm);
-out:
-       return err;
-}
-
 #ifdef CONFIG_NETFILTER
 
 static unsigned int selinux_ip_forward(struct sk_buff *skb,
@@ -5881,7 +5834,40 @@ static unsigned int selinux_ipv6_postroute(void *priv,
 
 static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
-       return selinux_nlmsg_perm(sk, skb);
+       int err = 0;
+       u32 perm;
+       struct nlmsghdr *nlh;
+       struct sk_security_struct *sksec = sk->sk_security;
+
+       if (skb->len < NLMSG_HDRLEN) {
+               err = -EINVAL;
+               goto out;
+       }
+       nlh = nlmsg_hdr(skb);
+
+       err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
+       if (err) {
+               if (err == -EINVAL) {
+                       pr_warn_ratelimited("SELinux: unrecognized netlink"
+                              " message: protocol=%hu nlmsg_type=%hu sclass=%s"
+                              " pid=%d comm=%s\n",
+                              sk->sk_protocol, nlh->nlmsg_type,
+                              secclass_map[sksec->sclass - 1].name,
+                              task_pid_nr(current), current->comm);
+                       if (!enforcing_enabled(&selinux_state) ||
+                           security_get_allow_unknown(&selinux_state))
+                               err = 0;
+               }
+
+               /* Ignore */
+               if (err == -ENOENT)
+                       err = 0;
+               goto out;
+       }
+
+       err = sock_has_perm(sk, perm);
+out:
+       return err;
 }
 
 static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
@@ -5890,16 +5876,6 @@ static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
        isec->sid = current_sid();
 }
 
-static int msg_msg_alloc_security(struct msg_msg *msg)
-{
-       struct msg_security_struct *msec;
-
-       msec = selinux_msg_msg(msg);
-       msec->sid = SECINITSID_UNLABELED;
-
-       return 0;
-}
-
 static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
                        u32 perms)
 {
@@ -5918,7 +5894,12 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
 
 static int selinux_msg_msg_alloc_security(struct msg_msg *msg)
 {
-       return msg_msg_alloc_security(msg);
+       struct msg_security_struct *msec;
+
+       msec = selinux_msg_msg(msg);
+       msec->sid = SECINITSID_UNLABELED;
+
+       return 0;
 }
 
 /* message queue security operations */
@@ -6795,6 +6776,34 @@ static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
 }
 #endif
 
+static int selinux_lockdown(enum lockdown_reason what)
+{
+       struct common_audit_data ad;
+       u32 sid = current_sid();
+       int invalid_reason = (what <= LOCKDOWN_NONE) ||
+                            (what == LOCKDOWN_INTEGRITY_MAX) ||
+                            (what >= LOCKDOWN_CONFIDENTIALITY_MAX);
+
+       if (WARN(invalid_reason, "Invalid lockdown reason")) {
+               audit_log(audit_context(),
+                         GFP_ATOMIC, AUDIT_SELINUX_ERR,
+                         "lockdown_reason=invalid");
+               return -EINVAL;
+       }
+
+       ad.type = LSM_AUDIT_DATA_LOCKDOWN;
+       ad.u.reason = what;
+
+       if (what <= LOCKDOWN_INTEGRITY_MAX)
+               return avc_has_perm(&selinux_state,
+                                   sid, sid, SECCLASS_LOCKDOWN,
+                                   LOCKDOWN__INTEGRITY, &ad);
+       else
+               return avc_has_perm(&selinux_state,
+                                   sid, sid, SECCLASS_LOCKDOWN,
+                                   LOCKDOWN__CONFIDENTIALITY, &ad);
+}
+
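selinux_lockdown() above leans on enum lockdown_reason being ordered: valid reasons strictly between LOCKDOWN_NONE and LOCKDOWN_INTEGRITY_MAX map to the integrity permission, and those between LOCKDOWN_INTEGRITY_MAX and LOCKDOWN_CONFIDENTIALITY_MAX to confidentiality. A tiny model of the range check, with a local enum mirroring that ordering assumption:

#include <stdio.h>

enum demo_lockdown {
        DEMO_NONE,
        DEMO_KEXEC,             /* integrity reasons ... */
        DEMO_INTEGRITY_MAX,
        DEMO_KCORE,             /* confidentiality reasons ... */
        DEMO_CONFIDENTIALITY_MAX,
};

static const char *classify(enum demo_lockdown what)
{
        if (what <= DEMO_NONE || what == DEMO_INTEGRITY_MAX ||
            what >= DEMO_CONFIDENTIALITY_MAX)
                return "invalid";       /* the WARN()ed cases above */
        return what < DEMO_INTEGRITY_MAX ? "integrity" : "confidentiality";
}

int main(void)
{
        printf("%s %s\n", classify(DEMO_KEXEC), classify(DEMO_KCORE));
        return 0;       /* prints "integrity confidentiality" */
}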
 struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = {
        .lbs_cred = sizeof(struct task_security_struct),
        .lbs_file = sizeof(struct file_security_struct),
@@ -6864,6 +6873,21 @@ static int selinux_perf_event_write(struct perf_event *event)
 }
 #endif
 
+/*
+ * IMPORTANT NOTE: When adding new hooks, please be careful to keep this order:
+ * 1. any hooks that don't belong to (2.) or (3.) below,
+ * 2. hooks that both access structures allocated by other hooks, and allocate
+ *    structures that can be later accessed by other hooks (mostly "cloning"
+ *    hooks),
+ * 3. hooks that only allocate structures that can be later accessed by other
+ *    hooks ("allocating" hooks).
+ *
+ * Please follow block comment delimiters in the list to keep this order.
+ *
+ * This ordering is needed for SELinux runtime disable to work at least somewhat
+ * safely. Breaking the ordering rules above might lead to NULL pointer derefs
+ * when disabling SELinux at runtime.
+ */
 static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
        LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
@@ -6886,12 +6910,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(bprm_committing_creds, selinux_bprm_committing_creds),
        LSM_HOOK_INIT(bprm_committed_creds, selinux_bprm_committed_creds),
 
-       LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
-       LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
-
-       LSM_HOOK_INIT(sb_alloc_security, selinux_sb_alloc_security),
        LSM_HOOK_INIT(sb_free_security, selinux_sb_free_security),
-       LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
        LSM_HOOK_INIT(sb_free_mnt_opts, selinux_free_mnt_opts),
        LSM_HOOK_INIT(sb_remount, selinux_sb_remount),
        LSM_HOOK_INIT(sb_kern_mount, selinux_sb_kern_mount),
@@ -6901,12 +6920,12 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(sb_umount, selinux_umount),
        LSM_HOOK_INIT(sb_set_mnt_opts, selinux_set_mnt_opts),
        LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
-       LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
+
+       LSM_HOOK_INIT(move_mount, selinux_move_mount),
 
        LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
        LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),
 
-       LSM_HOOK_INIT(inode_alloc_security, selinux_inode_alloc_security),
        LSM_HOOK_INIT(inode_free_security, selinux_inode_free_security),
        LSM_HOOK_INIT(inode_init_security, selinux_inode_init_security),
        LSM_HOOK_INIT(inode_create, selinux_inode_create),
@@ -6978,21 +6997,15 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(ipc_permission, selinux_ipc_permission),
        LSM_HOOK_INIT(ipc_getsecid, selinux_ipc_getsecid),
 
-       LSM_HOOK_INIT(msg_msg_alloc_security, selinux_msg_msg_alloc_security),
-
-       LSM_HOOK_INIT(msg_queue_alloc_security,
-                       selinux_msg_queue_alloc_security),
        LSM_HOOK_INIT(msg_queue_associate, selinux_msg_queue_associate),
        LSM_HOOK_INIT(msg_queue_msgctl, selinux_msg_queue_msgctl),
        LSM_HOOK_INIT(msg_queue_msgsnd, selinux_msg_queue_msgsnd),
        LSM_HOOK_INIT(msg_queue_msgrcv, selinux_msg_queue_msgrcv),
 
-       LSM_HOOK_INIT(shm_alloc_security, selinux_shm_alloc_security),
        LSM_HOOK_INIT(shm_associate, selinux_shm_associate),
        LSM_HOOK_INIT(shm_shmctl, selinux_shm_shmctl),
        LSM_HOOK_INIT(shm_shmat, selinux_shm_shmat),
 
-       LSM_HOOK_INIT(sem_alloc_security, selinux_sem_alloc_security),
        LSM_HOOK_INIT(sem_associate, selinux_sem_associate),
        LSM_HOOK_INIT(sem_semctl, selinux_sem_semctl),
        LSM_HOOK_INIT(sem_semop, selinux_sem_semop),
@@ -7003,13 +7016,11 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(setprocattr, selinux_setprocattr),
 
        LSM_HOOK_INIT(ismaclabel, selinux_ismaclabel),
-       LSM_HOOK_INIT(secid_to_secctx, selinux_secid_to_secctx),
        LSM_HOOK_INIT(secctx_to_secid, selinux_secctx_to_secid),
        LSM_HOOK_INIT(release_secctx, selinux_release_secctx),
        LSM_HOOK_INIT(inode_invalidate_secctx, selinux_inode_invalidate_secctx),
        LSM_HOOK_INIT(inode_notifysecctx, selinux_inode_notifysecctx),
        LSM_HOOK_INIT(inode_setsecctx, selinux_inode_setsecctx),
-       LSM_HOOK_INIT(inode_getsecctx, selinux_inode_getsecctx),
 
        LSM_HOOK_INIT(unix_stream_connect, selinux_socket_unix_stream_connect),
        LSM_HOOK_INIT(unix_may_send, selinux_socket_unix_may_send),
@@ -7032,7 +7043,6 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(socket_getpeersec_stream,
                        selinux_socket_getpeersec_stream),
        LSM_HOOK_INIT(socket_getpeersec_dgram, selinux_socket_getpeersec_dgram),
-       LSM_HOOK_INIT(sk_alloc_security, selinux_sk_alloc_security),
        LSM_HOOK_INIT(sk_free_security, selinux_sk_free_security),
        LSM_HOOK_INIT(sk_clone_security, selinux_sk_clone_security),
        LSM_HOOK_INIT(sk_getsecid, selinux_sk_getsecid),
@@ -7047,7 +7057,6 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(secmark_refcount_inc, selinux_secmark_refcount_inc),
        LSM_HOOK_INIT(secmark_refcount_dec, selinux_secmark_refcount_dec),
        LSM_HOOK_INIT(req_classify_flow, selinux_req_classify_flow),
-       LSM_HOOK_INIT(tun_dev_alloc_security, selinux_tun_dev_alloc_security),
        LSM_HOOK_INIT(tun_dev_free_security, selinux_tun_dev_free_security),
        LSM_HOOK_INIT(tun_dev_create, selinux_tun_dev_create),
        LSM_HOOK_INIT(tun_dev_attach_queue, selinux_tun_dev_attach_queue),
@@ -7057,17 +7066,11 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(ib_pkey_access, selinux_ib_pkey_access),
        LSM_HOOK_INIT(ib_endport_manage_subnet,
                      selinux_ib_endport_manage_subnet),
-       LSM_HOOK_INIT(ib_alloc_security, selinux_ib_alloc_security),
        LSM_HOOK_INIT(ib_free_security, selinux_ib_free_security),
 #endif
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
-       LSM_HOOK_INIT(xfrm_policy_alloc_security, selinux_xfrm_policy_alloc),
-       LSM_HOOK_INIT(xfrm_policy_clone_security, selinux_xfrm_policy_clone),
        LSM_HOOK_INIT(xfrm_policy_free_security, selinux_xfrm_policy_free),
        LSM_HOOK_INIT(xfrm_policy_delete_security, selinux_xfrm_policy_delete),
-       LSM_HOOK_INIT(xfrm_state_alloc, selinux_xfrm_state_alloc),
-       LSM_HOOK_INIT(xfrm_state_alloc_acquire,
-                       selinux_xfrm_state_alloc_acquire),
        LSM_HOOK_INIT(xfrm_state_free_security, selinux_xfrm_state_free),
        LSM_HOOK_INIT(xfrm_state_delete_security, selinux_xfrm_state_delete),
        LSM_HOOK_INIT(xfrm_policy_lookup, selinux_xfrm_policy_lookup),
@@ -7077,14 +7080,12 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
 #endif
 
 #ifdef CONFIG_KEYS
-       LSM_HOOK_INIT(key_alloc, selinux_key_alloc),
        LSM_HOOK_INIT(key_free, selinux_key_free),
        LSM_HOOK_INIT(key_permission, selinux_key_permission),
        LSM_HOOK_INIT(key_getsecurity, selinux_key_getsecurity),
 #endif
 
 #ifdef CONFIG_AUDIT
-       LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
        LSM_HOOK_INIT(audit_rule_known, selinux_audit_rule_known),
        LSM_HOOK_INIT(audit_rule_match, selinux_audit_rule_match),
        LSM_HOOK_INIT(audit_rule_free, selinux_audit_rule_free),
@@ -7094,19 +7095,66 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(bpf, selinux_bpf),
        LSM_HOOK_INIT(bpf_map, selinux_bpf_map),
        LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog),
-       LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
-       LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
        LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
        LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
        LSM_HOOK_INIT(perf_event_open, selinux_perf_event_open),
-       LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
        LSM_HOOK_INIT(perf_event_free, selinux_perf_event_free),
        LSM_HOOK_INIT(perf_event_read, selinux_perf_event_read),
        LSM_HOOK_INIT(perf_event_write, selinux_perf_event_write),
 #endif
+
+       LSM_HOOK_INIT(locked_down, selinux_lockdown),
+
+       /*
+        * PUT "CLONING" (ACCESSING + ALLOCATING) HOOKS HERE
+        */
+       LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
+       LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
+       LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
+       LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+       LSM_HOOK_INIT(xfrm_policy_clone_security, selinux_xfrm_policy_clone),
+#endif
+
+       /*
+        * PUT "ALLOCATING" HOOKS HERE
+        */
+       LSM_HOOK_INIT(msg_msg_alloc_security, selinux_msg_msg_alloc_security),
+       LSM_HOOK_INIT(msg_queue_alloc_security,
+                     selinux_msg_queue_alloc_security),
+       LSM_HOOK_INIT(shm_alloc_security, selinux_shm_alloc_security),
+       LSM_HOOK_INIT(sb_alloc_security, selinux_sb_alloc_security),
+       LSM_HOOK_INIT(inode_alloc_security, selinux_inode_alloc_security),
+       LSM_HOOK_INIT(sem_alloc_security, selinux_sem_alloc_security),
+       LSM_HOOK_INIT(secid_to_secctx, selinux_secid_to_secctx),
+       LSM_HOOK_INIT(inode_getsecctx, selinux_inode_getsecctx),
+       LSM_HOOK_INIT(sk_alloc_security, selinux_sk_alloc_security),
+       LSM_HOOK_INIT(tun_dev_alloc_security, selinux_tun_dev_alloc_security),
+#ifdef CONFIG_SECURITY_INFINIBAND
+       LSM_HOOK_INIT(ib_alloc_security, selinux_ib_alloc_security),
+#endif
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+       LSM_HOOK_INIT(xfrm_policy_alloc_security, selinux_xfrm_policy_alloc),
+       LSM_HOOK_INIT(xfrm_state_alloc, selinux_xfrm_state_alloc),
+       LSM_HOOK_INIT(xfrm_state_alloc_acquire,
+                     selinux_xfrm_state_alloc_acquire),
+#endif
+#ifdef CONFIG_KEYS
+       LSM_HOOK_INIT(key_alloc, selinux_key_alloc),
+#endif
+#ifdef CONFIG_AUDIT
+       LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
+#endif
+#ifdef CONFIG_BPF_SYSCALL
+       LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
+       LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
+#endif
+#ifdef CONFIG_PERF_EVENTS
+       LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
+#endif
 };
 
 static __init int selinux_init(void)
@@ -7169,7 +7217,7 @@ void selinux_complete_init(void)
 DEFINE_LSM(selinux) = {
        .name = "selinux",
        .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
-       .enabled = &selinux_enabled,
+       .enabled = &selinux_enabled_boot,
        .blobs = &selinux_blob_sizes,
        .init = selinux_init,
 };
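
For orientation: each LSM_HOOK_INIT() entry above pairs one hook's list head with one SELinux implementation, so the regrouping of the "cloning" and "allocating" entries changes only their position in the table, not what gets registered. A paraphrased sketch of the macro as defined in include/linux/lsm_hooks.h in this era (for reference, not part of this commit):

#define LSM_HOOK_INIT(HEAD, HOOK) \
	{ .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }

/* e.g. the new lockdown entry expands to roughly:		*/
/* { .head = &security_hook_heads.locked_down,			*/
/*   .hook = { .locked_down = selinux_lockdown } }		*/
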
@@ -7238,7 +7286,7 @@ static int __init selinux_nf_ip_init(void)
 {
        int err;
 
-       if (!selinux_enabled)
+       if (!selinux_enabled_boot)
                return 0;
 
        pr_debug("SELinux:  Registering netfilter hooks\n");
@@ -7271,30 +7319,32 @@ static void selinux_nf_ip_exit(void)
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 int selinux_disable(struct selinux_state *state)
 {
-       if (state->initialized) {
+       if (selinux_initialized(state)) {
                /* Not permitted after initial policy load. */
                return -EINVAL;
        }
 
-       if (state->disabled) {
+       if (selinux_disabled(state)) {
                /* Only do this once. */
                return -EINVAL;
        }
 
-       state->disabled = 1;
+       selinux_mark_disabled(state);
 
        pr_info("SELinux:  Disabled at runtime.\n");
 
-       selinux_enabled = 0;
+       /*
+        * Unregister netfilter hooks.
+        * Must be done before security_delete_hooks() to avoid breaking
+        * runtime disable.
+        */
+       selinux_nf_ip_exit();
 
        security_delete_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks));
 
        /* Try to destroy the avc node cache */
        avc_disable();
 
-       /* Unregister netfilter hooks. */
-       selinux_nf_ip_exit();
-
        /* Unregister selinuxfs. */
        exit_sel_fs();
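
For reference, security_delete_hooks() (paraphrased from security/security.c, not part of this diff) only unlinks each registered hook RCU-style; the separately registered netfilter hooks know nothing about that list, which is why they must be torn down first, while SELinux is still fully functional:

void security_delete_hooks(struct security_hook_list *hooks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		hlist_del_rcu(&hooks[i].list);
}
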
 
diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
index de92365e4324b6e1344fdcff6e758c158c9f7b0a..f68a7617cfb95e8e2bf65e3e9ea9f12227004568 100644
@@ -222,7 +222,7 @@ static __init int sel_ib_pkey_init(void)
 {
        int iter;
 
-       if (!selinux_enabled)
+       if (!selinux_enabled_boot)
                return 0;
 
        for (iter = 0; iter < SEL_PKEY_HASH_SIZE; iter++) {
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 7be0e1e90e8be0fcf0f84e14367b7173d7f1dd90..cf4cc3ef959b5b88c13b785e54394a0d45765610 100644
@@ -100,8 +100,7 @@ static inline u32 avc_audit_required(u32 requested,
 int slow_avc_audit(struct selinux_state *state,
                   u32 ssid, u32 tsid, u16 tclass,
                   u32 requested, u32 audited, u32 denied, int result,
-                  struct common_audit_data *a,
-                  unsigned flags);
+                  struct common_audit_data *a);
 
 /**
  * avc_audit - Audit the granting or denial of permissions.
@@ -135,9 +134,12 @@ static inline int avc_audit(struct selinux_state *state,
        audited = avc_audit_required(requested, avd, result, 0, &denied);
        if (likely(!audited))
                return 0;
+       /* fall back to ref-walk if we have to generate audit */
+       if (flags & MAY_NOT_BLOCK)
+               return -ECHILD;
        return slow_avc_audit(state, ssid, tsid, tclass,
                              requested, audited, denied, result,
-                             a, flags);
+                             a);
 }
 
 #define AVC_STRICT 1 /* Ignore permissive mode. */
@@ -153,6 +155,11 @@ int avc_has_perm(struct selinux_state *state,
                 u32 ssid, u32 tsid,
                 u16 tclass, u32 requested,
                 struct common_audit_data *auditdata);
+int avc_has_perm_flags(struct selinux_state *state,
+                      u32 ssid, u32 tsid,
+                      u16 tclass, u32 requested,
+                      struct common_audit_data *auditdata,
+                      int flags);
 
 int avc_has_extended_perms(struct selinux_state *state,
                           u32 ssid, u32 tsid, u16 tclass, u32 requested,
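
The avc_audit() change above moves RCU-walk handling into the inline fast path: when MAY_NOT_BLOCK is set, generating an audit record (which may sleep) is refused up front with -ECHILD so the VFS can retry the lookup in ref-walk mode, and slow_avc_audit() loses its flags argument entirely. An illustrative caller sketch (assumed names, not from this commit):

/* illustrative only: how a hook behaves under RCU-walk */
static int example_check(struct selinux_state *state, u32 ssid, u32 tsid,
			 u32 requested, struct common_audit_data *ad,
			 int flags)
{
	int rc = avc_has_perm_flags(state, ssid, tsid, SECCLASS_FILE,
				    requested, ad, flags);

	/*
	 * With MAY_NOT_BLOCK in flags, a result that would need an audit
	 * record comes back as -ECHILD: not a denial, but "retry this
	 * lookup in ref-walk mode, where blocking is allowed".
	 */
	return rc;
}
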
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 7db24855e12d4c19c1162e6ad04b254f92bd036e..986f3ac14282f8a760b5346c7661524fa14209af 100644
@@ -246,6 +246,8 @@ struct security_class_mapping secclass_map[] = {
          { COMMON_SOCK_PERMS, NULL } },
        { "perf_event",
          {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
+       { "lockdown",
+         { "integrity", "confidentiality", NULL } },
        { NULL }
   };
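
The two permissions mirror the kernel's lockdown levels (integrity vs. confidentiality). A plausible sketch of how the selinux_lockdown() hook registered earlier in this diff consumes them; the hook body is outside this excerpt, so treat this as an approximation:

static int sketch_lockdown(enum lockdown_reason what)
{
	u32 sid = current_sid();
	u32 perm = (what <= LOCKDOWN_INTEGRITY_MAX) ?
			LOCKDOWN__INTEGRITY : LOCKDOWN__CONFIDENTIALITY;

	/* the check is made against the caller's own domain (sid, sid) */
	return avc_has_perm(&selinux_state, sid, sid,
			    SECCLASS_LOCKDOWN, perm, NULL);
}
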
 
diff --git a/security/selinux/include/ibpkey.h b/security/selinux/include/ibpkey.h
index a2ebe397bcb78891d1b167c234433b271fc34f49..e6ac1d23320b6728cc1ccd5521daea599d2207b4 100644
 #ifndef _SELINUX_IB_PKEY_H
 #define _SELINUX_IB_PKEY_H
 
+#ifdef CONFIG_SECURITY_INFINIBAND
 void sel_ib_pkey_flush(void);
-
 int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid);
+#else
+static inline void sel_ib_pkey_flush(void)
+{
+}
+static inline int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid)
+{
+       *sid = SECINITSID_UNLABELED;
+       return 0;
+}
+#endif
 
 #endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index a4a86cbcfb0aca894d4113eb54671e0faafeedad..330b7b6d44e0af0406840770bf5d4f2030ad4752 100644
@@ -35,7 +35,7 @@ struct task_security_struct {
        u32 create_sid;         /* fscreate SID */
        u32 keycreate_sid;      /* keycreate SID */
        u32 sockcreate_sid;     /* sockcreate SID */
-};
+} __randomize_layout;
 
 enum label_initialized {
        LABEL_INVALID,          /* invalid or not initialized */
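
__randomize_layout opts these security-sensitive structs into structure layout randomization. Roughly, per the compiler headers (paraphrased; exact guards vary by kernel config):

#ifdef RANDSTRUCT_PLUGIN
#define __randomize_layout __attribute__((randomize_layout))
#else
#define __randomize_layout	/* plugin disabled: layout as declared */
#endif
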
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index ae840634e3c7da3901f81bea23e4c48251cbf0e3..a39f9565d80b7ee93e7427e41780241e746fb203 100644
@@ -69,7 +69,7 @@
 
 struct netlbl_lsm_secattr;
 
-extern int selinux_enabled;
+extern int selinux_enabled_boot;
 
 /* Policy capabilities */
 enum {
@@ -99,7 +99,9 @@ struct selinux_avc;
 struct selinux_ss;
 
 struct selinux_state {
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
        bool disabled;
+#endif
 #ifdef CONFIG_SECURITY_SELINUX_DEVELOP
        bool enforcing;
 #endif
@@ -108,22 +110,34 @@ struct selinux_state {
        bool policycap[__POLICYDB_CAPABILITY_MAX];
        struct selinux_avc *avc;
        struct selinux_ss *ss;
-};
+} __randomize_layout;
 
 void selinux_ss_init(struct selinux_ss **ss);
 void selinux_avc_init(struct selinux_avc **avc);
 
 extern struct selinux_state selinux_state;
 
+static inline bool selinux_initialized(const struct selinux_state *state)
+{
+       /* do a synchronized load to avoid race conditions */
+       return smp_load_acquire(&state->initialized);
+}
+
+static inline void selinux_mark_initialized(struct selinux_state *state)
+{
+       /* do a synchronized write to avoid race conditions */
+       smp_store_release(&state->initialized, true);
+}
+
 #ifdef CONFIG_SECURITY_SELINUX_DEVELOP
 static inline bool enforcing_enabled(struct selinux_state *state)
 {
-       return state->enforcing;
+       return READ_ONCE(state->enforcing);
 }
 
 static inline void enforcing_set(struct selinux_state *state, bool value)
 {
-       state->enforcing = value;
+       WRITE_ONCE(state->enforcing, value);
 }
 #else
 static inline bool enforcing_enabled(struct selinux_state *state)
@@ -136,6 +150,23 @@ static inline void enforcing_set(struct selinux_state *state, bool value)
 }
 #endif
 
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+static inline bool selinux_disabled(struct selinux_state *state)
+{
+       return READ_ONCE(state->disabled);
+}
+
+static inline void selinux_mark_disabled(struct selinux_state *state)
+{
+       WRITE_ONCE(state->disabled, true);
+}
+#else
+static inline bool selinux_disabled(struct selinux_state *state)
+{
+       return false;
+}
+#endif
+
 static inline bool selinux_policycap_netpeer(void)
 {
        struct selinux_state *state = &selinux_state;
@@ -395,5 +426,6 @@ extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
 extern void avtab_cache_init(void);
 extern void ebitmap_cache_init(void);
 extern void hashtab_cache_init(void);
+extern int security_sidtab_hash_stats(struct selinux_state *state, char *page);
 
 #endif /* _SELINUX_SECURITY_H_ */
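
The acquire/release pair above guarantees that any reader that observes initialized == true also observes every write the policy-load path made before flipping the flag. A minimal userspace analogue in C11 atomics (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool initialized;
static int policy_data;	/* stands in for state built before the flag flips */

static void mark_initialized(void)
{
	policy_data = 42;	/* all setup happens first ... */
	atomic_store_explicit(&initialized, true,
			      memory_order_release);	/* ... then publish */
}

static bool is_initialized(void)
{
	/* pairs with the release store above: a true result guarantees
	 * that policy_data == 42 is also visible to this thread */
	return atomic_load_explicit(&initialized, memory_order_acquire);
}
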
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index e40fecd73752489a1e87c0c51ee8eef00c006994..15b8c1bcd7d0cce7183b08b4000d40c01dcb4cb5 100644
@@ -266,7 +266,7 @@ static __init int sel_netif_init(void)
 {
        int i;
 
-       if (!selinux_enabled)
+       if (!selinux_enabled_boot)
                return 0;
 
        for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 9ab84efa46c7eb5cd640f7db3b3598f34dcd567b..dff587d1e16417f56794432014b93ac2abf99521 100644
@@ -291,7 +291,7 @@ static __init int sel_netnode_init(void)
 {
        int iter;
 
-       if (!selinux_enabled)
+       if (!selinux_enabled_boot)
                return 0;
 
        for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 3f8b2c0458c88a4b27f668c31a2b1d09006415d0..de727f7489b7660026b50229ccb3eb837109a759 100644
@@ -225,7 +225,7 @@ static __init int sel_netport_init(void)
 {
        int iter;
 
-       if (!selinux_enabled)
+       if (!selinux_enabled_boot)
                return 0;
 
        for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index ee94fa469c29edb0fbfc07d151d91710df86b265..79c710911a3c9ed2fe4ab0ef405d389e72eaa01f 100644
@@ -168,11 +168,10 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
                        goto out;
                audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_STATUS,
                        "enforcing=%d old_enforcing=%d auid=%u ses=%u"
-                       " enabled=%d old-enabled=%d lsm=selinux res=1",
+                       " enabled=1 old-enabled=1 lsm=selinux res=1",
                        new_value, old_value,
                        from_kuid(&init_user_ns, audit_get_loginuid(current)),
-                       audit_get_sessionid(current),
-                       selinux_enabled, selinux_enabled);
+                       audit_get_sessionid(current));
                enforcing_set(state, new_value);
                if (new_value)
                        avc_ss_reset(state->avc, 0);
@@ -282,6 +281,13 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
        int new_value;
        int enforcing;
 
+       /* NOTE: we are now officially considering runtime disable as
+        *       deprecated, and using it will become increasingly painful
+        *       (e.g. sleeping/blocking) as we progress through future
+        *       kernel releases until eventually it is removed
+        */
+       pr_err("SELinux:  Runtime disable is deprecated, use selinux=0 on the kernel cmdline.\n");
+
        if (count >= PAGE_SIZE)
                return -ENOMEM;
 
@@ -304,10 +310,10 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
                        goto out;
                audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_STATUS,
                        "enforcing=%d old_enforcing=%d auid=%u ses=%u"
-                       " enabled=%d old-enabled=%d lsm=selinux res=1",
+                       " enabled=0 old-enabled=1 lsm=selinux res=1",
                        enforcing, enforcing,
                        from_kuid(&init_user_ns, audit_get_loginuid(current)),
-                       audit_get_sessionid(current), 0, 1);
+                       audit_get_sessionid(current));
        }
 
        length = count;
@@ -1482,6 +1488,32 @@ static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf,
        return length;
 }
 
+static ssize_t sel_read_sidtab_hash_stats(struct file *filp, char __user *buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+       struct selinux_state *state = fsi->state;
+       char *page;
+       ssize_t length;
+
+       page = (char *)__get_free_page(GFP_KERNEL);
+       if (!page)
+               return -ENOMEM;
+
+       length = security_sidtab_hash_stats(state, page);
+       if (length >= 0)
+               length = simple_read_from_buffer(buf, count, ppos, page,
+                                               length);
+       free_page((unsigned long)page);
+
+       return length;
+}
+
+static const struct file_operations sel_sidtab_hash_stats_ops = {
+       .read           = sel_read_sidtab_hash_stats,
+       .llseek         = generic_file_llseek,
+};
+
 static const struct file_operations sel_avc_cache_threshold_ops = {
        .read           = sel_read_avc_cache_threshold,
        .write          = sel_write_avc_cache_threshold,
@@ -1599,6 +1631,37 @@ static int sel_make_avc_files(struct dentry *dir)
        return 0;
 }
 
+static int sel_make_ss_files(struct dentry *dir)
+{
+       struct super_block *sb = dir->d_sb;
+       struct selinux_fs_info *fsi = sb->s_fs_info;
+       int i;
+       static struct tree_descr files[] = {
+               { "sidtab_hash_stats", &sel_sidtab_hash_stats_ops, S_IRUGO },
+       };
+
+       for (i = 0; i < ARRAY_SIZE(files); i++) {
+               struct inode *inode;
+               struct dentry *dentry;
+
+               dentry = d_alloc_name(dir, files[i].name);
+               if (!dentry)
+                       return -ENOMEM;
+
+               inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
+               if (!inode) {
+                       dput(dentry);
+                       return -ENOMEM;
+               }
+
+               inode->i_fop = files[i].ops;
+               inode->i_ino = ++fsi->last_ino;
+               d_add(dentry, inode);
+       }
+
+       return 0;
+}
+
 static ssize_t sel_read_initcon(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
@@ -1672,7 +1735,7 @@ static ssize_t sel_read_class(struct file *file, char __user *buf,
 {
        unsigned long ino = file_inode(file)->i_ino;
        char res[TMPBUFLEN];
-       ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+       ssize_t len = scnprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
        return simple_read_from_buffer(buf, count, ppos, res, len);
 }
 
@@ -1686,7 +1749,7 @@ static ssize_t sel_read_perm(struct file *file, char __user *buf,
 {
        unsigned long ino = file_inode(file)->i_ino;
        char res[TMPBUFLEN];
-       ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+       ssize_t len = scnprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
        return simple_read_from_buffer(buf, count, ppos, res, len);
 }
 
@@ -1963,6 +2026,14 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc)
        }
 
        ret = sel_make_avc_files(dentry);
+       if (ret)
+               goto err;
+
+       dentry = sel_make_dir(sb->s_root, "ss", &fsi->last_ino);
+       if (IS_ERR(dentry)) {
+               ret = PTR_ERR(dentry);
+               goto err;
+       }
+
+       ret = sel_make_ss_files(dentry);
        if (ret)
                goto err;
 
@@ -2040,7 +2111,7 @@ static int __init init_sel_fs(void)
                                          sizeof(NULL_FILE_NAME)-1);
        int err;
 
-       if (!selinux_enabled)
+       if (!selinux_enabled_boot)
                return 0;
 
        err = sysfs_create_mount_point(fs_kobj, "selinux");
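
With the "ss" directory wired into sel_fill_super() above, the new statistics file appears at /sys/fs/selinux/ss/sidtab_hash_stats under the standard selinuxfs mount point. A small userspace sketch for reading it (the three output lines come from sidtab_hash_stats(), added later in this series):

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/fs/selinux/ss/sidtab_hash_stats", "r");

	if (!f) {
		perror("sidtab_hash_stats");
		return 1;
	}
	/* expected: "entries: N", "buckets used: N/M", "longest chain: N" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
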
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index 513e67f48878008e8d8a6880b4d3e6a67c21af7a..3ba044fe02ed49d7948674d333a83dfd989d16bf 100644
@@ -31,6 +31,7 @@ struct context {
        u32 len;        /* length of string in bytes */
        struct mls_range range;
        char *str;      /* string representation if context cannot be mapped. */
+       u32 hash;       /* a hash of the string representation */
 };
 
 static inline void mls_context_init(struct context *c)
@@ -168,12 +169,13 @@ static inline int context_cpy(struct context *dst, struct context *src)
                kfree(dst->str);
                return rc;
        }
+       dst->hash = src->hash;
        return 0;
 }
 
 static inline void context_destroy(struct context *c)
 {
-       c->user = c->role = c->type = 0;
+       c->user = c->role = c->type = c->hash = 0;
        kfree(c->str);
        c->str = NULL;
        c->len = 0;
@@ -182,6 +184,8 @@ static inline void context_destroy(struct context *c)
 
 static inline int context_cmp(struct context *c1, struct context *c2)
 {
+       if (c1->hash && c2->hash && (c1->hash != c2->hash))
+               return 0;
        if (c1->len && c2->len)
                return (c1->len == c2->len && !strcmp(c1->str, c2->str));
        if (c1->len || c2->len)
@@ -192,5 +196,10 @@ static inline int context_cmp(struct context *c1, struct context *c2)
                mls_context_cmp(c1, c2));
 }
 
+static inline unsigned int context_compute_hash(const char *s)
+{
+       return full_name_hash(NULL, s, strlen(s));
+}
+
 #endif /* _SS_CONTEXT_H_ */
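
The new hash member enables a compare-hash-before-contents shortcut in context_cmp(): a zero hash doubles as "not yet computed", so two unhashed contexts still fall through to the full comparison. A standalone analogue (illustrative only):

#include <stdbool.h>
#include <string.h>

struct ctx {
	unsigned int hash;	/* 0 means "not yet computed" */
	const char *str;
};

static bool ctx_equal(const struct ctx *a, const struct ctx *b)
{
	if (a->hash && b->hash && a->hash != b->hash)
		return false;			/* cheap reject */
	return strcmp(a->str, b->str) == 0;	/* authoritative compare */
}
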
 
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index e20624a68f5d2f7b017fb6c6a2252d5310839b30..2aa7f2e1a8e7c3ebb725b377e8f60b728a4ee6b9 100644
@@ -878,6 +878,11 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
                        sidtab_destroy(s);
                        goto out;
                }
+               rc = context_add_hash(p, &c->context[0]);
+               if (rc) {
+                       sidtab_destroy(s);
+                       goto out;
+               }
 
                rc = sidtab_set_initial(s, c->sid[0], &c->context[0]);
                if (rc) {
@@ -2654,7 +2659,7 @@ static int role_trans_write(struct policydb *p, void *fp)
 {
        struct role_trans *r = p->role_tr;
        struct role_trans *tr;
-       u32 buf[3];
+       __le32 buf[3];
        size_t nel;
        int rc;
 
@@ -2686,7 +2691,7 @@ static int role_trans_write(struct policydb *p, void *fp)
 static int role_allow_write(struct role_allow *r, void *fp)
 {
        struct role_allow *ra;
-       u32 buf[2];
+       __le32 buf[2];
        size_t nel;
        int rc;
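
The u32 to __le32 changes are sparse annotation fixes: these buffers hold little-endian on-disk words produced by cpu_to_le32(), so typing them as __le32 lets sparse flag any host-order value slipping in. The write path looks roughly like this (field names per struct role_allow, shown for illustration):

	__le32 buf[2];

	buf[0] = cpu_to_le32(ra->role);		/* host order -> on-disk LE */
	buf[1] = cpu_to_le32(ra->new_role);
	rc = put_entry(buf, sizeof(u32), 2, fp);
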
 
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index bc56b14e22166fde69031d187d6151643e6e1f9e..69b24191fa383102076c66fba5b25c28a646155f 100644
@@ -307,7 +307,7 @@ struct policydb {
 
        u16 process_class;
        u32 process_trans_perms;
-};
+} __randomize_layout;
 
 extern void policydb_destroy(struct policydb *p);
 extern int policydb_load_isids(struct policydb *p, struct sidtab *s);
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index a5813c7629c17f229f6e016cd3d5e0db86f975f7..216ce602a2b513aaad6a6e40bf900430423d5c3b 100644
@@ -91,6 +91,12 @@ static int context_struct_to_string(struct policydb *policydb,
                                    char **scontext,
                                    u32 *scontext_len);
 
+static int sidtab_entry_to_string(struct policydb *policydb,
+                                 struct sidtab *sidtab,
+                                 struct sidtab_entry *entry,
+                                 char **scontext,
+                                 u32 *scontext_len);
+
 static void context_struct_compute_av(struct policydb *policydb,
                                      struct context *scontext,
                                      struct context *tcontext,
@@ -716,20 +722,21 @@ static void context_struct_compute_av(struct policydb *policydb,
 }
 
 static int security_validtrans_handle_fail(struct selinux_state *state,
-                                          struct context *ocontext,
-                                          struct context *ncontext,
-                                          struct context *tcontext,
+                                          struct sidtab_entry *oentry,
+                                          struct sidtab_entry *nentry,
+                                          struct sidtab_entry *tentry,
                                           u16 tclass)
 {
        struct policydb *p = &state->ss->policydb;
+       struct sidtab *sidtab = state->ss->sidtab;
        char *o = NULL, *n = NULL, *t = NULL;
        u32 olen, nlen, tlen;
 
-       if (context_struct_to_string(p, ocontext, &o, &olen))
+       if (sidtab_entry_to_string(p, sidtab, oentry, &o, &olen))
                goto out;
-       if (context_struct_to_string(p, ncontext, &n, &nlen))
+       if (sidtab_entry_to_string(p, sidtab, nentry, &n, &nlen))
                goto out;
-       if (context_struct_to_string(p, tcontext, &t, &tlen))
+       if (sidtab_entry_to_string(p, sidtab, tentry, &t, &tlen))
                goto out;
        audit_log(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR,
                  "op=security_validate_transition seresult=denied"
@@ -751,16 +758,16 @@ static int security_compute_validatetrans(struct selinux_state *state,
 {
        struct policydb *policydb;
        struct sidtab *sidtab;
-       struct context *ocontext;
-       struct context *ncontext;
-       struct context *tcontext;
+       struct sidtab_entry *oentry;
+       struct sidtab_entry *nentry;
+       struct sidtab_entry *tentry;
        struct class_datum *tclass_datum;
        struct constraint_node *constraint;
        u16 tclass;
        int rc = 0;
 
 
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                return 0;
 
        read_lock(&state->ss->policy_rwlock);
@@ -779,24 +786,24 @@ static int security_compute_validatetrans(struct selinux_state *state,
        }
        tclass_datum = policydb->class_val_to_struct[tclass - 1];
 
-       ocontext = sidtab_search(sidtab, oldsid);
-       if (!ocontext) {
+       oentry = sidtab_search_entry(sidtab, oldsid);
+       if (!oentry) {
                pr_err("SELinux: %s:  unrecognized SID %d\n",
                        __func__, oldsid);
                rc = -EINVAL;
                goto out;
        }
 
-       ncontext = sidtab_search(sidtab, newsid);
-       if (!ncontext) {
+       nentry = sidtab_search_entry(sidtab, newsid);
+       if (!nentry) {
                pr_err("SELinux: %s:  unrecognized SID %d\n",
                        __func__, newsid);
                rc = -EINVAL;
                goto out;
        }
 
-       tcontext = sidtab_search(sidtab, tasksid);
-       if (!tcontext) {
+       tentry = sidtab_search_entry(sidtab, tasksid);
+       if (!tentry) {
                pr_err("SELinux: %s:  unrecognized SID %d\n",
                        __func__, tasksid);
                rc = -EINVAL;
@@ -805,15 +812,16 @@ static int security_compute_validatetrans(struct selinux_state *state,
 
        constraint = tclass_datum->validatetrans;
        while (constraint) {
-               if (!constraint_expr_eval(policydb, ocontext, ncontext,
-                                         tcontext, constraint->expr)) {
+               if (!constraint_expr_eval(policydb, &oentry->context,
+                                         &nentry->context, &tentry->context,
+                                         constraint->expr)) {
                        if (user)
                                rc = -EPERM;
                        else
                                rc = security_validtrans_handle_fail(state,
-                                                                    ocontext,
-                                                                    ncontext,
-                                                                    tcontext,
+                                                                    oentry,
+                                                                    nentry,
+                                                                    tentry,
                                                                     tclass);
                        goto out;
                }
@@ -855,12 +863,12 @@ int security_bounded_transition(struct selinux_state *state,
 {
        struct policydb *policydb;
        struct sidtab *sidtab;
-       struct context *old_context, *new_context;
+       struct sidtab_entry *old_entry, *new_entry;
        struct type_datum *type;
        int index;
        int rc;
 
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                return 0;
 
        read_lock(&state->ss->policy_rwlock);
@@ -869,16 +877,16 @@ int security_bounded_transition(struct selinux_state *state,
        sidtab = state->ss->sidtab;
 
        rc = -EINVAL;
-       old_context = sidtab_search(sidtab, old_sid);
-       if (!old_context) {
+       old_entry = sidtab_search_entry(sidtab, old_sid);
+       if (!old_entry) {
                pr_err("SELinux: %s: unrecognized SID %u\n",
                       __func__, old_sid);
                goto out;
        }
 
        rc = -EINVAL;
-       new_context = sidtab_search(sidtab, new_sid);
-       if (!new_context) {
+       new_entry = sidtab_search_entry(sidtab, new_sid);
+       if (!new_entry) {
                pr_err("SELinux: %s: unrecognized SID %u\n",
                       __func__, new_sid);
                goto out;
@@ -886,10 +894,10 @@ int security_bounded_transition(struct selinux_state *state,
 
        rc = 0;
        /* type/domain unchanged */
-       if (old_context->type == new_context->type)
+       if (old_entry->context.type == new_entry->context.type)
                goto out;
 
-       index = new_context->type;
+       index = new_entry->context.type;
        while (true) {
                type = policydb->type_val_to_struct[index - 1];
                BUG_ON(!type);
@@ -901,7 +909,7 @@ int security_bounded_transition(struct selinux_state *state,
 
                /* @newsid is bounded by @oldsid */
                rc = 0;
-               if (type->bounds == old_context->type)
+               if (type->bounds == old_entry->context.type)
                        break;
 
                index = type->bounds;
@@ -912,10 +920,10 @@ int security_bounded_transition(struct selinux_state *state,
                char *new_name = NULL;
                u32 length;
 
-               if (!context_struct_to_string(policydb, old_context,
-                                             &old_name, &length) &&
-                   !context_struct_to_string(policydb, new_context,
-                                             &new_name, &length)) {
+               if (!sidtab_entry_to_string(policydb, sidtab, old_entry,
+                                           &old_name, &length) &&
+                   !sidtab_entry_to_string(policydb, sidtab, new_entry,
+                                           &new_name, &length)) {
                        audit_log(audit_context(),
                                  GFP_ATOMIC, AUDIT_SELINUX_ERR,
                                  "op=security_bounded_transition "
@@ -1019,7 +1027,7 @@ void security_compute_xperms_decision(struct selinux_state *state,
        memset(xpermd->dontaudit->p, 0, sizeof(xpermd->dontaudit->p));
 
        read_lock(&state->ss->policy_rwlock);
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                goto allow;
 
        policydb = &state->ss->policydb;
@@ -1104,7 +1112,7 @@ void security_compute_av(struct selinux_state *state,
        read_lock(&state->ss->policy_rwlock);
        avd_init(state, avd);
        xperms->len = 0;
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                goto allow;
 
        policydb = &state->ss->policydb;
@@ -1158,7 +1166,7 @@ void security_compute_av_user(struct selinux_state *state,
 
        read_lock(&state->ss->policy_rwlock);
        avd_init(state, avd);
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                goto allow;
 
        policydb = &state->ss->policydb;
@@ -1255,8 +1263,42 @@ static int context_struct_to_string(struct policydb *p,
        return 0;
 }
 
+static int sidtab_entry_to_string(struct policydb *p,
+                                 struct sidtab *sidtab,
+                                 struct sidtab_entry *entry,
+                                 char **scontext, u32 *scontext_len)
+{
+       int rc = sidtab_sid2str_get(sidtab, entry, scontext, scontext_len);
+
+       if (rc != -ENOENT)
+               return rc;
+
+       rc = context_struct_to_string(p, &entry->context, scontext,
+                                     scontext_len);
+       if (!rc && scontext)
+               sidtab_sid2str_put(sidtab, entry, *scontext, *scontext_len);
+       return rc;
+}
+
 #include "initial_sid_to_string.h"
 
+int security_sidtab_hash_stats(struct selinux_state *state, char *page)
+{
+       int rc;
+
+       if (!selinux_initialized(state)) {
+               pr_err("SELinux: %s:  called before initial load_policy\n",
+                      __func__);
+               return -EINVAL;
+       }
+
+       read_lock(&state->ss->policy_rwlock);
+       rc = sidtab_hash_stats(state->ss->sidtab, page);
+       read_unlock(&state->ss->policy_rwlock);
+
+       return rc;
+}
+
 const char *security_get_initial_sid_context(u32 sid)
 {
        if (unlikely(sid > SECINITSID_NUM))
@@ -1271,14 +1313,14 @@ static int security_sid_to_context_core(struct selinux_state *state,
 {
        struct policydb *policydb;
        struct sidtab *sidtab;
-       struct context *context;
+       struct sidtab_entry *entry;
        int rc = 0;
 
        if (scontext)
                *scontext = NULL;
        *scontext_len  = 0;
 
-       if (!state->initialized) {
+       if (!selinux_initialized(state)) {
                if (sid <= SECINITSID_NUM) {
                        char *scontextp;
 
@@ -1302,21 +1344,23 @@ static int security_sid_to_context_core(struct selinux_state *state,
        read_lock(&state->ss->policy_rwlock);
        policydb = &state->ss->policydb;
        sidtab = state->ss->sidtab;
+
        if (force)
-               context = sidtab_search_force(sidtab, sid);
+               entry = sidtab_search_entry_force(sidtab, sid);
        else
-               context = sidtab_search(sidtab, sid);
-       if (!context) {
+               entry = sidtab_search_entry(sidtab, sid);
+       if (!entry) {
                pr_err("SELinux: %s:  unrecognized SID %d\n",
                        __func__, sid);
                rc = -EINVAL;
                goto out_unlock;
        }
-       if (only_invalid && !context->len)
-               rc = 0;
-       else
-               rc = context_struct_to_string(policydb, context, scontext,
-                                             scontext_len);
+       if (only_invalid && !entry->context.len)
+               goto out_unlock;
+
+       rc = sidtab_entry_to_string(policydb, sidtab, entry, scontext,
+                                   scontext_len);
+
 out_unlock:
        read_unlock(&state->ss->policy_rwlock);
 out:
@@ -1449,6 +1493,42 @@ out:
        return rc;
 }
 
+int context_add_hash(struct policydb *policydb,
+                    struct context *context)
+{
+       int rc;
+       char *str;
+       int len;
+
+       if (context->str) {
+               context->hash = context_compute_hash(context->str);
+       } else {
+               rc = context_struct_to_string(policydb, context,
+                                             &str, &len);
+               if (rc)
+                       return rc;
+               context->hash = context_compute_hash(str);
+               kfree(str);
+       }
+       return 0;
+}
+
+static int context_struct_to_sid(struct selinux_state *state,
+                                struct context *context, u32 *sid)
+{
+       int rc;
+       struct sidtab *sidtab = state->ss->sidtab;
+       struct policydb *policydb = &state->ss->policydb;
+
+       if (!context->hash) {
+               rc = context_add_hash(policydb, context);
+               if (rc)
+                       return rc;
+       }
+
+       return sidtab_context_to_sid(sidtab, context, sid);
+}
+
 static int security_context_to_sid_core(struct selinux_state *state,
                                        const char *scontext, u32 scontext_len,
                                        u32 *sid, u32 def_sid, gfp_t gfp_flags,
@@ -1469,7 +1549,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
        if (!scontext2)
                return -ENOMEM;
 
-       if (!state->initialized) {
+       if (!selinux_initialized(state)) {
                int i;
 
                for (i = 1; i < SECINITSID_NUM; i++) {
@@ -1501,7 +1581,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
                str = NULL;
        } else if (rc)
                goto out_unlock;
-       rc = sidtab_context_to_sid(sidtab, &context, sid);
+       rc = context_struct_to_sid(state, &context, sid);
        context_destroy(&context);
 out_unlock:
        read_unlock(&state->ss->policy_rwlock);
@@ -1574,19 +1654,20 @@ int security_context_to_sid_force(struct selinux_state *state,
 
 static int compute_sid_handle_invalid_context(
        struct selinux_state *state,
-       struct context *scontext,
-       struct context *tcontext,
+       struct sidtab_entry *sentry,
+       struct sidtab_entry *tentry,
        u16 tclass,
        struct context *newcontext)
 {
        struct policydb *policydb = &state->ss->policydb;
+       struct sidtab *sidtab = state->ss->sidtab;
        char *s = NULL, *t = NULL, *n = NULL;
        u32 slen, tlen, nlen;
        struct audit_buffer *ab;
 
-       if (context_struct_to_string(policydb, scontext, &s, &slen))
+       if (sidtab_entry_to_string(policydb, sidtab, sentry, &s, &slen))
                goto out;
-       if (context_struct_to_string(policydb, tcontext, &t, &tlen))
+       if (sidtab_entry_to_string(policydb, sidtab, tentry, &t, &tlen))
                goto out;
        if (context_struct_to_string(policydb, newcontext, &n, &nlen))
                goto out;
@@ -1645,7 +1726,8 @@ static int security_compute_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct class_datum *cladatum = NULL;
-       struct context *scontext = NULL, *tcontext = NULL, newcontext;
+       struct context *scontext, *tcontext, newcontext;
+       struct sidtab_entry *sentry, *tentry;
        struct role_trans *roletr = NULL;
        struct avtab_key avkey;
        struct avtab_datum *avdatum;
@@ -1654,7 +1736,7 @@ static int security_compute_sid(struct selinux_state *state,
        int rc = 0;
        bool sock;
 
-       if (!state->initialized) {
+       if (!selinux_initialized(state)) {
                switch (orig_tclass) {
                case SECCLASS_PROCESS: /* kernel value */
                        *out_sid = ssid;
@@ -1682,21 +1764,24 @@ static int security_compute_sid(struct selinux_state *state,
        policydb = &state->ss->policydb;
        sidtab = state->ss->sidtab;
 
-       scontext = sidtab_search(sidtab, ssid);
-       if (!scontext) {
+       sentry = sidtab_search_entry(sidtab, ssid);
+       if (!sentry) {
                pr_err("SELinux: %s:  unrecognized SID %d\n",
                       __func__, ssid);
                rc = -EINVAL;
                goto out_unlock;
        }
-       tcontext = sidtab_search(sidtab, tsid);
-       if (!tcontext) {
+       tentry = sidtab_search_entry(sidtab, tsid);
+       if (!tentry) {
                pr_err("SELinux: %s:  unrecognized SID %d\n",
                       __func__, tsid);
                rc = -EINVAL;
                goto out_unlock;
        }
 
+       scontext = &sentry->context;
+       tcontext = &tentry->context;
+
        if (tclass && tclass <= policydb->p_classes.nprim)
                cladatum = policydb->class_val_to_struct[tclass - 1];
 
@@ -1797,15 +1882,13 @@ static int security_compute_sid(struct selinux_state *state,
 
        /* Check the validity of the context. */
        if (!policydb_context_isvalid(policydb, &newcontext)) {
-               rc = compute_sid_handle_invalid_context(state, scontext,
-                                                       tcontext,
-                                                       tclass,
-                                                       &newcontext);
+               rc = compute_sid_handle_invalid_context(state, sentry, tentry,
+                                                       tclass, &newcontext);
                if (rc)
                        goto out_unlock;
        }
        /* Obtain the sid for the context. */
-       rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+       rc = context_struct_to_sid(state, &newcontext, out_sid);
 out_unlock:
        read_unlock(&state->ss->policy_rwlock);
        context_destroy(&newcontext);
@@ -1957,6 +2040,7 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
                        context_init(newc);
                        newc->str = s;
                        newc->len = oldc->len;
+                       newc->hash = oldc->hash;
                        return 0;
                }
                kfree(s);
@@ -2033,6 +2117,10 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
                        goto bad;
        }
 
+       rc = context_add_hash(args->newp, newc);
+       if (rc)
+               goto bad;
+
        return 0;
 bad:
        /* Map old representation to string and save it. */
@@ -2042,6 +2130,7 @@ bad:
        context_destroy(newc);
        newc->str = s;
        newc->len = len;
+       newc->hash = context_compute_hash(s);
        pr_info("SELinux:  Context %s became invalid (unmapped).\n",
                newc->str);
        return 0;
@@ -2094,26 +2183,17 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
        int rc = 0;
        struct policy_file file = { data, len }, *fp = &file;
 
-       oldpolicydb = kcalloc(2, sizeof(*oldpolicydb), GFP_KERNEL);
-       if (!oldpolicydb) {
-               rc = -ENOMEM;
-               goto out;
-       }
-       newpolicydb = oldpolicydb + 1;
-
        policydb = &state->ss->policydb;
 
        newsidtab = kmalloc(sizeof(*newsidtab), GFP_KERNEL);
-       if (!newsidtab) {
-               rc = -ENOMEM;
-               goto out;
-       }
+       if (!newsidtab)
+               return -ENOMEM;
 
-       if (!state->initialized) {
+       if (!selinux_initialized(state)) {
                rc = policydb_read(policydb, fp);
                if (rc) {
                        kfree(newsidtab);
-                       goto out;
+                       return rc;
                }
 
                policydb->len = len;
@@ -2122,19 +2202,19 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
                if (rc) {
                        kfree(newsidtab);
                        policydb_destroy(policydb);
-                       goto out;
+                       return rc;
                }
 
                rc = policydb_load_isids(policydb, newsidtab);
                if (rc) {
                        kfree(newsidtab);
                        policydb_destroy(policydb);
-                       goto out;
+                       return rc;
                }
 
                state->ss->sidtab = newsidtab;
                security_load_policycaps(state);
-               state->initialized = 1;
+               selinux_mark_initialized(state);
                seqno = ++state->ss->latest_granting;
                selinux_complete_init();
                avc_ss_reset(state->avc, seqno);
@@ -2142,9 +2222,16 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
                selinux_status_update_policyload(state, seqno);
                selinux_netlbl_cache_invalidate();
                selinux_xfrm_notify_policyload();
-               goto out;
+               return 0;
        }
 
+       oldpolicydb = kcalloc(2, sizeof(*oldpolicydb), GFP_KERNEL);
+       if (!oldpolicydb) {
+               kfree(newsidtab);
+               return -ENOMEM;
+       }
+       newpolicydb = oldpolicydb + 1;
+
        rc = policydb_read(newpolicydb, fp);
        if (rc) {
                kfree(newsidtab);
@@ -2260,14 +2347,12 @@ int security_port_sid(struct selinux_state *state,
                      u8 protocol, u16 port, u32 *out_sid)
 {
        struct policydb *policydb;
-       struct sidtab *sidtab;
        struct ocontext *c;
        int rc = 0;
 
        read_lock(&state->ss->policy_rwlock);
 
        policydb = &state->ss->policydb;
-       sidtab = state->ss->sidtab;
 
        c = policydb->ocontexts[OCON_PORT];
        while (c) {
@@ -2280,8 +2365,7 @@ int security_port_sid(struct selinux_state *state,
 
        if (c) {
                if (!c->sid[0]) {
-                       rc = sidtab_context_to_sid(sidtab,
-                                                  &c->context[0],
+                       rc = context_struct_to_sid(state, &c->context[0],
                                                   &c->sid[0]);
                        if (rc)
                                goto out;
@@ -2306,14 +2390,12 @@ int security_ib_pkey_sid(struct selinux_state *state,
                         u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
 {
        struct policydb *policydb;
-       struct sidtab *sidtab;
        struct ocontext *c;
        int rc = 0;
 
        read_lock(&state->ss->policy_rwlock);
 
        policydb = &state->ss->policydb;
-       sidtab = state->ss->sidtab;
 
        c = policydb->ocontexts[OCON_IBPKEY];
        while (c) {
@@ -2327,7 +2409,7 @@ int security_ib_pkey_sid(struct selinux_state *state,
 
        if (c) {
                if (!c->sid[0]) {
-                       rc = sidtab_context_to_sid(sidtab,
+                       rc = context_struct_to_sid(state,
                                                   &c->context[0],
                                                   &c->sid[0]);
                        if (rc)
@@ -2352,14 +2434,12 @@ int security_ib_endport_sid(struct selinux_state *state,
                            const char *dev_name, u8 port_num, u32 *out_sid)
 {
        struct policydb *policydb;
-       struct sidtab *sidtab;
        struct ocontext *c;
        int rc = 0;
 
        read_lock(&state->ss->policy_rwlock);
 
        policydb = &state->ss->policydb;
-       sidtab = state->ss->sidtab;
 
        c = policydb->ocontexts[OCON_IBENDPORT];
        while (c) {
@@ -2374,8 +2454,7 @@ int security_ib_endport_sid(struct selinux_state *state,
 
        if (c) {
                if (!c->sid[0]) {
-                       rc = sidtab_context_to_sid(sidtab,
-                                                  &c->context[0],
+                       rc = context_struct_to_sid(state, &c->context[0],
                                                   &c->sid[0]);
                        if (rc)
                                goto out;
@@ -2398,14 +2477,12 @@ int security_netif_sid(struct selinux_state *state,
                       char *name, u32 *if_sid)
 {
        struct policydb *policydb;
-       struct sidtab *sidtab;
        int rc = 0;
        struct ocontext *c;
 
        read_lock(&state->ss->policy_rwlock);
 
        policydb = &state->ss->policydb;
-       sidtab = state->ss->sidtab;
 
        c = policydb->ocontexts[OCON_NETIF];
        while (c) {
@@ -2416,13 +2493,11 @@ int security_netif_sid(struct selinux_state *state,
 
        if (c) {
                if (!c->sid[0] || !c->sid[1]) {
-                       rc = sidtab_context_to_sid(sidtab,
-                                                 &c->context[0],
-                                                 &c->sid[0]);
+                       rc = context_struct_to_sid(state, &c->context[0],
+                                                  &c->sid[0]);
                        if (rc)
                                goto out;
-                       rc = sidtab_context_to_sid(sidtab,
-                                                  &c->context[1],
+                       rc = context_struct_to_sid(state, &c->context[1],
                                                   &c->sid[1]);
                        if (rc)
                                goto out;
@@ -2463,14 +2538,12 @@ int security_node_sid(struct selinux_state *state,
                      u32 *out_sid)
 {
        struct policydb *policydb;
-       struct sidtab *sidtab;
        int rc;
        struct ocontext *c;
 
        read_lock(&state->ss->policy_rwlock);
 
        policydb = &state->ss->policydb;
-       sidtab = state->ss->sidtab;
 
        switch (domain) {
        case AF_INET: {
@@ -2512,7 +2585,7 @@ int security_node_sid(struct selinux_state *state,
 
        if (c) {
                if (!c->sid[0]) {
-                       rc = sidtab_context_to_sid(sidtab,
+                       rc = context_struct_to_sid(state,
                                                   &c->context[0],
                                                   &c->sid[0]);
                        if (rc)
@@ -2564,7 +2637,7 @@ int security_get_user_sids(struct selinux_state *state,
        *sids = NULL;
        *nel = 0;
 
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                goto out;
 
        read_lock(&state->ss->policy_rwlock);
@@ -2596,12 +2669,17 @@ int security_get_user_sids(struct selinux_state *state,
                usercon.role = i + 1;
                ebitmap_for_each_positive_bit(&role->types, tnode, j) {
                        usercon.type = j + 1;
+                       /*
+                        * The same context struct is reused here so the hash
+                        * must be reset.
+                        */
+                       usercon.hash = 0;
 
                        if (mls_setup_user_range(policydb, fromcon, user,
                                                 &usercon))
                                continue;
 
-                       rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
+                       rc = context_struct_to_sid(state, &usercon, &sid);
                        if (rc)
                                goto out_unlock;
                        if (mynel < maxnel) {
@@ -2672,7 +2750,6 @@ static inline int __security_genfs_sid(struct selinux_state *state,
                                       u32 *sid)
 {
        struct policydb *policydb = &state->ss->policydb;
-       struct sidtab *sidtab = state->ss->sidtab;
        int len;
        u16 sclass;
        struct genfs *genfs;
@@ -2707,7 +2784,7 @@ static inline int __security_genfs_sid(struct selinux_state *state,
                goto out;
 
        if (!c->sid[0]) {
-               rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]);
+               rc = context_struct_to_sid(state, &c->context[0], &c->sid[0]);
                if (rc)
                        goto out;
        }
@@ -2749,7 +2826,6 @@ int security_genfs_sid(struct selinux_state *state,
 int security_fs_use(struct selinux_state *state, struct super_block *sb)
 {
        struct policydb *policydb;
-       struct sidtab *sidtab;
        int rc = 0;
        struct ocontext *c;
        struct superblock_security_struct *sbsec = sb->s_security;
@@ -2758,7 +2834,6 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
        read_lock(&state->ss->policy_rwlock);
 
        policydb = &state->ss->policydb;
-       sidtab = state->ss->sidtab;
 
        c = policydb->ocontexts[OCON_FSUSE];
        while (c) {
@@ -2770,7 +2845,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
        if (c) {
                sbsec->behavior = c->v.behavior;
                if (!c->sid[0]) {
-                       rc = sidtab_context_to_sid(sidtab, &c->context[0],
+                       rc = context_struct_to_sid(state, &c->context[0],
                                                   &c->sid[0]);
                        if (rc)
                                goto out;
@@ -2798,7 +2873,7 @@ int security_get_bools(struct selinux_state *state,
        struct policydb *policydb;
        int i, rc;
 
-       if (!state->initialized) {
+       if (!selinux_initialized(state)) {
                *len = 0;
                *names = NULL;
                *values = NULL;
@@ -2973,7 +3048,7 @@ int security_sid_mls_copy(struct selinux_state *state,
        int rc;
 
        rc = 0;
-       if (!state->initialized || !policydb->mls_enabled) {
+       if (!selinux_initialized(state) || !policydb->mls_enabled) {
                *new_sid = sid;
                goto out;
        }
@@ -3026,8 +3101,7 @@ int security_sid_mls_copy(struct selinux_state *state,
                        goto out_unlock;
                }
        }
-
-       rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
+       rc = context_struct_to_sid(state, &newcon, new_sid);
 out_unlock:
        read_unlock(&state->ss->policy_rwlock);
        context_destroy(&newcon);
@@ -3141,7 +3215,7 @@ int security_get_classes(struct selinux_state *state,
        struct policydb *policydb = &state->ss->policydb;
        int rc;
 
-       if (!state->initialized) {
+       if (!selinux_initialized(state)) {
                *nclasses = 0;
                *classes = NULL;
                return 0;
@@ -3290,7 +3364,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
 
        *rule = NULL;
 
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                return -EOPNOTSUPP;
 
        switch (field) {
@@ -3589,7 +3663,7 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
        struct context *ctx;
        struct context ctx_new;
 
-       if (!state->initialized) {
+       if (!selinux_initialized(state)) {
                *sid = SECSID_NULL;
                return 0;
        }
@@ -3620,7 +3694,7 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
                if (!mls_context_isvalid(policydb, &ctx_new))
                        goto out_free;
 
-               rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
+               rc = context_struct_to_sid(state, &ctx_new, sid);
                if (rc)
                        goto out_free;
 
@@ -3656,7 +3730,7 @@ int security_netlbl_sid_to_secattr(struct selinux_state *state,
        int rc;
        struct context *ctx;
 
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                return 0;
 
        read_lock(&state->ss->policy_rwlock);
@@ -3695,7 +3769,7 @@ int security_read_policy(struct selinux_state *state,
        int rc;
        struct policy_file fp;
 
-       if (!state->initialized)
+       if (!selinux_initialized(state))
                return -EINVAL;
 
        *len = security_policydb_len(state);
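
context_struct_to_sid() above computes the context hash lazily, treating zero as "not yet computed"; the same convention is why usercon.hash is reset when that struct is reused in security_get_user_sids(). A standalone analogue of the compute-once pattern (illustrative; the kernel uses full_name_hash()):

struct ctx {
	unsigned int hash;	/* 0 doubles as "not yet computed" */
	const char *str;
};

static unsigned int compute_hash(const char *s)
{
	unsigned int h = 0;	/* stand-in for full_name_hash() */

	while (*s)
		h = h * 31 + (unsigned char)*s++;
	return h ? h : 1;	/* sketch-only guard: keep 0 reserved */
}

static unsigned int ctx_hash(struct ctx *c)
{
	if (!c->hash)
		c->hash = compute_hash(c->str);
	return c->hash;
}
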
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
index 9a36de8603688e4d21774b69dc9fdafc8c89c377..c5896f39e8f628cd57c922665bca108fe1199a71 100644
@@ -8,7 +8,7 @@
 #define _SS_SERVICES_H_
 
 #include "policydb.h"
-#include "sidtab.h"
+#include "context.h"
 
 /* Mapping for a single class */
 struct selinux_mapping {
@@ -31,7 +31,7 @@ struct selinux_ss {
        struct selinux_map map;
        struct page *status_page;
        struct mutex status_lock;
-};
+} __randomize_layout;
 
 void services_compute_xperms_drivers(struct extended_perms *xperms,
                                struct avtab_node *node);
@@ -39,4 +39,6 @@ void services_compute_xperms_drivers(struct extended_perms *xperms,
 void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
                                        struct avtab_node *node);
 
+int context_add_hash(struct policydb *policydb, struct context *context);
+
 #endif /* _SS_SERVICES_H_ */
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index 7d49994e8d5ff21d22765e4ce6ea37f14159c60d..a308ce1e6a13b2af9a9342c75df8ede272614461 100644
@@ -9,6 +9,8 @@
  */
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include "security.h"
 #include "sidtab.h"
 
+struct sidtab_str_cache {
+       struct rcu_head rcu_member;
+       struct list_head lru_member;
+       struct sidtab_entry *parent;
+       u32 len;
+       char str[];
+};
+
+#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
+#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))
+
 int sidtab_init(struct sidtab *s)
 {
        u32 i;
 
        memset(s->roots, 0, sizeof(s->roots));
 
-       /* max count is SIDTAB_MAX so valid index is always < SIDTAB_MAX */
-       for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
-               s->rcache[i] = SIDTAB_MAX;
-
        for (i = 0; i < SECINITSID_NUM; i++)
                s->isids[i].set = 0;
 
        s->count = 0;
        s->convert = NULL;
+       hash_init(s->context_to_sid);
 
        spin_lock_init(&s->lock);
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+       s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
+       INIT_LIST_HEAD(&s->cache_lru_list);
+       spin_lock_init(&s->cache_lock);
+#endif
+
        return 0;
 }
 
+static u32 context_to_sid(struct sidtab *s, struct context *context)
+{
+       struct sidtab_entry *entry;
+       u32 sid = 0;
+
+       rcu_read_lock();
+       hash_for_each_possible_rcu(s->context_to_sid, entry, list,
+                                  context->hash) {
+               if (context_cmp(&entry->context, context)) {
+                       sid = entry->sid;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       return sid;
+}
+
 int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
 {
-       struct sidtab_isid_entry *entry;
+       struct sidtab_isid_entry *isid;
        int rc;
 
        if (sid == 0 || sid > SECINITSID_NUM)
                return -EINVAL;
 
-       entry = &s->isids[sid - 1];
+       isid = &s->isids[sid - 1];
 
-       rc = context_cpy(&entry->context, context);
+       rc = context_cpy(&isid->entry.context, context);
        if (rc)
                return rc;
 
-       entry->set = 1;
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+       isid->entry.cache = NULL;
+#endif
+       isid->set = 1;
+
+       /*
+        * Multiple initial sids may map to the same context. Check that this
+        * context is not already represented in the context_to_sid hashtable
+        * to avoid duplicate entries and long linked lists upon hash
+        * collision.
+        */
+       if (!context_to_sid(s, context)) {
+               isid->entry.sid = sid;
+               hash_add(s->context_to_sid, &isid->entry.list, context->hash);
+       }
+
        return 0;
 }
 
+int sidtab_hash_stats(struct sidtab *sidtab, char *page)
+{
+       int i;
+       int chain_len = 0;
+       int slots_used = 0;
+       int entries = 0;
+       int max_chain_len = 0;
+       int cur_bucket = 0;
+       struct sidtab_entry *entry;
+
+       rcu_read_lock();
+       hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
+               entries++;
+               if (i == cur_bucket) {
+                       chain_len++;
+                       if (chain_len == 1)
+                               slots_used++;
+               } else {
+                       cur_bucket = i;
+                       if (chain_len > max_chain_len)
+                               max_chain_len = chain_len;
+                       /* first entry seen in this bucket: count the
+                        * slot and start its chain at 1, not 0 */
+                       chain_len = 1;
+                       slots_used++;
+               }
+       }
+       rcu_read_unlock();
+
+       if (chain_len > max_chain_len)
+               max_chain_len = chain_len;
+
+       return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
+                        "longest chain: %d\n", entries,
+                        slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
+}
+
 static u32 sidtab_level_from_count(u32 count)
 {
        u32 capacity = SIDTAB_LEAF_ENTRIES;
@@ -88,7 +171,8 @@ static int sidtab_alloc_roots(struct sidtab *s, u32 level)
        return 0;
 }
 
-static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
+static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
+                                            int alloc)
 {
        union sidtab_entry_inner *entry;
        u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
@@ -125,10 +209,10 @@ static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
                if (!entry->ptr_leaf)
                        return NULL;
        }
-       return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context;
+       return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
 }
 
-static struct context *sidtab_lookup(struct sidtab *s, u32 index)
+static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
 {
        /* read entries only after reading count */
        u32 count = smp_load_acquire(&s->count);
@@ -139,148 +223,62 @@ static struct context *sidtab_lookup(struct sidtab *s, u32 index)
        return sidtab_do_lookup(s, index, 0);
 }
 
-static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
+static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
 {
-       return s->isids[sid - 1].set ? &s->isids[sid - 1].context : NULL;
+       return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
 }
 
-static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
+static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
+                                              int force)
 {
-       struct context *context;
-
        if (sid != 0) {
+               struct sidtab_entry *entry;
+
                if (sid > SECINITSID_NUM)
-                       context = sidtab_lookup(s, sid - (SECINITSID_NUM + 1));
+                       entry = sidtab_lookup(s, sid_to_index(sid));
                else
-                       context = sidtab_lookup_initial(s, sid);
-               if (context && (!context->len || force))
-                       return context;
+                       entry = sidtab_lookup_initial(s, sid);
+               if (entry && (!entry->context.len || force))
+                       return entry;
        }
 
        return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
 }
 
-struct context *sidtab_search(struct sidtab *s, u32 sid)
+struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
 {
        return sidtab_search_core(s, sid, 0);
 }
 
-struct context *sidtab_search_force(struct sidtab *s, u32 sid)
+struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
 {
        return sidtab_search_core(s, sid, 1);
 }
 
-static int sidtab_find_context(union sidtab_entry_inner entry,
-                              u32 *pos, u32 count, u32 level,
-                              struct context *context, u32 *index)
-{
-       int rc;
-       u32 i;
-
-       if (level != 0) {
-               struct sidtab_node_inner *node = entry.ptr_inner;
-
-               i = 0;
-               while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
-                       rc = sidtab_find_context(node->entries[i],
-                                                pos, count, level - 1,
-                                                context, index);
-                       if (rc == 0)
-                               return 0;
-                       i++;
-               }
-       } else {
-               struct sidtab_node_leaf *node = entry.ptr_leaf;
-
-               i = 0;
-               while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
-                       if (context_cmp(&node->entries[i].context, context)) {
-                               *index = *pos;
-                               return 0;
-                       }
-                       (*pos)++;
-                       i++;
-               }
-       }
-       return -ENOENT;
-}
-
-static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
-{
-       while (pos > 0) {
-               WRITE_ONCE(s->rcache[pos], READ_ONCE(s->rcache[pos - 1]));
-               --pos;
-       }
-       WRITE_ONCE(s->rcache[0], index);
-}
-
-static void sidtab_rcache_push(struct sidtab *s, u32 index)
-{
-       sidtab_rcache_update(s, index, SIDTAB_RCACHE_SIZE - 1);
-}
-
-static int sidtab_rcache_search(struct sidtab *s, struct context *context,
-                               u32 *index)
-{
-       u32 i;
-
-       for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
-               u32 v = READ_ONCE(s->rcache[i]);
-
-               if (v >= SIDTAB_MAX)
-                       continue;
-
-               if (context_cmp(sidtab_do_lookup(s, v, 0), context)) {
-                       sidtab_rcache_update(s, v, i);
-                       *index = v;
-                       return 0;
-               }
-       }
-       return -ENOENT;
-}
-
-static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
-                                u32 *index)
+int sidtab_context_to_sid(struct sidtab *s, struct context *context,
+                         u32 *sid)
 {
        unsigned long flags;
-       u32 count, count_locked, level, pos;
+       u32 count;
        struct sidtab_convert_params *convert;
-       struct context *dst, *dst_convert;
+       struct sidtab_entry *dst, *dst_convert;
        int rc;
 
-       rc = sidtab_rcache_search(s, context, index);
-       if (rc == 0)
-               return 0;
-
-       /* read entries only after reading count */
-       count = smp_load_acquire(&s->count);
-       level = sidtab_level_from_count(count);
-
-       pos = 0;
-       rc = sidtab_find_context(s->roots[level], &pos, count, level,
-                                context, index);
-       if (rc == 0) {
-               sidtab_rcache_push(s, *index);
+       *sid = context_to_sid(s, context);
+       if (*sid)
                return 0;
-       }
 
        /* lock-free search failed: lock, re-search, and insert if not found */
        spin_lock_irqsave(&s->lock, flags);
 
+       rc = 0;
+       *sid = context_to_sid(s, context);
+       if (*sid)
+               goto out_unlock;
+
+       /* read entries only after reading count */
+       count = smp_load_acquire(&s->count);
        convert = s->convert;
-       count_locked = s->count;
-       level = sidtab_level_from_count(count_locked);
-
-       /* if count has changed before we acquired the lock, then catch up */
-       while (count < count_locked) {
-               if (context_cmp(sidtab_do_lookup(s, count, 0), context)) {
-                       sidtab_rcache_push(s, count);
-                       *index = count;
-                       rc = 0;
-                       goto out_unlock;
-               }
-               ++count;
-       }
 
        /* bail out if we already reached max entries */
        rc = -EOVERFLOW;
@@ -293,7 +291,9 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
        if (!dst)
                goto out_unlock;
 
-       rc = context_cpy(dst, context);
+       dst->sid = index_to_sid(count);
+
+       rc = context_cpy(&dst->context, context);
        if (rc)
                goto out_unlock;
 
@@ -305,29 +305,32 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
                rc = -ENOMEM;
                dst_convert = sidtab_do_lookup(convert->target, count, 1);
                if (!dst_convert) {
-                       context_destroy(dst);
+                       context_destroy(&dst->context);
                        goto out_unlock;
                }
 
-               rc = convert->func(context, dst_convert, convert->args);
+               rc = convert->func(context, &dst_convert->context,
+                                  convert->args);
                if (rc) {
-                       context_destroy(dst);
+                       context_destroy(&dst->context);
                        goto out_unlock;
                }
-
-               /* at this point we know the insert won't fail */
+               dst_convert->sid = index_to_sid(count);
                convert->target->count = count + 1;
+
+               hash_add_rcu(convert->target->context_to_sid,
+                            &dst_convert->list, dst_convert->context.hash);
        }
 
        if (context->len)
                pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
                        context->str);
 
-       sidtab_rcache_push(s, count);
-       *index = count;
+       *sid = index_to_sid(count);
 
-       /* write entries before writing new count */
+       /* write entries before updating count */
        smp_store_release(&s->count, count + 1);
+       hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);
 
        rc = 0;
 out_unlock:
@@ -335,25 +338,19 @@ out_unlock:
        return rc;
 }
 
-int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
+static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
 {
-       int rc;
+       struct sidtab_entry *entry;
        u32 i;
 
-       for (i = 0; i < SECINITSID_NUM; i++) {
-               struct sidtab_isid_entry *entry = &s->isids[i];
+       for (i = 0; i < count; i++) {
+               entry = sidtab_do_lookup(s, i, 0);
+               entry->sid = index_to_sid(i);
 
-               if (entry->set && context_cmp(context, &entry->context)) {
-                       *sid = i + 1;
-                       return 0;
-               }
-       }
+               hash_add_rcu(s->context_to_sid, &entry->list,
+                            entry->context.hash);
 
-       rc = sidtab_reverse_lookup(s, context, sid);
-       if (rc)
-               return rc;
-       *sid += SECINITSID_NUM + 1;
-       return 0;
+       }
 }
 
 static int sidtab_convert_tree(union sidtab_entry_inner *edst,
@@ -435,7 +432,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
        /* enable live convert of new entries */
        s->convert = params;
 
-       /* we can safely do the rest of the conversion outside the lock */
+       /* we can safely convert the tree outside the lock */
        spin_unlock_irqrestore(&s->lock, flags);
 
        pr_info("SELinux:  Converting %u SID table entries...\n", count);
@@ -449,8 +446,25 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
                spin_lock_irqsave(&s->lock, flags);
                s->convert = NULL;
                spin_unlock_irqrestore(&s->lock, flags);
+               return rc;
        }
-       return rc;
+       /*
+        * The hashtable can also be modified in sidtab_context_to_sid()
+        * so we must re-acquire the lock here.
+        */
+       spin_lock_irqsave(&s->lock, flags);
+       sidtab_convert_hashtable(params->target, count);
+       spin_unlock_irqrestore(&s->lock, flags);
+
+       return 0;
+}
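
sidtab_convert() now finishes in two phases: the long tree walk runs with s->lock dropped (entries added in the meantime are converted on the fly through s->convert in sidtab_context_to_sid()), and only the rebuild of the target's context_to_sid hashtable retakes the lock, because concurrent sidtab_context_to_sid() calls insert into that same table. A condensed, illustrative restatement of the flow; demo_convert_tree() stands in for the real sidtab_convert_tree() walk and error paths are omitted:

    static int demo_convert(struct sidtab *s, struct sidtab_convert_params *params)
    {
            unsigned long flags;
            u32 count;
            int rc;

            spin_lock_irqsave(&s->lock, flags);
            count = s->count;
            s->convert = params;    /* from here, new entries convert live */
            spin_unlock_irqrestore(&s->lock, flags);

            rc = demo_convert_tree(s, params, count);       /* lock not held */
            if (rc)
                    return rc;

            spin_lock_irqsave(&s->lock, flags);
            sidtab_convert_hashtable(params->target, count);
            spin_unlock_irqrestore(&s->lock, flags);
            return 0;
    }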
+
+static void sidtab_destroy_entry(struct sidtab_entry *entry)
+{
+       context_destroy(&entry->context);
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+       kfree(rcu_dereference_raw(entry->cache));
+#endif
 }
 
 static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
@@ -473,7 +487,7 @@ static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
                        return;
 
                for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
-                       context_destroy(&node->entries[i].context);
+                       sidtab_destroy_entry(&node->entries[i]);
                kfree(node);
        }
 }
@@ -484,11 +498,101 @@ void sidtab_destroy(struct sidtab *s)
 
        for (i = 0; i < SECINITSID_NUM; i++)
                if (s->isids[i].set)
-                       context_destroy(&s->isids[i].context);
+                       sidtab_destroy_entry(&s->isids[i].entry);
 
        level = SIDTAB_MAX_LEVEL;
        while (level && !s->roots[level].ptr_inner)
                --level;
 
        sidtab_destroy_tree(s->roots[level], level);
+       /*
+        * The context_to_sid hashtable's objects are all shared
+        * with the isids array and context tree, and so don't need
+        * to be cleaned up here.
+        */
+}
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+
+void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
+                       const char *str, u32 str_len)
+{
+       struct sidtab_str_cache *cache, *victim = NULL;
+
+       /* do not cache invalid contexts */
+       if (entry->context.len)
+               return;
+
+       /*
+        * Skip the put operation when in non-task context to avoid the need
+        * to disable interrupts while holding s->cache_lock.
+        */
+       if (!in_task())
+               return;
+
+       spin_lock(&s->cache_lock);
+
+       cache = rcu_dereference_protected(entry->cache,
+                                         lockdep_is_held(&s->cache_lock));
+       if (cache) {
+               /* entry in cache - just bump to the head of LRU list */
+               list_move(&cache->lru_member, &s->cache_lru_list);
+               goto out_unlock;
+       }
+
+       cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
+       if (!cache)
+               goto out_unlock;
+
+       if (s->cache_free_slots == 0) {
+               /* pop a cache entry from the tail and free it */
+               victim = container_of(s->cache_lru_list.prev,
+                                     struct sidtab_str_cache, lru_member);
+               list_del(&victim->lru_member);
+               rcu_assign_pointer(victim->parent->cache, NULL);
+       } else {
+               s->cache_free_slots--;
+       }
+       cache->parent = entry;
+       cache->len = str_len;
+       memcpy(cache->str, str, str_len);
+       list_add(&cache->lru_member, &s->cache_lru_list);
+
+       rcu_assign_pointer(entry->cache, cache);
+
+out_unlock:
+       spin_unlock(&s->cache_lock);
+       kfree_rcu(victim, rcu_member);
 }
+
+int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
+                      char **out, u32 *out_len)
+{
+       struct sidtab_str_cache *cache;
+       int rc = 0;
+
+       if (entry->context.len)
+               return -ENOENT; /* do not cache invalid contexts */
+
+       rcu_read_lock();
+
+       cache = rcu_dereference(entry->cache);
+       if (!cache) {
+               rc = -ENOENT;
+       } else {
+               *out_len = cache->len;
+               if (out) {
+                       *out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
+                       if (!*out)
+                               rc = -ENOMEM;
+               }
+       }
+
+       rcu_read_unlock();
+
+       if (!rc && out)
+               sidtab_sid2str_put(s, entry, *out, *out_len);
+       return rc;
+}
+
+#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
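
The new SID to string cache combines three lifetime rules: readers only ever hold rcu_read_lock(), writers serialize on cache_lock, and an evicted victim is unpublished with rcu_assign_pointer(..., NULL) before being handed to kfree_rcu(), so readers that already fetched the pointer can finish with it safely. Note also that a successful sidtab_sid2str_get() re-puts the string, which is what bumps a cache hit back to the head of the LRU list. An illustrative skeleton of the eviction rules only (demo_* names are not from the patch):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo_cache {
            struct rcu_head rcu;
            struct list_head lru;
            struct demo_cache __rcu **owner;  /* slot that publishes us */
    };

    /* Called with the cache spinlock held and the list non-empty. */
    static void demo_evict_tail(struct list_head *lru_list)
    {
            struct demo_cache *victim;

            victim = list_last_entry(lru_list, struct demo_cache, lru);
            list_del(&victim->lru);                   /* off the LRU list */
            rcu_assign_pointer(*victim->owner, NULL); /* unpublish */
            kfree_rcu(victim, rcu);                   /* free after grace period */
    }
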
index 1f4763141aa1174e109000a1b0c19797456a0fa0..3311d9f236c00b9f4a5b144c791b674b86c6111f 100644 (file)
 
 #include <linux/spinlock_types.h>
 #include <linux/log2.h>
+#include <linux/hashtable.h>
 
 #include "context.h"
 
-struct sidtab_entry_leaf {
+struct sidtab_entry {
+       u32 sid;
        struct context context;
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+       struct sidtab_str_cache __rcu *cache;
+#endif
+       struct hlist_node list;
 };
 
-struct sidtab_node_inner;
-struct sidtab_node_leaf;
-
 union sidtab_entry_inner {
        struct sidtab_node_inner *ptr_inner;
        struct sidtab_node_leaf  *ptr_leaf;
@@ -38,7 +41,7 @@ union sidtab_entry_inner {
        (SIDTAB_NODE_ALLOC_SHIFT - size_to_shift(sizeof(union sidtab_entry_inner)))
 #define SIDTAB_INNER_ENTRIES ((size_t)1 << SIDTAB_INNER_SHIFT)
 #define SIDTAB_LEAF_ENTRIES \
-       (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry_leaf))
+       (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry))
 
 #define SIDTAB_MAX_BITS 32
 #define SIDTAB_MAX U32_MAX
@@ -48,7 +51,7 @@ union sidtab_entry_inner {
                     SIDTAB_INNER_SHIFT)
 
 struct sidtab_node_leaf {
-       struct sidtab_entry_leaf entries[SIDTAB_LEAF_ENTRIES];
+       struct sidtab_entry entries[SIDTAB_LEAF_ENTRIES];
 };
 
 struct sidtab_node_inner {
@@ -57,7 +60,7 @@ struct sidtab_node_inner {
 
 struct sidtab_isid_entry {
        int set;
-       struct context context;
+       struct sidtab_entry entry;
 };
 
 struct sidtab_convert_params {
@@ -66,7 +69,8 @@ struct sidtab_convert_params {
        struct sidtab *target;
 };
 
-#define SIDTAB_RCACHE_SIZE 3
+#define SIDTAB_HASH_BITS CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
+#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)
 
 struct sidtab {
        /*
@@ -83,17 +87,38 @@ struct sidtab {
        struct sidtab_convert_params *convert;
        spinlock_t lock;
 
-       /* reverse lookup cache - access atomically via {READ|WRITE}_ONCE() */
-       u32 rcache[SIDTAB_RCACHE_SIZE];
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+       /* SID -> context string cache */
+       u32 cache_free_slots;
+       struct list_head cache_lru_list;
+       spinlock_t cache_lock;
+#endif
 
        /* index == SID - 1 (no entry for SECSID_NULL) */
        struct sidtab_isid_entry isids[SECINITSID_NUM];
+
+       /* Hash table for fast reverse context-to-sid lookups. */
+       DECLARE_HASHTABLE(context_to_sid, SIDTAB_HASH_BITS);
 };
 
 int sidtab_init(struct sidtab *s);
 int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context);
-struct context *sidtab_search(struct sidtab *s, u32 sid);
-struct context *sidtab_search_force(struct sidtab *s, u32 sid);
+struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid);
+struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid);
+
+static inline struct context *sidtab_search(struct sidtab *s, u32 sid)
+{
+       struct sidtab_entry *entry = sidtab_search_entry(s, sid);
+
+       return entry ? &entry->context : NULL;
+}
+
+static inline struct context *sidtab_search_force(struct sidtab *s, u32 sid)
+{
+       struct sidtab_entry *entry = sidtab_search_entry_force(s, sid);
+
+       return entry ? &entry->context : NULL;
+}
 
 int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
 
@@ -101,6 +126,27 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
 
 void sidtab_destroy(struct sidtab *s);
 
+int sidtab_hash_stats(struct sidtab *sidtab, char *page);
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
+                       const char *str, u32 str_len);
+int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
+                      char **out, u32 *out_len);
+#else
+static inline void sidtab_sid2str_put(struct sidtab *s,
+                                     struct sidtab_entry *entry,
+                                     const char *str, u32 str_len)
+{
+}
+static inline int sidtab_sid2str_get(struct sidtab *s,
+                                    struct sidtab_entry *entry,
+                                    char **out, u32 *out_len)
+{
+       return -ENOENT;
+}
+#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
+
 #endif /* _SS_SIDTAB_H_ */
 
 
index dd3d5942e6692457b505e137e0bd30ef6cb90dab..c36bafbcd77ee4fc29396cbd657c600589448e94 100644 (file)
@@ -951,7 +951,8 @@ static bool tomoyo_manager(void)
        exe = tomoyo_get_exe();
        if (!exe)
                return false;
-       list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) {
+       list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (!ptr->head.is_deleted &&
                    (!tomoyo_pathcmp(domainname, ptr->manager) ||
                     !strcmp(exe, ptr->manager->name))) {
@@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname)
        if (mutex_lock_interruptible(&tomoyo_policy_lock))
                return -EINTR;
        /* Is there an active domain? */
-       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                /* Never delete tomoyo_kernel_domain */
                if (domain == &tomoyo_kernel_domain)
                        continue;
@@ -2778,7 +2780,8 @@ void tomoyo_check_profile(void)
 
        tomoyo_policy_loaded = true;
        pr_info("TOMOYO: 2.6.0\n");
-       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                const u8 profile = domain->profile;
                struct tomoyo_policy_namespace *ns = domain->ns;
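
All of the TOMOYO hunks in this merge are one mechanical change: list_for_each_entry_rcu() gained an optional fourth argument, a lockdep expression stating how the traversal is protected, and TOMOYO walks these lists under SRCU (tomoyo_ss) rather than plain rcu_read_lock(), which CONFIG_PROVE_RCU_LIST would otherwise flag as unprotected. A usage sketch with made-up names:

    #include <linux/rculist.h>
    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);
    static LIST_HEAD(my_list);

    struct my_entry {
            struct list_head list;
            int val;
    };

    static int sum_entries(void)
    {
            struct my_entry *e;
            int idx, sum = 0;

            idx = srcu_read_lock(&my_srcu);
            list_for_each_entry_rcu(e, &my_list, list,
                                    srcu_read_lock_held(&my_srcu))
                    sum += e->val;
            srcu_read_unlock(&my_srcu, idx);
            return sum;
    }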
 
index 8526a0a74023855bbf383e56528e7ebf8107a8a9..7869d6a9980bfc4e7c0cf70a826d7b5661aaef67 100644 (file)
@@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
 
        if (mutex_lock_interruptible(&tomoyo_policy_lock))
                return -ENOMEM;
-       list_for_each_entry_rcu(entry, list, list) {
+       list_for_each_entry_rcu(entry, list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
                        continue;
                if (!check_duplicate(entry, new_entry))
@@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
        }
        if (mutex_lock_interruptible(&tomoyo_policy_lock))
                goto out;
-       list_for_each_entry_rcu(entry, list, list) {
+       list_for_each_entry_rcu(entry, list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
                        continue;
                if (!tomoyo_same_acl_head(entry, new_entry) ||
@@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r,
        u16 i = 0;
 
 retry:
-       list_for_each_entry_rcu(ptr, list, list) {
+       list_for_each_entry_rcu(ptr, list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (ptr->is_deleted || ptr->type != r->param_type)
                        continue;
                if (!check_entry(r, ptr))
@@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition
 {
        const struct tomoyo_transition_control *ptr;
 
-       list_for_each_entry_rcu(ptr, list, head.list) {
+       list_for_each_entry_rcu(ptr, list, head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (ptr->head.is_deleted || ptr->type != type)
                        continue;
                if (ptr->domainname) {
@@ -735,7 +739,8 @@ retry:
 
                /* Check 'aggregator' directive. */
                candidate = &exename;
-               list_for_each_entry_rcu(ptr, list, head.list) {
+               list_for_each_entry_rcu(ptr, list, head.list,
+                                       srcu_read_lock_held(&tomoyo_ss)) {
                        if (ptr->head.is_deleted ||
                            !tomoyo_path_matches_pattern(&exename,
                                                         ptr->original_name))
index a37c7dc66e4448e41a498b5005950e6d127833e3..1cecdd7975971224683fc5f660eaf5e29d2fdacf 100644 (file)
@@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
 {
        struct tomoyo_path_group *member;
 
-       list_for_each_entry_rcu(member, &group->member_list, head.list) {
+       list_for_each_entry_rcu(member, &group->member_list, head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (member->head.is_deleted)
                        continue;
                if (!tomoyo_path_matches_pattern(pathname, member->member_name))
@@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min,
        struct tomoyo_number_group *member;
        bool matched = false;
 
-       list_for_each_entry_rcu(member, &group->member_list, head.list) {
+       list_for_each_entry_rcu(member, &group->member_list, head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (member->head.is_deleted)
                        continue;
                if (min > member->number.values[1] ||
@@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
        bool matched = false;
        const u8 size = is_ipv6 ? 16 : 4;
 
-       list_for_each_entry_rcu(member, &group->member_list, head.list) {
+       list_for_each_entry_rcu(member, &group->member_list, head.list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (member->head.is_deleted)
                        continue;
                if (member->address.is_ipv6 != is_ipv6)
index e7832448d721d5acaa502b3a47f17a4fa0a94e86..bf38fc1b59b2878b42533c349a37646fb2630e0b 100644 (file)
@@ -217,31 +217,6 @@ out:
        return ERR_PTR(-ENOMEM);
 }
 
-/**
- * tomoyo_get_socket_name - Get the name of a socket.
- *
- * @path:   Pointer to "struct path".
- * @buffer: Pointer to buffer to return value in.
- * @buflen: Sizeof @buffer.
- *
- * Returns the buffer.
- */
-static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
-                                   const int buflen)
-{
-       struct inode *inode = d_backing_inode(path->dentry);
-       struct socket *sock = inode ? SOCKET_I(inode) : NULL;
-       struct sock *sk = sock ? sock->sk : NULL;
-
-       if (sk) {
-               snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]",
-                        sk->sk_family, sk->sk_type, sk->sk_protocol);
-       } else {
-               snprintf(buffer, buflen, "socket:[unknown]");
-       }
-       return buffer;
-}
-
 /**
  * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
  *
@@ -279,12 +254,7 @@ char *tomoyo_realpath_from_path(const struct path *path)
                        break;
                /* To make sure that pos is '\0' terminated. */
                buf[buf_len - 1] = '\0';
-               /* Get better name for socket. */
-               if (sb->s_magic == SOCKFS_MAGIC) {
-                       pos = tomoyo_get_socket_name(path, buf, buf_len - 1);
-                       goto encode;
-               }
-               /* For "pipe:[\$]". */
+               /* For "pipe:[\$]" and "socket:[\$]". */
                if (dentry->d_op && dentry->d_op->d_dname) {
                        pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
                        goto encode;
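
The deleted tomoyo_get_socket_name() duplicated information the VFS already provides: sockfs installs a d_dname operation of its own, so the generic d_dname branch kept above now covers sockets too, and the reported name becomes the inode-based form instead of the family/type/protocol string. For reference, roughly what sockfs supplies (paraphrased from net/socket.c):

    /* paraphrase; see the real sockfs_dname() in net/socket.c */
    static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
    {
            return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
                                 d_inode(dentry)->i_ino);
    }
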
index 52752e1a84ed8f9131d303896e3a3af7504247da..eba0b3395851e0ae4a3be8be16dc37f28dccd726 100644 (file)
@@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
 
        name.name = domainname;
        tomoyo_fill_path_info(&name);
-       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+       list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                if (!domain->is_deleted &&
                    !tomoyo_pathcmp(&name, domain->domainname))
                        return domain;
@@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
                return false;
        if (!domain)
                return true;
-       list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+       list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
+                               srcu_read_lock_held(&tomoyo_ss)) {
                u16 perm;
                u8 i;
 
index 1fe581167b7b9aad19be6a06c3c5d98d3183cf45..d083225344a06d7a5aadb5b9afb5d7918f459c02 100644 (file)
@@ -739,6 +739,10 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
        while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
                runtime->boundary *= 2;
 
+       /* clear the buffer for avoiding possible kernel info leaks */
+       if (runtime->dma_area && !substream->ops->copy_user)
+               memset(runtime->dma_area, 0, runtime->dma_bytes);
+
        snd_pcm_timer_resolution_change(substream);
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
 
index 63dc7bdb622df37ef1827121459bc9f9d4f78782..be59b59c9be40c233995817f3ddb3e5d71fbb6d7 100644 (file)
@@ -471,15 +471,19 @@ void snd_seq_info_timer_read(struct snd_info_entry *entry,
                q = queueptr(idx);
                if (q == NULL)
                        continue;
-               if ((tmr = q->timer) == NULL ||
-                   (ti = tmr->timeri) == NULL) {
-                       queuefree(q);
-                       continue;
-               }
+               mutex_lock(&q->timer_mutex);
+               tmr = q->timer;
+               if (!tmr)
+                       goto unlock;
+               ti = tmr->timeri;
+               if (!ti)
+                       goto unlock;
                snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
                resolution = snd_timer_resolution(ti) * tmr->ticks;
                snd_iprintf(buffer, "  Period time : %lu.%09lu\n", resolution / 1000000000, resolution % 1000000000);
                snd_iprintf(buffer, "  Skew : %u / %u\n", tmr->skew, tmr->skew_base);
+unlock:
+               mutex_unlock(&q->timer_mutex);
                queuefree(q);
        }
 }
index 70a6d1832698f9b2cfe20e0cf73f9a631e4e2639..db1295f840b7639c925a50a3d9c7218646840b72 100644 (file)
@@ -1123,7 +1123,7 @@ snd_ml403_ac97cr_create(struct snd_card *card, struct platform_device *pfdev,
        PDEBUG(INIT_INFO, "Trying to reserve resources now ...\n");
        resource = platform_get_resource(pfdev, IORESOURCE_MEM, 0);
        /* get "port" */
-       ml403_ac97cr->port = ioremap_nocache(resource->start,
+       ml403_ac97cr->port = ioremap(resource->start,
                                             (resource->end) -
                                             (resource->start) + 1);
        if (ml403_ac97cr->port == NULL) {
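
This hunk, and the many similar sound hunks below, belong to the tree-wide removal of ioremap_nocache(), which by this cycle had become a plain alias for ioremap(), so the rename changes no behavior. Where the length comes from a struct resource, the modern idiom would also use resource_size() rather than the open-coded end - start + 1 arithmetic left in place here; illustrative only:

    void __iomem *port = ioremap(resource->start, resource_size(resource));
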
index a63fcbc875adc3b9906f90d13fb3f5c8ea4ebbec..02f4a8318e38e7309da42b3274a59fee95d8349f 100644 (file)
@@ -159,8 +159,11 @@ int snd_dice_detect_extension_formats(struct snd_dice *dice)
                int j;
 
                for (j = i + 1; j < 9; ++j) {
-                       if (pointers[i * 2] == pointers[j * 2])
+                       if (pointers[i * 2] == pointers[j * 2]) {
+                               // Fallback to limited functionality.
+                               err = -ENXIO;
                                goto end;
+                       }
                }
        }
 
index 4e3bd9a2bec0656c7337817c9af71bd93fba7a98..bd91c6ecb1123bf755b7952d3ffc0e3f50943738 100644 (file)
@@ -247,7 +247,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
                mutex_unlock(&ff->mutex);
        }
 
-       return 0;
+       return err;
 }
 
 static int pcm_hw_free(struct snd_pcm_substream *substream)
index 349b4d09e84f6f885dd3a9bff2b0e29b127795e8..0059709310303c07a11bce1c04a551ac6502cdce 100644 (file)
@@ -177,18 +177,14 @@ static int pcm_open(struct snd_pcm_substream *substream)
                        err = snd_pcm_hw_constraint_minmax(substream->runtime,
                                        SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                        frames_per_period, frames_per_period);
-                       if (err < 0) {
-                               mutex_unlock(&motu->mutex);
+                       if (err < 0)
                                goto err_locked;
-                       }
 
                        err = snd_pcm_hw_constraint_minmax(substream->runtime,
                                        SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                                        frames_per_buffer, frames_per_buffer);
-                       if (err < 0) {
-                               mutex_unlock(&motu->mutex);
+                       if (err < 0)
                                goto err_locked;
-                       }
                }
        }
 
index 9124603edabe58690d90562f0aa00d9413909d84..67fd3e844dd6aca2a1a0e4045e8d35f749a8b0fc 100644 (file)
@@ -285,7 +285,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
                mutex_unlock(&oxfw->mutex);
        }
 
-       return 0;
+       return err;
 }
 
 static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
index e80bb84c43f6780fc7006fd3930b2c546e80b2e3..f823a2ab3544bf769101e5c263b7e7d3ca96dd74 100644 (file)
@@ -157,14 +157,15 @@ static void read_status_messages(struct amdtp_stream *s,
                        if ((before ^ after) & mask) {
                                struct snd_firewire_tascam_change *entry =
                                                &tscm->queue[tscm->push_pos];
+                               unsigned long flag;
 
-                               spin_lock_irq(&tscm->lock);
+                               spin_lock_irqsave(&tscm->lock, flag);
                                entry->index = index;
                                entry->before = before;
                                entry->after = after;
                                if (++tscm->push_pos >= SND_TSCM_QUEUE_COUNT)
                                        tscm->push_pos = 0;
-                               spin_unlock_irq(&tscm->lock);
+                               spin_unlock_irqrestore(&tscm->lock, flag);
 
                                wake_up(&tscm->hwdep_wait);
                        }
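
The switch from spin_lock_irq() to spin_lock_irqsave() matters because spin_unlock_irq() unconditionally re-enables interrupts; this code runs from the FireWire isochronous-context handling, where interrupts may already be disabled, so the caller's IRQ state has to be saved and restored instead. The generic shape, with illustrative names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_update(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            /* ... touch state shared with other contexts ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
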
index 906b1e20bae01204c5a3bd3cacf9f38bd6318630..286361ecd6404dd5714ea1175a941ce732f890fc 100644 (file)
@@ -363,7 +363,6 @@ static const struct regmap_config hda_regmap_cfg = {
        .reg_write = hda_reg_write,
        .use_single_read = true,
        .use_single_write = true,
-       .disable_locking = true,
 };
 
 /**
index f9707fb05efe6812e9ec2ad69fb45e96b9bd9db5..682ed39f79b015a3982b6e149ff3bf1c0d3632b6 100644 (file)
@@ -120,10 +120,8 @@ void snd_hdac_stream_clear(struct hdac_stream *azx_dev)
        snd_hdac_stream_updateb(azx_dev, SD_CTL,
                                SD_CTL_DMA_START | SD_INT_MASK, 0);
        snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
-       if (azx_dev->stripe) {
+       if (azx_dev->stripe)
                snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
-               azx_dev->stripe = 0;
-       }
        azx_dev->running = false;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_stream_clear);
index e435ebd0ced428ced9e004a71672192e08b09fa8..f15fe597582ca2447767edbd3787f5a6afabfc0e 100644 (file)
@@ -551,7 +551,7 @@ static int snd_msnd_attach(struct snd_card *card)
                free_irq(chip->irq, chip);
                return -EBUSY;
        }
-       chip->mappedbase = ioremap_nocache(chip->base, 0x8000);
+       chip->mappedbase = ioremap(chip->base, 0x8000);
        if (!chip->mappedbase) {
                printk(KERN_ERR LOGNAME
                        ": unable to map memory region 0x%lx-0x%lx\n",
index 6acc59c253794f04d262dd8bb5b66bc10debc74c..0d1eced95f33eda7ff733579c107e3ecfec50f59 100644 (file)
@@ -907,7 +907,7 @@ snd_harmony_create(struct snd_card *card,
        h->card = card;
        h->dev = padev;
        h->irq = -1;
-       h->iobase = ioremap_nocache(padev->hpa.start, HARMONY_SIZE);
+       h->iobase = ioremap(padev->hpa.start, HARMONY_SIZE);
        if (h->iobase == NULL) {
                printk(KERN_ERR PFX "unable to remap hpa 0x%lx\n",
                       (unsigned long)padev->hpa.start);
index 1cbfae856a2a9909ca54ad08f7f92abc072cbb89..459c1691bb0c5991aa3c64258b081af0d93cbe49 100644 (file)
@@ -271,7 +271,7 @@ static int snd_aw2_create(struct snd_card *card,
        }
        chip->iobase_phys = pci_resource_start(pci, 0);
        chip->iobase_virt =
-               ioremap_nocache(chip->iobase_phys,
+               ioremap(chip->iobase_phys,
                                pci_resource_len(pci, 0));
 
        if (chip->iobase_virt == NULL) {
index 102a62965ac15475b515259adc2d9762deecb48f..1465d7a17f7f7ebc545f4d937eb6378c199e0ba3 100644 (file)
@@ -3983,7 +3983,7 @@ int snd_cs46xx_create(struct snd_card *card,
                        snd_cs46xx_free(chip);
                        return -EBUSY;
                }
-               region->remap_addr = ioremap_nocache(region->base, region->size);
+               region->remap_addr = ioremap(region->base, region->size);
                if (region->remap_addr == NULL) {
                        dev_err(chip->card->dev,
                                "%s ioremap problem\n", region->name);
index 1465813bf7c6b886b372f98e727bbdc0351eb5ec..dfd55419d6679ed19a5e25d350d716787fd83728 100644 (file)
@@ -1929,7 +1929,7 @@ static int snd_echo_create(struct snd_card *card,
                return -EBUSY;
        }
        chip->dsp_registers = (volatile u32 __iomem *)
-               ioremap_nocache(chip->dsp_registers_phys, sz);
+               ioremap(chip->dsp_registers_phys, sz);
        if (!chip->dsp_registers) {
                dev_err(chip->card->dev, "ioremap failed\n");
                snd_echo_free(chip);
index 50d4a87a6bb34b57ead7cee72a5e2ceb0924b74f..f02f5b1568dee6f5e083e11417f9b67e7d355073 100644 (file)
@@ -635,36 +635,30 @@ This function assumes there are no more than 16 in/out busses or pipes
 Meters is an array [3][16][2] of long. */
 static void get_audio_meters(struct echoaudio *chip, long *meters)
 {
-       int i, m, n;
+       unsigned int i, m, n;
 
-       m = 0;
-       n = 0;
-       for (i = 0; i < num_busses_out(chip); i++, m++) {
+       for (i = 0 ; i < 96; i++)
+               meters[i] = 0;
+
+       for (m = 0, n = 0, i = 0; i < num_busses_out(chip); i++, m++) {
                meters[n++] = chip->comm_page->vu_meter[m];
                meters[n++] = chip->comm_page->peak_meter[m];
        }
-       for (; n < 32; n++)
-               meters[n] = 0;
 
 #ifdef ECHOCARD_ECHO3G
        m = E3G_MAX_OUTPUTS;    /* Skip unused meters */
 #endif
 
-       for (i = 0; i < num_busses_in(chip); i++, m++) {
+       for (n = 32, i = 0; i < num_busses_in(chip); i++, m++) {
                meters[n++] = chip->comm_page->vu_meter[m];
                meters[n++] = chip->comm_page->peak_meter[m];
        }
-       for (; n < 64; n++)
-               meters[n] = 0;
-
 #ifdef ECHOCARD_HAS_VMIXER
-       for (i = 0; i < num_pipes_out(chip); i++, m++) {
+       for (n = 64, i = 0; i < num_pipes_out(chip); i++, m++) {
                meters[n++] = chip->comm_page->vu_meter[m];
                meters[n++] = chip->comm_page->peak_meter[m];
        }
 #endif
-       for (; n < 96; n++)
-               meters[n] = 0;
 }
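
get_audio_meters() is rewritten so the three meter sections always start at their fixed offsets (0, 32 and 64) and the whole array is cleared once up front rather than zero-filling each section's tail. The up-front clearing loop is equivalent to this one-liner (illustrative):

    memset(meters, 0, 96 * sizeof(*meters));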
 
 
index 2f3b7a35f2d9cdb3b9262d220c962336f6653205..ba56b59b3e17857747c533d2635e9b61e468a528 100644 (file)
@@ -883,7 +883,7 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
                return -EAGAIN; /* give a chance to retry */
        }
 
-       dev_WARN(chip->card->dev,
+       dev_err(chip->card->dev,
                "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
                bus->last_cmd[addr]);
        chip->single_cmd = 1;
index 35b4526f0d287a582da079a9af63f9db070e3fab..8ef223aa1e37ef245ca41c34b7dbe363ecca9a2c 100644 (file)
@@ -125,7 +125,7 @@ static char *patch[SNDRV_CARDS];
 static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
                                        CONFIG_SND_HDA_INPUT_BEEP_MODE};
 #endif
-static bool dsp_driver = 1;
+static bool dmic_detect = 1;
 
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
@@ -160,9 +160,10 @@ module_param_array(beep_mode, bool, NULL, 0444);
 MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
                            "(0=off, 1=on) (default=1).");
 #endif
-module_param(dsp_driver, bool, 0444);
-MODULE_PARM_DESC(dsp_driver, "Allow DSP driver selection (bypass this driver) "
-                            "(0=off, 1=on) (default=1)");
+module_param(dmic_detect, bool, 0444);
+MODULE_PARM_DESC(dmic_detect, "Allow DSP driver selection (bypass this driver) "
+                            "(0=off, 1=on) (default=1); "
+                "deprecated, use snd-intel-dspcfg.dsp_driver option instead");
 
 #ifdef CONFIG_PM
 static int param_set_xint(const char *val, const struct kernel_param *kp);
@@ -282,12 +283,13 @@ enum {
 
 /* quirks for old Intel chipsets */
 #define AZX_DCAPS_INTEL_ICH \
-       (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE)
+       (AZX_DCAPS_OLD_SSYNC | AZX_DCAPS_NO_ALIGN_BUFSIZE |\
+        AZX_DCAPS_SYNC_WRITE)
 
 /* quirks for Intel PCH */
 #define AZX_DCAPS_INTEL_PCH_BASE \
        (AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\
-        AZX_DCAPS_SNOOP_TYPE(SCH))
+        AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
 
 /* PCH up to IVB; no runtime PM; bind with i915 gfx */
 #define AZX_DCAPS_INTEL_PCH_NOPM \
@@ -302,13 +304,13 @@ enum {
 #define AZX_DCAPS_INTEL_HASWELL \
        (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\
         AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
-        AZX_DCAPS_SNOOP_TYPE(SCH))
+        AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
 
 /* Broadwell HDMI can't use position buffer reliably, force to use LPIB */
 #define AZX_DCAPS_INTEL_BROADWELL \
        (/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\
         AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
-        AZX_DCAPS_SNOOP_TYPE(SCH))
+        AZX_DCAPS_SNOOP_TYPE(SCH) | AZX_DCAPS_SYNC_WRITE)
 
 #define AZX_DCAPS_INTEL_BAYTRAIL \
        (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT)
@@ -1410,7 +1412,17 @@ static bool atpx_present(void)
        acpi_handle dhandle, atpx_handle;
        acpi_status status;
 
-       while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               dhandle = ACPI_HANDLE(&pdev->dev);
+               if (dhandle) {
+                       status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+                       if (!ACPI_FAILURE(status)) {
+                               pci_dev_put(pdev);
+                               return true;
+                       }
+               }
+       }
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
                dhandle = ACPI_HANDLE(&pdev->dev);
                if (dhandle) {
                        status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
@@ -1419,7 +1431,6 @@ static bool atpx_present(void)
                                return true;
                        }
                }
-               pci_dev_put(pdev);
        }
        return false;
 }
@@ -2089,11 +2100,13 @@ static int azx_probe(struct pci_dev *pci,
        /*
         * stop probe if another Intel's DSP driver should be activated
         */
-       if (dsp_driver) {
+       if (dmic_detect) {
                err = snd_intel_dsp_driver_probe(pci);
                if (err != SND_INTEL_DSP_DRIVER_ANY &&
                    err != SND_INTEL_DSP_DRIVER_LEGACY)
                        return -ENODEV;
+       } else {
+               dev_warn(&pci->dev, "dmic_detect option is deprecated, pass snd-intel-dspcfg.dsp_driver=1 option instead\n");
        }
 
        err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
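
In atpx_present(), pci_get_class() matches the full 24-bit class code, so the old PCI_BASE_CLASS_DISPLAY << 16 (0x030000) only ever matched VGA controllers; the rewrite probes PCI_CLASS_DISPLAY_VGA and PCI_CLASS_DISPLAY_OTHER explicitly. The per-iteration pci_dev_put() also goes away because pci_get_class() itself drops the reference on the device it is handed. The reference-counting pattern in isolation, with a made-up demo_match() predicate:

    #include <linux/pci.h>

    static bool demo_match(struct pci_dev *pdev)
    {
            return pdev->vendor == 0x1002;  /* made-up criterion */
    }

    static bool demo_scan_vga(void)
    {
            struct pci_dev *pdev = NULL;

            /* pci_get_class() puts the device passed in and returns the
             * next match with a fresh reference, so the loop body must
             * not call pci_dev_put(); only an early exit does. */
            while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) {
                    if (demo_match(pdev)) {
                            pci_dev_put(pdev);  /* drop the ref we still hold */
                            return true;
                    }
            }
            return false;
    }
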
index b7a1abb3e2318ecc1051e6b8bbef49ea6fa46f73..32ed46464af7ad0a3ec1197cb935db3a1571317f 100644 (file)
@@ -1809,13 +1809,14 @@ struct scp_msg {
 
 static void dspio_clear_response_queue(struct hda_codec *codec)
 {
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
        unsigned int dummy = 0;
-       int status = -1;
+       int status;
 
        /* clear all from the response queue */
        do {
                status = dspio_read(codec, &dummy);
-       } while (status == 0);
+       } while (status == 0 && time_before(jiffies, timeout));
 }
 
 static int dspio_get_response_data(struct hda_codec *codec)
@@ -7588,12 +7589,14 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
        struct ca0132_spec *spec = codec->spec;
 
        codec_dbg(codec, "ca0132_process_dsp_response\n");
+       snd_hda_power_up_pm(codec);
        if (spec->wait_scp) {
                if (dspio_get_response_data(codec) >= 0)
                        spec->wait_scp = 0;
        }
 
        dspio_clear_response_queue(codec);
+       snd_hda_power_down_pm(codec);
 }
 
 static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
@@ -7604,11 +7607,10 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
        /* Delay enabling the HP amp, to let the mic-detection
         * state machine run.
         */
-       cancel_delayed_work(&spec->unsol_hp_work);
-       schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
        tbl = snd_hda_jack_tbl_get(codec, cb->nid);
        if (tbl)
                tbl->block_report = 1;
+       schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
 }
 
 static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
@@ -8454,12 +8456,25 @@ static void ca0132_reboot_notify(struct hda_codec *codec)
        codec->patch_ops.free(codec);
 }
 
+#ifdef CONFIG_PM
+static int ca0132_suspend(struct hda_codec *codec)
+{
+       struct ca0132_spec *spec = codec->spec;
+
+       cancel_delayed_work_sync(&spec->unsol_hp_work);
+       return 0;
+}
+#endif
+
 static const struct hda_codec_ops ca0132_patch_ops = {
        .build_controls = ca0132_build_controls,
        .build_pcms = ca0132_build_pcms,
        .init = ca0132_init,
        .free = ca0132_free,
        .unsol_event = snd_hda_jack_unsol_event,
+#ifdef CONFIG_PM
+       .suspend = ca0132_suspend,
+#endif
        .reboot_notify = ca0132_reboot_notify,
 };
 
index 78647ee02339fbe36d1374dbf7fbbec946b458f0..630b1f5c276d4eb348e65603ffbcd1e6e087660b 100644 (file)
@@ -2021,6 +2021,8 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
                per_cvt->assigned = 0;
                hinfo->nid = 0;
 
+               azx_stream(get_azx_dev(substream))->stripe = 0;
+
                mutex_lock(&spec->pcm_lock);
                snd_hda_spdif_ctls_unassign(codec, pcm_idx);
                clear_bit(pcm_idx, &spec->pcm_in_use);
index 6d6e34b3b3aa5c091f52248d5b79d07d084f8ecf..f2ea3528bfb1d3eb91227aa8cb8b2cc393b50c81 100644 (file)
@@ -412,6 +412,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0672:
                alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
                break;
+       case 0x10ec0222:
        case 0x10ec0623:
                alc_update_coef_idx(codec, 0x19, 1<<13, 0);
                break;
@@ -430,6 +431,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
                break;
        case 0x10ec0899:
        case 0x10ec0900:
+       case 0x10ec0b00:
        case 0x10ec1168:
        case 0x10ec1220:
                alc_update_coef_idx(codec, 0x7, 1<<1, 0);
@@ -501,6 +503,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
        case 0x10ec0298:
@@ -2525,6 +2528,7 @@ static int patch_alc882(struct hda_codec *codec)
        case 0x10ec0882:
        case 0x10ec0885:
        case 0x10ec0900:
+       case 0x10ec0b00:
        case 0x10ec1220:
                break;
        default:
@@ -5904,9 +5908,12 @@ enum {
        ALC256_FIXUP_ASUS_HEADSET_MIC,
        ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC299_FIXUP_PREDATOR_SPK,
-       ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
        ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
-       ALC294_FIXUP_ASUS_INTSPK_GPIO,
+       ALC289_FIXUP_DELL_SPK2,
+       ALC289_FIXUP_DUAL_SPK,
+       ALC294_FIXUP_SPK2_TO_DAC1,
+       ALC294_FIXUP_ASUS_DUAL_SPK,
+
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6981,33 +6988,45 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                }
        },
-       [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
+       [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
-                       { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
-                       { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
+                       { 0x19, 0x04a11040 },
+                       { 0x21, 0x04211020 },
                        { }
                },
                .chained = true,
-               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
        },
-       [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
+       [ALC289_FIXUP_DELL_SPK2] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
-                       { 0x19, 0x04a11040 },
-                       { 0x21, 0x04211020 },
+                       { 0x17, 0x90170130 }, /* bass spk */
                        { }
                },
                .chained = true,
-               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+               .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
        },
-       [ALC294_FIXUP_ASUS_INTSPK_GPIO] = {
+       [ALC289_FIXUP_DUAL_SPK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_speaker2_to_dac1,
+               .chained = true,
+               .chain_id = ALC289_FIXUP_DELL_SPK2
+       },
+       [ALC294_FIXUP_SPK2_TO_DAC1] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_speaker2_to_dac1,
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+       },
+       [ALC294_FIXUP_ASUS_DUAL_SPK] = {
                .type = HDA_FIXUP_FUNC,
                /* The GPIO must be pulled to initialize the AMP */
                .v.func = alc_fixup_gpio4,
                .chained = true,
-               .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC
+               .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
        },
+
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7080,6 +7099,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7167,7 +7188,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
-       SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO),
+       SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
@@ -7239,6 +7260,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
        SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -7643,11 +7665,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x1a, 0x90a70130},
                {0x1b, 0x90170110},
                {0x21, 0x03211020}),
-       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
-               {0x12, 0xb7a60130},
-               {0x13, 0xb8a61140},
-               {0x16, 0x90170110},
-               {0x21, 0x04211020}),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
                {0x12, 0x90a60130},
                {0x14, 0x90170110},
@@ -7841,6 +7858,9 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x19, 0x40000000},
                {0x1a, 0x40000000}),
+       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+               {0x19, 0x40000000},
+               {0x1a, 0x40000000}),
        {}
 };
 
@@ -9239,6 +9259,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
+       HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
        {} /* terminator */
index c80a16ee6e76171ed5123e0c374b83afbd1e6545..242542e23d283b31e01a1154fdf4103d9c9a3132 100644 (file)
@@ -647,6 +647,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
        unsigned long flags;
        unsigned char mclk_change;
        unsigned int i, old_rate;
+       bool call_set_rate = false;
 
        if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
                return -EINVAL;
@@ -670,7 +671,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
                 * setting clock rate for internal clock mode */
                old_rate = ice->get_rate(ice);
                if (force || (old_rate != rate))
-                       ice->set_rate(ice, rate);
+                       call_set_rate = true;
                else if (rate == ice->cur_rate) {
                        spin_unlock_irqrestore(&ice->reg_lock, flags);
                        return 0;
@@ -678,12 +679,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
        }
 
        ice->cur_rate = rate;
+       spin_unlock_irqrestore(&ice->reg_lock, flags);
+
+       if (call_set_rate)
+               ice->set_rate(ice, rate);
 
        /* setting master clock */
        mclk_change = ice->set_mclk(ice, rate);
 
-       spin_unlock_irqrestore(&ice->reg_lock, flags);
-
        if (mclk_change && ice->gpio.i2s_mclk_changed)
                ice->gpio.i2s_mclk_changed(ice);
        if (ice->gpio.set_pro_rate)
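
snd_vt1724_set_pro_rate() used to call ice->set_rate() while still holding reg_lock, but on some subdrivers that callback can sleep, so the decision is now recorded in call_set_rate under the lock and the call itself happens only after spin_unlock_irqrestore(). The decide-then-act shape with illustrative names (struct demo_chip is made up):

    #include <linux/spinlock.h>

    struct demo_chip {
            spinlock_t reg_lock;
            unsigned int cur_rate;
            void (*set_rate)(struct demo_chip *chip, unsigned int rate);
    };

    static void demo_set_pro_rate(struct demo_chip *chip, unsigned int rate)
    {
            unsigned long flags;
            bool call_set_rate = false;

            spin_lock_irqsave(&chip->reg_lock, flags);
            if (rate != chip->cur_rate) {
                    call_set_rate = true;
                    chip->cur_rate = rate;
            }
            spin_unlock_irqrestore(&chip->reg_lock, flags);

            if (call_set_rate)
                    chip->set_rate(chip, rate);     /* may sleep */
    }
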
index 1201c9c95660f6a8fd04bda38740effc48a02d48..77c683d19fbfe67db593154d486c6de96208f0cc 100644 (file)
@@ -1353,7 +1353,7 @@ snd_nm256_peek_for_sig(struct nm256 *chip)
        unsigned long pointer_found = chip->buffer_end - 0x1400;
        u32 sig;
 
-       temp = ioremap_nocache(chip->buffer_addr + chip->buffer_end - 0x400, 16);
+       temp = ioremap(chip->buffer_addr + chip->buffer_end - 0x400, 16);
        if (temp == NULL) {
                dev_err(chip->card->dev,
                        "Unable to scan for card signature in video RAM\n");
@@ -1518,7 +1518,7 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci,
                err = -EBUSY;
                goto __error;
        }
-       chip->cport = ioremap_nocache(chip->cport_addr, NM_PORT2_SIZE);
+       chip->cport = ioremap(chip->cport_addr, NM_PORT2_SIZE);
        if (chip->cport == NULL) {
                dev_err(card->dev, "unable to map control port %lx\n",
                        chip->cport_addr);
@@ -1589,7 +1589,7 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci,
                err = -EBUSY;
                goto __error;
        }
-       chip->buffer = ioremap_nocache(chip->buffer_addr, chip->buffer_size);
+       chip->buffer = ioremap(chip->buffer_addr, chip->buffer_size);
        if (chip->buffer == NULL) {
                err = -ENOMEM;
                dev_err(card->dev, "unable to map ring buffer at %lx\n",
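
This hunk and the many that follow are part of a tree-wide rename: ioremap() already returns an uncached MMIO mapping, so the ioremap_nocache() alias is redundant and is being removed. The conversion is purely mechanical; a sketch with a hypothetical helper:

        #include <linux/io.h>

        /* ioremap() gives an uncached mapping; no _nocache variant needed. */
        static void __iomem *map_regs(unsigned long phys, unsigned long size)
        {
                return ioremap(phys, size);     /* was: ioremap_nocache(phys, size) */
        }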
index 58a4b8df25d4a8f1097672630c6a178d6538a7b7..4a238de5a77e2fe824a0d48e213ea41bd2df425b 100644 (file)
@@ -1344,7 +1344,7 @@ static int snd_rme32_create(struct rme32 *rme32)
                return err;
        rme32->port = pci_resource_start(rme32->pci, 0);
 
-       rme32->iobase = ioremap_nocache(rme32->port, RME32_IO_SIZE);
+       rme32->iobase = ioremap(rme32->port, RME32_IO_SIZE);
        if (!rme32->iobase) {
                dev_err(rme32->card->dev,
                        "unable to remap memory region 0x%lx-0x%lx\n",
index 64ab55772eae8ee1d55bc530729beec5b6971726..db6033074acec90085992a3cc5fef5705d22c0ad 100644 (file)
@@ -1619,7 +1619,7 @@ snd_rme96_create(struct rme96 *rme96)
                return err;
        rme96->port = pci_resource_start(rme96->pci, 0);
 
-       rme96->iobase = ioremap_nocache(rme96->port, RME96_IO_SIZE);
+       rme96->iobase = ioremap(rme96->port, RME96_IO_SIZE);
        if (!rme96->iobase) {
                dev_err(rme96->card->dev,
                        "unable to remap memory region 0x%lx-0x%lx\n",
index cd20af465d8e1ac6a784aac0acbd4e0134bfbf2d..dfb06546ff25c5d5ba0fec52c09b0cf1b67bfee8 100644 (file)
@@ -5220,7 +5220,7 @@ static int snd_hdsp_create(struct snd_card *card,
        if ((err = pci_request_regions(pci, "hdsp")) < 0)
                return err;
        hdsp->port = pci_resource_start(pci, 0);
-       if ((hdsp->iobase = ioremap_nocache(hdsp->port, HDSP_IO_EXTENT)) == NULL) {
+       if ((hdsp->iobase = ioremap(hdsp->port, HDSP_IO_EXTENT)) == NULL) {
                dev_err(hdsp->card->dev, "unable to remap region 0x%lx-0x%lx\n",
                        hdsp->port, hdsp->port + HDSP_IO_EXTENT - 1);
                return -EBUSY;
index 75c06a7cc7793f2c2272ae94e602877fce3ca9af..e2214ba4a38df6232b1822dd500ed9bd4063abf3 100644 (file)
@@ -6594,7 +6594,7 @@ static int snd_hdspm_create(struct snd_card *card,
        dev_dbg(card->dev, "grabbed memory region 0x%lx-0x%lx\n",
                        hdspm->port, hdspm->port + io_extent - 1);
 
-       hdspm->iobase = ioremap_nocache(hdspm->port, io_extent);
+       hdspm->iobase = ioremap(hdspm->port, io_extent);
        if (!hdspm->iobase) {
                dev_err(card->dev, "unable to remap region 0x%lx-0x%lx\n",
                                hdspm->port, hdspm->port + io_extent - 1);
index ef5c2f8e17c753a2c456b6d892453512e50fbbb0..bb9130747fbbb78541f7f421a5e63ba2542a38eb 100644 (file)
@@ -2466,7 +2466,7 @@ static int snd_rme9652_create(struct snd_card *card,
        if ((err = pci_request_regions(pci, "rme9652")) < 0)
                return err;
        rme9652->port = pci_resource_start(pci, 0);
-       rme9652->iobase = ioremap_nocache(rme9652->port, RME9652_IO_EXTENT);
+       rme9652->iobase = ioremap(rme9652->port, RME9652_IO_EXTENT);
        if (rme9652->iobase == NULL) {
                dev_err(card->dev, "unable to remap region 0x%lx-0x%lx\n",
                        rme9652->port, rme9652->port + RME9652_IO_EXTENT - 1);
index ef7dd290ae05cf815f68c68d7b8d50deeada8d64..ce13dcde4c362f805a924a67ac5d3a40ea082dae 100644 (file)
@@ -1334,7 +1334,7 @@ static int sis_chip_create(struct snd_card *card,
        }
 
        rc = -EIO;
-       sis->ioaddr = ioremap_nocache(pci_resource_start(pci, 1), 0x4000);
+       sis->ioaddr = ioremap(pci_resource_start(pci, 1), 0x4000);
        if (!sis->ioaddr) {
                dev_err(&pci->dev, "unable to remap MMIO, aborting\n");
                goto error_out_cleanup;
index 125c11ed5064a0b8012ddf078cfd2c55b2c5e3cb..d3907811f698bbdf9f218bef65a5164dfbc50ab6 100644 (file)
@@ -2373,7 +2373,7 @@ int snd_ymfpci_create(struct snd_card *card,
        chip->device_id = pci->device;
        chip->rev = pci->revision;
        chip->reg_area_phys = pci_resource_start(pci, 0);
-       chip->reg_area_virt = ioremap_nocache(chip->reg_area_phys, 0x8000);
+       chip->reg_area_virt = ioremap(chip->reg_area_phys, 0x8000);
        pci_set_master(pci);
        chip->src441_used = -1;
 
index f4ee6798154af52377c7a6949e8fc99a2b0134fe..7a5621e5e2330dd872e3835811862c9b339dbfd8 100644 (file)
@@ -96,14 +96,19 @@ static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd)
        return 0;
 }
 
-static int da7219_clk_enable(struct snd_pcm_substream *substream,
-                            int wclk_rate, int bclk_rate)
+static int da7219_clk_enable(struct snd_pcm_substream *substream)
 {
        int ret = 0;
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
 
-       clk_set_rate(da7219_dai_wclk, wclk_rate);
-       clk_set_rate(da7219_dai_bclk, bclk_rate);
+       /*
+        * Set wclk to 48000 because this driver constrains the rate to
+        * 48000. ADAU7002 spec: "The ADAU7002 requires a BCLK rate that is
+        * a minimum of 64x the LRCLK sample rate." DA7219 is the only clk
+        * source, so for all codecs we have to limit bclk to 64x lrclk.
+        */
+       clk_set_rate(da7219_dai_wclk, 48000);
+       clk_set_rate(da7219_dai_bclk, 48000 * 64);
        ret = clk_prepare_enable(da7219_dai_bclk);
        if (ret < 0) {
                dev_err(rtd->dev, "can't enable master clock %d\n", ret);
@@ -156,7 +161,7 @@ static int cz_da7219_play_startup(struct snd_pcm_substream *substream)
                                   &constraints_rates);
 
        machine->play_i2s_instance = I2S_SP_INSTANCE;
-       return 0;
+       return da7219_clk_enable(substream);
 }
 
 static int cz_da7219_cap_startup(struct snd_pcm_substream *substream)
@@ -178,7 +183,7 @@ static int cz_da7219_cap_startup(struct snd_pcm_substream *substream)
 
        machine->cap_i2s_instance = I2S_SP_INSTANCE;
        machine->capture_channel = CAP_CHANNEL1;
-       return 0;
+       return da7219_clk_enable(substream);
 }
 
 static int cz_max_startup(struct snd_pcm_substream *substream)
@@ -199,7 +204,7 @@ static int cz_max_startup(struct snd_pcm_substream *substream)
                                   &constraints_rates);
 
        machine->play_i2s_instance = I2S_BT_INSTANCE;
-       return 0;
+       return da7219_clk_enable(substream);
 }
 
 static int cz_dmic0_startup(struct snd_pcm_substream *substream)
@@ -220,7 +225,7 @@ static int cz_dmic0_startup(struct snd_pcm_substream *substream)
                                   &constraints_rates);
 
        machine->cap_i2s_instance = I2S_BT_INSTANCE;
-       return 0;
+       return da7219_clk_enable(substream);
 }
 
 static int cz_dmic1_startup(struct snd_pcm_substream *substream)
@@ -242,25 +247,7 @@ static int cz_dmic1_startup(struct snd_pcm_substream *substream)
 
        machine->cap_i2s_instance = I2S_SP_INSTANCE;
        machine->capture_channel = CAP_CHANNEL0;
-       return 0;
-}
-
-static int cz_da7219_params(struct snd_pcm_substream *substream,
-                                     struct snd_pcm_hw_params *params)
-{
-       int wclk, bclk;
-
-       wclk = params_rate(params);
-       bclk = wclk * params_channels(params) *
-               snd_pcm_format_width(params_format(params));
-       /* ADAU7002 spec: "The ADAU7002 requires a BCLK rate
-        * that is minimum of 64x the LRCLK sample rate."
-        * DA7219 is the only clk source so for all codecs
-        * we have to limit bclk to 64X lrclk.
-        */
-       if (bclk < (wclk * 64))
-               bclk = wclk * 64;
-       return da7219_clk_enable(substream, wclk, bclk);
+       return da7219_clk_enable(substream);
 }
 
 static void cz_da7219_shutdown(struct snd_pcm_substream *substream)
@@ -271,31 +258,26 @@ static void cz_da7219_shutdown(struct snd_pcm_substream *substream)
 static const struct snd_soc_ops cz_da7219_play_ops = {
        .startup = cz_da7219_play_startup,
        .shutdown = cz_da7219_shutdown,
-       .hw_params = cz_da7219_params,
 };
 
 static const struct snd_soc_ops cz_da7219_cap_ops = {
        .startup = cz_da7219_cap_startup,
        .shutdown = cz_da7219_shutdown,
-       .hw_params = cz_da7219_params,
 };
 
 static const struct snd_soc_ops cz_max_play_ops = {
        .startup = cz_max_startup,
        .shutdown = cz_da7219_shutdown,
-       .hw_params = cz_da7219_params,
 };
 
 static const struct snd_soc_ops cz_dmic0_cap_ops = {
        .startup = cz_dmic0_startup,
        .shutdown = cz_da7219_shutdown,
-       .hw_params = cz_da7219_params,
 };
 
 static const struct snd_soc_ops cz_dmic1_cap_ops = {
        .startup = cz_dmic1_startup,
        .shutdown = cz_da7219_shutdown,
-       .hw_params = cz_da7219_params,
 };
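
Moving the clock setup out of hw_params and into every startup handler works because the rates are now constants: the machine driver constrains streams to 48 kHz, so wclk and bclk can be programmed once at stream open. A sketch of the fixed-rate bring-up, with hypothetical clk handles standing in for the DA7219 DAI clocks:

        #include <linux/clk.h>
        #include <linux/device.h>

        static int enable_fixed_clks(struct device *dev, struct clk *wclk,
                                     struct clk *bclk)
        {
                int ret;

                clk_set_rate(wclk, 48000);              /* LRCLK fixed at 48 kHz */
                clk_set_rate(bclk, 48000 * 64);         /* BCLK = 64 x LRCLK minimum */

                ret = clk_prepare_enable(bclk);
                if (ret < 0)
                        dev_err(dev, "can't enable master clock %d\n", ret);

                return ret;
        }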
 
 SND_SOC_DAILINK_DEF(designware1,
index 0792c40e6cc10a5b25ab444bafacfe9585a64544..d28302153d74b3f5fc613f536a134167be0489ab 100644 (file)
@@ -248,7 +248,7 @@ static int au1xac97c_drvprobe(struct platform_device *pdev)
                                     pdev->name))
                return -EBUSY;
 
-       ctx->mmio = devm_ioremap_nocache(&pdev->dev, iores->start,
+       ctx->mmio = devm_ioremap(&pdev->dev, iores->start,
                                         resource_size(iores));
        if (!ctx->mmio)
                return -EBUSY;
index 46f2b447ec9a55ec70e7edb55e6125bac719f8ba..7fd08fafa49039dee9c06e4645f7762da1d1c847 100644 (file)
@@ -248,7 +248,7 @@ static int au1xi2s_drvprobe(struct platform_device *pdev)
                                     pdev->name))
                return -EBUSY;
 
-       ctx->mmio = devm_ioremap_nocache(&pdev->dev, iores->start,
+       ctx->mmio = devm_ioremap(&pdev->dev, iores->start,
                                         resource_size(iores));
        if (!ctx->mmio)
                return -EBUSY;
index 7b17f39a6a102f9a1ceadfe6fcf3e39c995b4008..ce3ed056ea8be950e12cda094381f1a6a2b150eb 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <crypto/hash.h>
 #include <crypto/sha.h>
+#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/io.h>
@@ -1047,10 +1048,17 @@ static const struct of_device_id cros_ec_codec_of_match[] = {
 MODULE_DEVICE_TABLE(of, cros_ec_codec_of_match);
 #endif
 
+static const struct acpi_device_id cros_ec_codec_acpi_id[] = {
+       { "GOOG0013", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_codec_acpi_id);
+
 static struct platform_driver cros_ec_codec_platform_driver = {
        .driver = {
                .name = "cros-ec-codec",
                .of_match_table = of_match_ptr(cros_ec_codec_of_match),
+               .acpi_match_table = ACPI_PTR(cros_ec_codec_acpi_id),
        },
        .probe = cros_ec_codec_platform_probe,
 };
index 6803d39e09a5fed0bb922cf9d66a0eb2ca6de32c..43110151e928d345708394c6ae3a266441dd441f 100644 (file)
@@ -588,7 +588,9 @@ static int hdac_hda_dev_remove(struct hdac_device *hdev)
        struct hdac_hda_priv *hda_pvt;
 
        hda_pvt = dev_get_drvdata(&hdev->dev);
-       cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work);
+       if (hda_pvt && hda_pvt->codec.registered)
+               cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work);
+
        return 0;
 }
 
index f8b5b960e5970ef3182aaa14b528f09fab6f50be..4eaa2b5b20a58b41ff075e9977acaeba4644ad0f 100644 (file)
@@ -292,7 +292,7 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
                             struct snd_ctl_elem_info *uinfo)
 {
        uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
-       uinfo->count = FIELD_SIZEOF(struct hdmi_codec_priv, eld);
+       uinfo->count = sizeof_field(struct hdmi_codec_priv, eld);
 
        return 0;
 }
index f6bf4cfbea23fa0aae7e5af9ec933870597fb278..e46b6ada13b1b01af12a8a775cc4fcc8d6e95ef6 100644 (file)
@@ -2103,26 +2103,40 @@ static void max98090_pll_det_disable_work(struct work_struct *work)
                            M98090_IULK_MASK, 0);
 }
 
-static void max98090_pll_work(struct work_struct *work)
+static void max98090_pll_work(struct max98090_priv *max98090)
 {
-       struct max98090_priv *max98090 =
-               container_of(work, struct max98090_priv, pll_work);
        struct snd_soc_component *component = max98090->component;
+       unsigned int pll;
+       int i;
 
        if (!snd_soc_component_is_active(component))
                return;
 
        dev_info_ratelimited(component->dev, "PLL unlocked\n");
 
+       /*
+        * As the datasheet suggests, the maximum PLL lock time is 7 msec.
+        * The workaround resets the codec softly by toggling SHDN off and
+        * on if the PLL fails to lock within 10 msec.  Notably, there is
+        * no suggested hold time for SHDN off.
+        */
+
        /* Toggle shutdown OFF then ON */
        snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
                            M98090_SHDNN_MASK, 0);
-       msleep(10);
        snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
                            M98090_SHDNN_MASK, M98090_SHDNN_MASK);
 
-       /* Give PLL time to lock */
-       msleep(10);
+       for (i = 0; i < 10; ++i) {
+               /* Give PLL time to lock */
+               usleep_range(1000, 1200);
+
+               /* Check lock status */
+               pll = snd_soc_component_read32(
+                               component, M98090_REG_DEVICE_STATUS);
+               if (!(pll & M98090_ULK_MASK))
+                       break;
+       }
 }
 
 static void max98090_jack_work(struct work_struct *work)
@@ -2259,7 +2273,7 @@ static irqreturn_t max98090_interrupt(int irq, void *data)
 
        if (active & M98090_ULK_MASK) {
                dev_dbg(component->dev, "M98090_ULK_MASK\n");
-               schedule_work(&max98090->pll_work);
+               max98090_pll_work(max98090);
        }
 
        if (active & M98090_JDET_MASK) {
@@ -2422,7 +2436,6 @@ static int max98090_probe(struct snd_soc_component *component)
                          max98090_pll_det_enable_work);
        INIT_WORK(&max98090->pll_det_disable_work,
                  max98090_pll_det_disable_work);
-       INIT_WORK(&max98090->pll_work, max98090_pll_work);
 
        /* Enable jack detection */
        snd_soc_component_write(component, M98090_REG_JACK_DETECT,
@@ -2475,7 +2488,6 @@ static void max98090_remove(struct snd_soc_component *component)
        cancel_delayed_work_sync(&max98090->jack_work);
        cancel_delayed_work_sync(&max98090->pll_det_enable_work);
        cancel_work_sync(&max98090->pll_det_disable_work);
-       cancel_work_sync(&max98090->pll_work);
        max98090->component = NULL;
 }
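
Replacing the blind 10 ms sleep with a bounded poll lets the handler return as soon as the PLL locks. The shape of that loop, assuming a hypothetical read_status() accessor and ULK_MASK bit in place of the component-register helpers:

        #include <linux/delay.h>

        #define ULK_MASK 0x02                   /* hypothetical "PLL unlocked" bit */

        struct my_codec;                        /* stand-in for the component */
        unsigned int read_status(struct my_codec *c);   /* hypothetical accessor */

        static void wait_pll_lock(struct my_codec *codec)
        {
                int i;

                for (i = 0; i < 10; i++) {
                        usleep_range(1000, 1200);       /* datasheet lock time: <= 7 ms */
                        if (!(read_status(codec) & ULK_MASK))
                                break;                  /* PLL locked */
                }
        }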
 
index 57965cd678b4e5ae7de6865dacd7ff4c8eee793a..a197114b0dad3fc648952b8f272fb8b87c1a77c1 100644 (file)
@@ -1530,7 +1530,6 @@ struct max98090_priv {
        struct delayed_work jack_work;
        struct delayed_work pll_det_enable_work;
        struct work_struct pll_det_disable_work;
-       struct work_struct pll_work;
        struct snd_soc_jack *jack;
        unsigned int dai_fmt;
        int tdm_slots;
index f53235be77d94f94179f7c1a87829f61b57181b4..1f7964beb20c20b1224eb3a55e7d9c3d8b4de069 100644 (file)
@@ -396,9 +396,6 @@ static int pm8916_wcd_analog_enable_micbias_int(struct snd_soc_component
 
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
-               snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
-                                   MICB_1_INT_TX2_INT_RBIAS_EN_MASK,
-                                   MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE);
                snd_soc_component_update_bits(component, reg, MICB_1_EN_PULL_DOWN_EN_MASK, 0);
                snd_soc_component_update_bits(component, CDC_A_MICB_1_EN,
                                    MICB_1_EN_OPA_STG2_TAIL_CURR_MASK,
@@ -448,6 +445,14 @@ static int pm8916_wcd_analog_enable_micbias_int1(struct
        struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
        struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component);
 
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
+                                   MICB_1_INT_TX1_INT_RBIAS_EN_MASK,
+                                   MICB_1_INT_TX1_INT_RBIAS_EN_ENABLE);
+               break;
+       }
+
        return pm8916_wcd_analog_enable_micbias_int(component, event, w->reg,
                                                     wcd->micbias1_cap_mode);
 }
@@ -558,6 +563,11 @@ static int pm8916_wcd_analog_enable_micbias_int2(struct
        struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component);
 
        switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
+                                   MICB_1_INT_TX2_INT_RBIAS_EN_MASK,
+                                   MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE);
+               break;
        case SND_SOC_DAPM_POST_PMU:
                pm8916_mbhc_configure_bias(wcd, true);
                break;
@@ -938,10 +948,10 @@ static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = {
 
        SND_SOC_DAPM_SUPPLY("MIC BIAS External1", CDC_A_MICB_1_EN, 7, 0,
                            pm8916_wcd_analog_enable_micbias_ext1,
-                           SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+                           SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("MIC BIAS External2", CDC_A_MICB_2_EN, 7, 0,
                            pm8916_wcd_analog_enable_micbias_ext2,
-                           SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+                           SND_SOC_DAPM_POST_PMU),
 
        SND_SOC_DAPM_ADC_E("ADC1", NULL, CDC_A_TX_1_EN, 7, 0,
                           pm8916_wcd_analog_enable_adc,
index 58b2468fb2a7139b8d7dbb35085ecbe5acb8215f..09fccacadd6b1ec38c7d06d115c6e9df7f11d70f 100644 (file)
@@ -586,6 +586,12 @@ static int msm8916_wcd_digital_enable_interpolator(
                snd_soc_component_write(component, rx_gain_reg[w->shift],
                              snd_soc_component_read32(component, rx_gain_reg[w->shift]));
                break;
+       case SND_SOC_DAPM_POST_PMD:
+               snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL,
+                                             1 << w->shift, 1 << w->shift);
+               snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL,
+                                             1 << w->shift, 0x0);
+               break;
        }
        return 0;
 }
index adbae1f36a8afb303b85ebdb5617e33ca6a4620c..747ca248bf10c9e4b1dc763ab25e3cffdefcde3c 100644 (file)
@@ -2432,6 +2432,13 @@ static void rt5640_disable_jack_detect(struct snd_soc_component *component)
 {
        struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
 
+       /*
+        * soc_remove_component() force-disables the jack, so rt5640->jack
+        * may already be NULL by the time the driver module is unloaded.
+        */
+       if (!rt5640->jack)
+               return;
+
        disable_irq(rt5640->irq);
        rt5640_cancel_work(rt5640);
 
index 3af36ec928e93480ba4332430364917d926f5bd7..088b779317276f266ed4cd59571dbb8245a6d35f 100644 (file)
@@ -9,9 +9,25 @@
 #ifndef __RT5677_SPI_H__
 #define __RT5677_SPI_H__
 
+#if IS_ENABLED(CONFIG_SND_SOC_RT5677_SPI)
 int rt5677_spi_read(u32 addr, void *rxbuf, size_t len);
 int rt5677_spi_write(u32 addr, const void *txbuf, size_t len);
 int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw);
 void rt5677_spi_hotword_detected(void);
+#else
+static inline int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
+{
+       return -EINVAL;
+}
+static inline int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
+{
+       return -EINVAL;
+}
+static inline int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw)
+{
+       return -EINVAL;
+}
+static inline void rt5677_spi_hotword_detected(void) {}
+#endif
 
 #endif /* __RT5677_SPI_H__ */
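
The header now follows the usual IS_ENABLED() stub convention: callers compile and link whether or not the SPI part of the driver is built, with the stubs failing gracefully at runtime. The same pattern for a hypothetical CONFIG_FOO helper:

        #include <linux/errno.h>
        #include <linux/types.h>

        #if IS_ENABLED(CONFIG_FOO)
        int foo_read(u32 addr, void *buf, size_t len);
        #else
        /* Stub keeps callers buildable when CONFIG_FOO is off. */
        static inline int foo_read(u32 addr, void *buf, size_t len)
        {
                return -EINVAL;
        }
        #endif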
index b1713fffa3eb925f3d79f6d23b2c2bbb2bd38ddd..ae6f6121bc1b0ffadef6f57828b46b0fe89da958 100644 (file)
@@ -73,6 +73,7 @@ struct rt5682_priv {
 static const struct reg_sequence patch_list[] = {
        {RT5682_HP_IMP_SENS_CTRL_19, 0x1000},
        {RT5682_DAC_ADC_DIG_VOL1, 0xa020},
+       {RT5682_I2C_CTRL, 0x000f},
 };
 
 static const struct reg_default rt5682_reg[] = {
@@ -2474,6 +2475,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
        mutex_lock(&rt5682->calibrate_mutex);
 
        rt5682_reset(rt5682->regmap);
+       regmap_write(rt5682->regmap, RT5682_I2C_CTRL, 0x000f);
        regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2af);
        usleep_range(15000, 20000);
        regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xf2af);
index 7d7ea15d73e0a677c61b094fe1e91b592378a7f7..5ffbaddd6e499579ef98d3c35b146ab645fc2c4b 100644 (file)
@@ -1806,6 +1806,12 @@ static int wm8904_set_sysclk(struct snd_soc_dai *dai, int clk_id,
 
        switch (clk_id) {
        case WM8904_CLK_AUTO:
+               /* We don't have any rate constraints, so just ignore the
+                * request to disable constraining.
+                */
+               if (!freq)
+                       return 0;
+
                mclk_freq = clk_get_rate(priv->mclk);
                /* enable FLL if a different sysclk is desired */
                if (mclk_freq != freq) {
index 3e5c69fbc33ad752afa07868d0df00444aeb920d..d9d59f45833fdd2f02a3f202b80941535e39f1ca 100644 (file)
@@ -2788,7 +2788,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
 
        if (target % Fref == 0) {
                fll_div->theta = 0;
-               fll_div->lambda = 0;
+               fll_div->lambda = 1;
        } else {
                gcd_fll = gcd(target, fratio * Fref);
 
@@ -2858,7 +2858,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
                return -EINVAL;
        }
 
-       if (fll_div.theta || fll_div.lambda)
+       if (fll_div.theta)
                fll1 |= WM8962_FLL_FRAC;
 
        /* Stop the FLL while we reconfigure */
index a1db1bce330fa5c3874b52a925ef805ff87502d4..5faecbeb54970ee6e9f5a40b3f85c4ffe1bd7158 100644 (file)
@@ -505,15 +505,20 @@ static int fsl_audmix_probe(struct platform_device *pdev)
                                              ARRAY_SIZE(fsl_audmix_dai));
        if (ret) {
                dev_err(dev, "failed to register ASoC DAI\n");
-               return ret;
+               goto err_disable_pm;
        }
 
        priv->pdev = platform_device_register_data(dev, mdrv, 0, NULL, 0);
        if (IS_ERR(priv->pdev)) {
                ret = PTR_ERR(priv->pdev);
                dev_err(dev, "failed to register platform %s: %d\n", mdrv, ret);
+               goto err_disable_pm;
        }
 
+       return 0;
+
+err_disable_pm:
+       pm_runtime_disable(dev);
        return ret;
 }
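
The audmix fix is the standard unwind rule: whatever probe enabled must be disabled on every failure path, hence the shared err_disable_pm label and the matching pm_runtime_disable() added to remove(). The skeleton, with a hypothetical register_things() helper:

        #include <linux/pm_runtime.h>

        int register_things(struct device *dev);        /* hypothetical */

        static int my_probe(struct device *dev)
        {
                int ret;

                pm_runtime_enable(dev);

                ret = register_things(dev);
                if (ret)
                        goto err_disable_pm;

                return 0;

        err_disable_pm:
                pm_runtime_disable(dev);        /* balance the enable above */
                return ret;
        }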
 
@@ -521,6 +526,8 @@ static int fsl_audmix_remove(struct platform_device *pdev)
 {
        struct fsl_audmix *priv = dev_get_drvdata(&pdev->dev);
 
+       pm_runtime_disable(&pdev->dev);
+
        if (priv->pdev)
                platform_device_unregister(priv->pdev);
 
index 10b82bf043d1ab190e50895c67907c30b8cf6457..55e9f8800b3e103638e38b651aa9e37ac37fdcc7 100644 (file)
@@ -371,6 +371,7 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
        do {
                struct asoc_simple_data adata;
                struct device_node *codec;
+               struct device_node *plat;
                struct device_node *np;
                int num = of_get_child_count(node);
 
@@ -381,6 +382,9 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
                        ret = -ENODEV;
                        goto error;
                }
+               /* get platform */
+               plat = of_get_child_by_name(node, is_top ?
+                                           PREFIX "plat" : "plat");
 
                /* get convert-xxx property */
                memset(&adata, 0, sizeof(adata));
@@ -389,6 +393,8 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
 
                /* loop for all CPU/Codec node */
                for_each_child_of_node(node, np) {
+                       if (plat == np)
+                               continue;
                        /*
                         * It is DPCM
                         * if it has many CPUs,
index fbecbb74350b51de1583b2be7bec5408fef46b7b..68bcec5241f74fb6da8345f722b1ed7a43f6decf 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/firmware.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_qos.h>
index b728fb56ea4db7e4db6a4b0e02ae433a14c15b0d..f3cfe83b9ac6d7ea982b26921af05718b4f2f571 100644 (file)
@@ -165,7 +165,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
        ctx->iram_base = rsrc->start + ctx->pdata->res_info->iram_offset;
        ctx->iram_end =  ctx->iram_base + ctx->pdata->res_info->iram_size - 1;
        dev_info(ctx->dev, "IRAM base: %#x", ctx->iram_base);
-       ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base,
+       ctx->iram = devm_ioremap(ctx->dev, ctx->iram_base,
                                         ctx->pdata->res_info->iram_size);
        if (!ctx->iram) {
                dev_err(ctx->dev, "unable to map IRAM\n");
@@ -175,7 +175,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
        ctx->dram_base = rsrc->start + ctx->pdata->res_info->dram_offset;
        ctx->dram_end = ctx->dram_base + ctx->pdata->res_info->dram_size - 1;
        dev_info(ctx->dev, "DRAM base: %#x", ctx->dram_base);
-       ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base,
+       ctx->dram = devm_ioremap(ctx->dev, ctx->dram_base,
                                         ctx->pdata->res_info->dram_size);
        if (!ctx->dram) {
                dev_err(ctx->dev, "unable to map DRAM\n");
@@ -184,7 +184,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
 
        ctx->shim_phy_add = rsrc->start + ctx->pdata->res_info->shim_offset;
        dev_info(ctx->dev, "SHIM base: %#x", ctx->shim_phy_add);
-       ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add,
+       ctx->shim = devm_ioremap(ctx->dev, ctx->shim_phy_add,
                                        ctx->pdata->res_info->shim_size);
        if (!ctx->shim) {
                dev_err(ctx->dev, "unable to map SHIM\n");
@@ -197,7 +197,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
        /* Get mailbox addr */
        ctx->mailbox_add = rsrc->start + ctx->pdata->res_info->mbox_offset;
        dev_info(ctx->dev, "Mailbox base: %#x", ctx->mailbox_add);
-       ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add,
+       ctx->mailbox = devm_ioremap(ctx->dev, ctx->mailbox_add,
                                            ctx->pdata->res_info->mbox_size);
        if (!ctx->mailbox) {
                dev_err(ctx->dev, "unable to map mailbox\n");
@@ -216,7 +216,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
        ctx->ddr_base = rsrc->start;
        ctx->ddr_end = rsrc->end;
        dev_info(ctx->dev, "DDR base: %#x", ctx->ddr_base);
-       ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base,
+       ctx->ddr = devm_ioremap(ctx->dev, ctx->ddr_base,
                                        resource_size(rsrc));
        if (!ctx->ddr) {
                dev_err(ctx->dev, "unable to map DDR\n");
index 46612331f5ea7aa21bbf9e70b9945113cab67e5d..54e97455d7f6603b36e9df79f21264a212e3ed51 100644 (file)
@@ -442,7 +442,8 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "NB41"),
                },
-               .driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP
+               .driver_data = (void *)(BYT_CHT_ES8316_SSP0
+                                       | BYT_CHT_ES8316_INTMIC_IN2_MAP
                                        | BYT_CHT_ES8316_JD_INVERTED),
        },
        {       /* Teclast X98 Plus II */
index dd2b5ad08659107f41184234086eb81b10608554..243f683bc02a7361fb89ff2eaa7ab1f6e1ab751c 100644 (file)
@@ -707,13 +707,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_MCLK_EN),
        },
        {
+               /* Teclast X89 */
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
                        DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
                },
                .driver_data = (void *)(BYT_RT5640_IN3_MAP |
-                                       BYT_RT5640_MCLK_EN |
-                                       BYT_RT5640_SSP0_AIF1),
+                                       BYT_RT5640_JD_SRC_JD1_IN4P |
+                                       BYT_RT5640_OVCD_TH_2000UA |
+                                       BYT_RT5640_OVCD_SF_1P0 |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
        },
        {       /* Toshiba Satellite Click Mini L9W-B */
                .matches = {
index a22f97234201175d0bc9fd2a9c65891cc847aa5a..5f1bf6d3800c6c9c7be395cdc87b3e20b24260bc 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/clk.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
-#include <asm/cpu_device_id.h>
 #include <linux/acpi.h>
 #include <sound/core.h>
 #include <sound/jack.h>
index 5d08ae0667380bea84b98537cb4d2c66c3a76281..fb9ba881970604331a6d825f09893609eae9ada2 100644 (file)
@@ -9,45 +9,52 @@
 #include <sound/soc-acpi.h>
 #include <sound/soc-acpi-intel-match.h>
 
-static struct snd_soc_acpi_codecs cml_codecs = {
+static struct snd_soc_acpi_codecs rt1011_spk_codecs = {
        .num_codecs = 1,
-       .codecs = {"10EC5682"}
+       .codecs = {"10EC1011"}
 };
 
-static struct snd_soc_acpi_codecs cml_spk_codecs = {
+static struct snd_soc_acpi_codecs max98357a_spk_codecs = {
        .num_codecs = 1,
        .codecs = {"MX98357A"}
 };
 
+/*
+ * The order of the three entries with .id = "10EC5682" matters
+ * here, because some DSDT tables expose an ACPI HID for the
+ * MAX98357A speaker amplifier even though it is not populated
+ * on the board.
+ */
 struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
        {
-               .id = "DLGS7219",
-               .drv_name = "cml_da7219_max98357a",
-               .quirk_data = &cml_spk_codecs,
+               .id = "10EC5682",
+               .drv_name = "cml_rt1011_rt5682",
+               .machine_quirk = snd_soc_acpi_codec_list,
+               .quirk_data = &rt1011_spk_codecs,
                .sof_fw_filename = "sof-cml.ri",
-               .sof_tplg_filename = "sof-cml-da7219-max98357a.tplg",
+               .sof_tplg_filename = "sof-cml-rt1011-rt5682.tplg",
        },
        {
-               .id = "MX98357A",
+               .id = "10EC5682",
                .drv_name = "sof_rt5682",
-               .quirk_data = &cml_codecs,
+               .machine_quirk = snd_soc_acpi_codec_list,
+               .quirk_data = &max98357a_spk_codecs,
                .sof_fw_filename = "sof-cml.ri",
                .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
        },
-       {
-               .id = "10EC1011",
-               .drv_name = "cml_rt1011_rt5682",
-               .quirk_data = &cml_codecs,
-               .sof_fw_filename = "sof-cml.ri",
-               .sof_tplg_filename = "sof-cml-rt1011-rt5682.tplg",
-       },
        {
                .id = "10EC5682",
                .drv_name = "sof_rt5682",
                .sof_fw_filename = "sof-cml.ri",
                .sof_tplg_filename = "sof-cml-rt5682.tplg",
        },
-
+       {
+               .id = "DLGS7219",
+               .drv_name = "cml_da7219_max98357a",
+               .machine_quirk = snd_soc_acpi_codec_list,
+               .quirk_data = &max98357a_spk_codecs,
+               .sof_fw_filename = "sof-cml.ri",
+               .sof_tplg_filename = "sof-cml-da7219-max98357a.tplg",
+       },
        {},
 };
 EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
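
Table ordering matters because the core walks the entries in order and takes the first one whose primary HID is present and whose machine_quirk() accepts the system, so the bare "10EC5682" entry must come last as the catch-all. A hedged sketch of those semantics (illustrative, not the exact soc-acpi core code):

        #include <linux/acpi.h>
        #include <sound/soc-acpi.h>

        /* First entry matching both the HID and its quirk callback wins. */
        static struct snd_soc_acpi_mach *find_mach(struct snd_soc_acpi_mach *machines)
        {
                struct snd_soc_acpi_mach *mach;

                for (mach = machines; mach->id[0]; mach++) {
                        if (!acpi_dev_found((const char *)mach->id))
                                continue;
                        if (mach->machine_quirk && !mach->machine_quirk(mach))
                                continue;
                        return mach;
                }

                return NULL;
        }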
index 5a2c35f58fda96a5e91b46fe6d015333e0b52b07..36f697c61074de890d2abedbf2b943974776a963 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include "../common/sst-dsp.h"
index e384fdc8d60e54b001bb262de47ba1b320255f5d..2ead52bdb8ec97f567423be2b091bb32102d9ec4 100644 (file)
@@ -1955,7 +1955,7 @@ static int fsi_probe(struct platform_device *pdev)
        if (!master)
                return -ENOMEM;
 
-       master->base = devm_ioremap_nocache(&pdev->dev,
+       master->base = devm_ioremap(&pdev->dev,
                                            res->start, resource_size(res));
        if (!master->base) {
                dev_err(&pdev->dev, "Unable to ioremap FSI registers.\n");
index 9054558ce38619faf28a5bc535d59608e912f7ea..b94680fb26fa76b904d767e96f588cb299244529 100644 (file)
@@ -539,6 +539,9 @@ void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd)
        struct snd_soc_rtdcom_list *rtdcom;
        struct snd_soc_component *component;
 
+       if (!rtd->pcm)
+               return;
+
        for_each_rtd_components(rtd, rtdcom, component)
                if (component->driver->pcm_destruct)
                        component->driver->pcm_destruct(component, rtd->pcm);
index 61f230324164d0a6765aaba8b55df82982b38263..6615ef64c7f55ad1c7bd94f186babaa64feab966 100644 (file)
@@ -214,10 +214,8 @@ be_err:
  * This is to ensure there are no pops or clicks in between any music tracks
  * due to DAPM power cycling.
  */
-static void close_delayed_work(struct work_struct *work)
+static void close_delayed_work(struct snd_soc_pcm_runtime *rtd)
 {
-       struct snd_soc_pcm_runtime *rtd =
-                       container_of(work, struct snd_soc_pcm_runtime, delayed_work.work);
        struct snd_soc_dai *codec_dai = rtd->codec_dai;
 
        mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
@@ -929,7 +927,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
        }
 
        /* DAPM dai link stream work */
-       INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
+       rtd->close_delayed_work_func = close_delayed_work;
 
        rtd->compr = compr;
        compr->private_data = rtd;
index 062653ab03a37b1cac41e11f1a5984805b8ca688..8ef0efeed0a7e27ca683c86ae1e5535399f5008a 100644 (file)
@@ -419,7 +419,8 @@ static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
 
        list_del(&rtd->list);
 
-       flush_delayed_work(&rtd->delayed_work);
+       if (delayed_work_pending(&rtd->delayed_work))
+               flush_delayed_work(&rtd->delayed_work);
        snd_soc_pcm_component_free(rtd);
 
        /*
@@ -435,6 +436,15 @@ static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
        device_unregister(rtd->dev);
 }
 
+static void close_delayed_work(struct work_struct *work)
+{
+       struct snd_soc_pcm_runtime *rtd =
+                       container_of(work, struct snd_soc_pcm_runtime,
+                                    delayed_work.work);
+
+       if (rtd->close_delayed_work_func)
+               rtd->close_delayed_work_func(rtd);
+}
+
 static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
        struct snd_soc_card *card, struct snd_soc_dai_link *dai_link)
 {
@@ -469,7 +479,14 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
                goto free_rtd;
 
        rtd->dev = dev;
+       INIT_LIST_HEAD(&rtd->list);
+       INIT_LIST_HEAD(&rtd->component_list);
+       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
+       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
+       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
+       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
        dev_set_drvdata(dev, rtd);
+       INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
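
Hoisting the INIT_LIST_HEAD() calls above dev_set_drvdata() follows the initialize-before-publish rule: once the runtime is reachable (or an early error path can free it), every list head it contains must already be valid so that list_del() in soc_free_pcm_runtime() never touches garbage. A minimal sketch with a hypothetical runtime struct:

        #include <linux/list.h>

        struct my_rtd {
                struct list_head list;
                struct list_head clients;
        };

        /* All heads valid before anyone else can see the object. */
        static void my_rtd_init(struct my_rtd *rtd)
        {
                INIT_LIST_HEAD(&rtd->list);
                INIT_LIST_HEAD(&rtd->clients);
        }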
 
        /*
         * for rtd->codec_dais
@@ -483,12 +500,6 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
        /*
         * rtd remaining settings
         */
-       INIT_LIST_HEAD(&rtd->component_list);
-       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
-       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
-       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
-       INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
-
        rtd->card = card;
        rtd->dai_link = dai_link;
        if (!rtd->dai_link->ops)
@@ -1860,6 +1871,8 @@ match:
 
                        /* convert non BE into BE */
                        dai_link->no_pcm = 1;
+                       dai_link->dpcm_playback = 1;
+                       dai_link->dpcm_capture = 1;
 
                        /* override any BE fixups */
                        dai_link->be_hw_params_fixup =
index 76b7ee637e869780dce61b0f887309692a08849d..01e7bc03d92f96e9e6b9fc0a16453cd48fdfb8b7 100644 (file)
@@ -637,10 +637,8 @@ out:
  * This is to ensure there are no pops or clicks in between any music tracks
  * due to DAPM power cycling.
  */
-static void close_delayed_work(struct work_struct *work)
+static void close_delayed_work(struct snd_soc_pcm_runtime *rtd)
 {
-       struct snd_soc_pcm_runtime *rtd =
-                       container_of(work, struct snd_soc_pcm_runtime, delayed_work.work);
        struct snd_soc_dai *codec_dai = rtd->codec_dais[0];
 
        mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
@@ -660,7 +658,7 @@ static void close_delayed_work(struct work_struct *work)
        mutex_unlock(&rtd->card->pcm_mutex);
 }
 
-static void codec2codec_close_delayed_work(struct work_struct *work)
+static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
 {
        /*
         * Currently nothing to do for c2c links
@@ -2974,10 +2972,9 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
 
        /* DAPM dai link stream work */
        if (rtd->dai_link->params)
-               INIT_DELAYED_WORK(&rtd->delayed_work,
-                                 codec2codec_close_delayed_work);
+               rtd->close_delayed_work_func = codec2codec_close_delayed_work;
        else
-               INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
+               rtd->close_delayed_work_func = close_delayed_work;
 
        pcm->nonatomic = rtd->dai_link->nonatomic;
        rtd->pcm = pcm;
index 81d2af000a5c80b2e7f7c36a31029414d32d95c3..92e4f4d08bfad6b7ff9e9904eac0fac7f98f8bd9 100644 (file)
@@ -548,12 +548,12 @@ static void remove_link(struct snd_soc_component *comp,
        if (dobj->ops && dobj->ops->link_unload)
                dobj->ops->link_unload(comp, dobj);
 
+       list_del(&dobj->list);
+       snd_soc_remove_dai_link(comp->card, link);
+
        kfree(link->name);
        kfree(link->stream_name);
        kfree(link->cpus->dai_name);
-
-       list_del(&dobj->list);
-       snd_soc_remove_dai_link(comp->card, link);
        kfree(link);
 }
 
@@ -1933,11 +1933,13 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg,
        ret = soc_tplg_dai_link_load(tplg, link, NULL);
        if (ret < 0) {
                dev_err(tplg->comp->dev, "ASoC: FE link loading failed\n");
-               kfree(link->name);
-               kfree(link->stream_name);
-               kfree(link->cpus->dai_name);
-               kfree(link);
-               return ret;
+               goto err;
+       }
+
+       ret = snd_soc_add_dai_link(tplg->comp->card, link);
+       if (ret < 0) {
+               dev_err(tplg->comp->dev, "ASoC: adding FE link failed\n");
+               goto err;
        }
 
        link->dobj.index = tplg->index;
@@ -1945,8 +1947,13 @@ static int soc_tplg_fe_link_create(struct soc_tplg *tplg,
        link->dobj.type = SND_SOC_DOBJ_DAI_LINK;
        list_add(&link->dobj.list, &tplg->comp->dobj_list);
 
-       snd_soc_add_dai_link(tplg->comp->card, link);
        return 0;
+err:
+       kfree(link->name);
+       kfree(link->stream_name);
+       kfree(link->cpus->dai_name);
+       kfree(link);
+       return ret;
 }
 
 /* create a FE DAI and DAI link from the PCM object */
@@ -2039,6 +2046,7 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
        int size;
        int i;
        bool abi_match;
+       int ret;
 
        count = le32_to_cpu(hdr->count);
 
@@ -2080,7 +2088,12 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
                }
 
                /* create the FE DAIs and DAI links */
-               soc_tplg_pcm_create(tplg, _pcm);
+               ret = soc_tplg_pcm_create(tplg, _pcm);
+               if (ret < 0) {
+                       if (!abi_match)
+                               kfree(_pcm);
+                       return ret;
+               }
 
                /* offset by version-specific struct size and
                 * real priv data size
index cfefcfd927986d537fd59476c1a1f479f4d5003c..aef6ca167b9c4b8aca519708d60d28fabff90519 100644 (file)
@@ -209,7 +209,7 @@ static int imx8_probe(struct snd_sof_dev *sdev)
 
        priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains,
                                          sizeof(*priv->pd_dev), GFP_KERNEL);
-       if (!priv)
+       if (!priv->pd_dev)
                return -ENOMEM;
 
        priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains,
@@ -304,6 +304,9 @@ static int imx8_probe(struct snd_sof_dev *sdev)
        }
        sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
 
+       /* set default mailbox offset for FW ready message */
+       sdev->dsp_box.offset = MBOX_OFFSET;
+
        return 0;
 
 exit_pdev_unregister:
index 2abf80b3eb52fee2c0e7f06c9c6e051028f2104d..92ef6a796fd5ec8dd554a53d76d9da306c48792e 100644 (file)
@@ -24,7 +24,8 @@
 #define DRAM_OFFSET            0x100000
 #define DRAM_SIZE              (160 * 1024)
 #define SHIM_OFFSET            0x140000
-#define SHIM_SIZE              0x100
+#define SHIM_SIZE_BYT          0x100
+#define SHIM_SIZE_CHT          0x118
 #define MBOX_OFFSET            0x144000
 #define MBOX_SIZE              0x1000
 #define EXCEPT_OFFSET          0x800
@@ -75,7 +76,7 @@ static const struct snd_sof_debugfs_map byt_debugfs[] = {
         SOF_DEBUGFS_ACCESS_D0_ONLY},
        {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
         SOF_DEBUGFS_ACCESS_D0_ONLY},
-       {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
+       {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_BYT,
         SOF_DEBUGFS_ACCESS_ALWAYS},
 };
 
@@ -102,7 +103,7 @@ static const struct snd_sof_debugfs_map cht_debugfs[] = {
         SOF_DEBUGFS_ACCESS_D0_ONLY},
        {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
         SOF_DEBUGFS_ACCESS_D0_ONLY},
-       {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE,
+       {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT,
         SOF_DEBUGFS_ACCESS_ALWAYS},
 };
 
@@ -145,33 +146,33 @@ static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
        struct sof_ipc_dsp_oops_xtensa xoops;
        struct sof_ipc_panic_info panic_info;
        u32 stack[BYT_STACK_DUMP_SIZE];
-       u32 status, panic, imrd, imrx;
+       u64 status, panic, imrd, imrx;
 
        /* now try generic SOF status messages */
-       status = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IPCD);
-       panic = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IPCX);
+       status = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
+       panic = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
        byt_get_registers(sdev, &xoops, &panic_info, stack,
                          BYT_STACK_DUMP_SIZE);
        snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
                           BYT_STACK_DUMP_SIZE);
 
        /* provide some context for firmware debug */
-       imrx = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IMRX);
-       imrd = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IMRD);
+       imrx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRX);
+       imrd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRD);
        dev_err(sdev->dev,
-               "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
+               "error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n",
                (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
                (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
        dev_err(sdev->dev,
-               "error: mask host: pending %s complete %s raw 0x%8.8x\n",
+               "error: mask host: pending %s complete %s raw 0x%llx\n",
                (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
                (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
        dev_err(sdev->dev,
-               "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
+               "error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n",
                (status & SHIM_IPCD_BUSY) ? "yes" : "no",
                (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
        dev_err(sdev->dev,
-               "error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
+               "error: mask DSP: pending %s complete %s raw 0x%llx\n",
                (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
                (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
 
index 827f84a0722e91d7266d5729f5bc6273cbd42f51..fbfa225d1c5afa905ced3138721575a92dc27567 100644 (file)
 #define IDISP_VID_INTEL        0x80860000
 
 /* load the legacy HDA codec driver */
-#ifdef MODULE
-static void hda_codec_load_module(struct hda_codec *codec)
+static int hda_codec_load_module(struct hda_codec *codec)
 {
+#ifdef MODULE
        char alias[MODULE_NAME_LEN];
        const char *module = alias;
 
        snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
        dev_dbg(&codec->core.dev, "loading codec module: %s\n", module);
        request_module(module);
-}
-#else
-static void hda_codec_load_module(struct hda_codec *codec) {}
 #endif
+       return device_attach(hda_codec_dev(codec));
+}
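
device_attach() returns 1 when a driver bound, 0 when no driver matched, and a negative errno on error, so the probe path below maps the "no driver" case to a real failure. The mapping in isolation:

        #include <linux/device.h>

        static int attach_codec(struct device *dev)
        {
                int ret;

                /* device_attach(): 1 = bound, 0 = no matching driver, <0 = error. */
                ret = device_attach(dev);
                if (ret == 0)
                        ret = -ENOENT;  /* treat "no driver bound" as failure */

                return ret < 0 ? ret : 0;
        }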
 
 /* enable controller wake up event for all codecs with jack connectors */
 void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
@@ -129,10 +128,16 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
        if ((mach_params && mach_params->common_hdmi_codec_drv) ||
            (resp & 0xFFFF0000) != IDISP_VID_INTEL) {
                hdev->type = HDA_DEV_LEGACY;
-               hda_codec_load_module(&hda_priv->codec);
+               ret = hda_codec_load_module(&hda_priv->codec);
+               /*
+                * handle ret==0 (no driver bound) as an error, but pass
+                * other return codes without modification
+                */
+               if (ret == 0)
+                       ret = -ENOENT;
        }
 
-       return 0;
+       return ret;
 #else
        hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
index 8796f385be76c19ce20c346cb29a6b1849edb35e..896d21984b735abcfe3ca9fea6fa0fce0d0ef9e5 100644 (file)
@@ -216,6 +216,8 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
                link_dev = hda_link_stream_assign(bus, substream);
                if (!link_dev)
                        return -EBUSY;
+
+               snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
        }
 
        stream_tag = hdac_stream(link_dev)->stream_tag;
@@ -228,8 +230,6 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
        if (ret < 0)
                return ret;
 
-       snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
-
        link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
        if (!link)
                return -EINVAL;
@@ -361,6 +361,13 @@ static int hda_link_hw_free(struct snd_pcm_substream *substream,
        bus = hstream->bus;
        rtd = snd_pcm_substream_chip(substream);
        link_dev = snd_soc_dai_get_dma_data(dai, substream);
+
+       if (!link_dev) {
+               dev_dbg(dai->dev,
+                       "%s: link_dev is not assigned\n", __func__);
+               return -EINVAL;
+       }
+
        hda_stream = hstream_to_sof_hda_stream(link_dev);
 
        /* free the link DMA channel in the FW */
index b1783360fe106bdbf33acffc4443d142bfd81240..bae7ac3581e5189d63652a12a00aa5cfde27b42a 100644 (file)
@@ -329,13 +329,13 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
                if (!ret)
                        break;
 
-               dev_err(sdev->dev, "error: Error code=0x%x: FW status=0x%x\n",
+               dev_dbg(sdev->dev, "iteration %d of Core En/ROM load failed: %d\n",
+                       i, ret);
+               dev_dbg(sdev->dev, "Error code=0x%x: FW status=0x%x\n",
                        snd_sof_dsp_read(sdev, HDA_DSP_BAR,
                                         HDA_DSP_SRAM_REG_ROM_ERROR),
                        snd_sof_dsp_read(sdev, HDA_DSP_BAR,
                                         HDA_DSP_SRAM_REG_ROM_STATUS));
-               dev_err(sdev->dev, "error: iteration %d of Core En/ROM load failed: %d\n",
-                       i, ret);
        }
 
        if (i == HDA_FW_BOOT_ATTEMPTS) {
index 5994e10733642afc2bcc710798c541ed7722884d..5fdfbaa8c4ed6a7832db085a664bc4d6aa1a1836 100644 (file)
@@ -826,6 +826,9 @@ void snd_sof_ipc_free(struct snd_sof_dev *sdev)
 {
        struct snd_sof_ipc *ipc = sdev->ipc;
 
+       if (!ipc)
+               return;
+
        /* disable sending of ipc's */
        mutex_lock(&ipc->tx_mutex);
        ipc->disable_ipc_tx = true;
index 9a9a381a908dee467665c979fecc7d31fb58e3bc..432d12bd493791837ff38f8afe0e71c35d858915 100644 (file)
@@ -50,8 +50,7 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset)
 
        while (ext_hdr->hdr.cmd == SOF_IPC_FW_READY) {
                /* read in ext structure */
-               offset += sizeof(*ext_hdr);
-               snd_sof_dsp_block_read(sdev, bar, offset,
+               snd_sof_dsp_block_read(sdev, bar, offset + sizeof(*ext_hdr),
                                   (void *)((u8 *)ext_data + sizeof(*ext_hdr)),
                                   ext_hdr->hdr.size - sizeof(*ext_hdr));
 
@@ -61,11 +60,15 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset)
                /* process structure data */
                switch (ext_hdr->type) {
                case SOF_IPC_EXT_DMA_BUFFER:
+                       ret = 0;
                        break;
                case SOF_IPC_EXT_WINDOW:
                        ret = get_ext_windows(sdev, ext_hdr);
                        break;
                default:
+                       dev_warn(sdev->dev, "warning: unknown ext header type %d size 0x%x\n",
+                                ext_hdr->type, ext_hdr->hdr.size);
+                       ret = 0;
                        break;
                }
 
index d82ab981e8408ad0f9ea0d9179eff846976a7284..e20b806ec80f1aa1cb74989baca3130c8ff04427 100644 (file)
@@ -3132,7 +3132,9 @@ found:
        case SOF_DAI_INTEL_SSP:
        case SOF_DAI_INTEL_DMIC:
        case SOF_DAI_INTEL_ALH:
-               /* no resource needs to be released for SSP, DMIC and ALH */
+       case SOF_DAI_IMX_SAI:
+       case SOF_DAI_IMX_ESAI:
+               /* no resource needs to be released for all cases above */
                break;
        case SOF_DAI_INTEL_HDA:
                ret = sof_link_hda_unload(sdev, link);
index 48ea915b24ba25018e3357fe949a3a35276ac4b5..2ed92c990b97c1c0d663b9a4c9b023507db580f2 100644 (file)
@@ -226,7 +226,6 @@ static void uni_player_set_channel_status(struct uniperif *player,
         * sampling frequency. If no sample rate is already specified, then
         * set one.
         */
-       mutex_lock(&player->ctrl_lock);
        if (runtime) {
                switch (runtime->rate) {
                case 22050:
@@ -303,7 +302,6 @@ static void uni_player_set_channel_status(struct uniperif *player,
                player->stream_settings.iec958.status[3 + (n * 4)] << 24;
                SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status);
        }
-       mutex_unlock(&player->ctrl_lock);
 
        /* Update the channel status */
        if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
@@ -365,8 +363,10 @@ static int uni_player_prepare_iec958(struct uniperif *player,
 
        SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player);
 
+       mutex_lock(&player->ctrl_lock);
        /* Update the channel status */
        uni_player_set_channel_status(player, runtime);
+       mutex_unlock(&player->ctrl_lock);
 
        /* Clear the user validity user bits */
        SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
@@ -598,7 +598,6 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
        iec958->status[1] = ucontrol->value.iec958.status[1];
        iec958->status[2] = ucontrol->value.iec958.status[2];
        iec958->status[3] = ucontrol->value.iec958.status[3];
-       mutex_unlock(&player->ctrl_lock);
 
        spin_lock_irqsave(&player->irq_lock, flags);
        if (player->substream && player->substream->runtime)
@@ -608,6 +607,8 @@ static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
                uni_player_set_channel_status(player, NULL);
 
        spin_unlock_irqrestore(&player->irq_lock, flags);
+       mutex_unlock(&player->ctrl_lock);
+
        return 0;
 }
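
The uniperif changes fix the locking hierarchy: the sleeping ctrl_lock mutex is taken first and released last, the irq spinlock nests inside it, and uni_player_set_channel_status() itself no longer takes the mutex so callers can hold it across the spinlocked section. The ordering in miniature, with hypothetical locks mirroring player->ctrl_lock and player->irq_lock:

        #include <linux/mutex.h>
        #include <linux/spinlock.h>

        static DEFINE_MUTEX(ctrl_lock);
        static DEFINE_SPINLOCK(irq_lock);

        static void update_status(void)
        {
                unsigned long flags;

                mutex_lock(&ctrl_lock);         /* sleeping lock first */
                spin_lock_irqsave(&irq_lock, flags);
                /* ... touch state shared with the IRQ handler ... */
                spin_unlock_irqrestore(&irq_lock, flags);
                mutex_unlock(&ctrl_lock);       /* never while the spinlock is held */
        }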
 
index 81c407da15c5e9c1d1d76ce415e3963ee9d23ce0..08696a4adb69a69a40629cb91a25da26e94b70bb 100644 (file)
@@ -153,13 +153,13 @@ static const struct snd_soc_component_driver stm32_adfsdm_dai_component = {
        .name = "stm32_dfsdm_audio",
 };
 
-static void memcpy_32to16(void *dest, const void *src, size_t n)
+static void stm32_memcpy_32to16(void *dest, const void *src, size_t n)
 {
        unsigned int i = 0;
        u16 *d = (u16 *)dest, *s = (u16 *)src;
 
        s++;
-       for (i = n; i > 0; i--) {
+       for (i = n >> 1; i > 0; i--) {
                *d++ = *s++;
                s++;
        }
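
The fix above corrects an off-by-2x: n counts destination bytes, while each iteration emits one u16 from one u32, so the loop must run n/2 times. On little-endian the pointer walk is equivalent to keeping the high half of each sample, as in this standalone sketch:

        #include <linux/types.h>

        /* Keep the high 16 bits of each 32-bit sample; n is the number of
         * destination bytes, so n >> 1 samples are produced. */
        static void copy_32to16(u16 *dst, const u32 *src, size_t n)
        {
                size_t i;

                for (i = 0; i < (n >> 1); i++)
                        dst[i] = (u16)(src[i] >> 16);
        }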
@@ -186,8 +186,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
 
        if ((priv->pos + src_size) > buff_size) {
                if (format == SNDRV_PCM_FORMAT_S16_LE)
-                       memcpy_32to16(&pcm_buff[priv->pos], src_buff,
-                                     buff_size - priv->pos);
+                       stm32_memcpy_32to16(&pcm_buff[priv->pos], src_buff,
+                                           buff_size - priv->pos);
                else
                        memcpy(&pcm_buff[priv->pos], src_buff,
                               buff_size - priv->pos);
@@ -196,8 +196,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
        }
 
        if (format == SNDRV_PCM_FORMAT_S16_LE)
-               memcpy_32to16(&pcm_buff[priv->pos],
-                             &src_buff[src_size - cur_size], cur_size);
+               stm32_memcpy_32to16(&pcm_buff[priv->pos],
+                                   &src_buff[src_size - cur_size], cur_size);
        else
                memcpy(&pcm_buff[priv->pos], &src_buff[src_size - cur_size],
                       cur_size);
index 48e629ac2d88b630778004f652cd84b4f3b53dbd..30bcd5d3a32a8ffe4bcda256200c0ecbc7f4832b 100644 (file)
@@ -184,6 +184,56 @@ static bool stm32_sai_sub_writeable_reg(struct device *dev, unsigned int reg)
        }
 }
 
+static int stm32_sai_sub_reg_up(struct stm32_sai_sub_data *sai,
+                               unsigned int reg, unsigned int mask,
+                               unsigned int val)
+{
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_update_bits(sai->regmap, reg, mask, val);
+
+       clk_disable(sai->pdata->pclk);
+
+       return ret;
+}
+
+static int stm32_sai_sub_reg_wr(struct stm32_sai_sub_data *sai,
+                               unsigned int reg, unsigned int mask,
+                               unsigned int val)
+{
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write_bits(sai->regmap, reg, mask, val);
+
+       clk_disable(sai->pdata->pclk);
+
+       return ret;
+}
+
+static int stm32_sai_sub_reg_rd(struct stm32_sai_sub_data *sai,
+                               unsigned int reg, unsigned int *val)
+{
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_read(sai->regmap, reg, val);
+
+       clk_disable(sai->pdata->pclk);
+
+       return ret;
+}
+
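
All three accessors bracket the regmap call with clk_enable()/clk_disable() on the peripheral clock. That is safe even from atomic context because enable/disable are the non-sleeping, refcounted half of the clk API; the sleeping clk_prepare()/clk_unprepare() half is done once in the probe and remove hunks further down. The general shape of the pattern, as a sketch with a hypothetical device type:

    /* Assumes dev->pclk was clk_prepare()d at probe time. */
    static int guarded_read(struct my_dev *dev, unsigned int reg,
                            unsigned int *val)
    {
            int ret = clk_enable(dev->pclk);   /* refcounted, non-sleeping */

            if (ret < 0)
                    return ret;
            ret = regmap_read(dev->regmap, reg, val);
            clk_disable(dev->pclk);            /* balanced on every path */
            return ret;
    }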
 static const struct regmap_config stm32_sai_sub_regmap_config_f4 = {
        .reg_bits = 32,
        .reg_stride = 4,
@@ -295,7 +345,7 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai,
 
        mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version));
        cr1 = SAI_XCR1_MCKDIV_SET(div);
-       ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
+       ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, mask, cr1);
        if (ret < 0)
                dev_err(&sai->pdev->dev, "Failed to update CR1 register\n");
 
@@ -372,8 +422,8 @@ static int stm32_sai_mclk_enable(struct clk_hw *hw)
 
        dev_dbg(&sai->pdev->dev, "Enable master clock\n");
 
-       return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                 SAI_XCR1_MCKEN, SAI_XCR1_MCKEN);
+       return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                   SAI_XCR1_MCKEN, SAI_XCR1_MCKEN);
 }
 
 static void stm32_sai_mclk_disable(struct clk_hw *hw)
@@ -383,7 +433,7 @@ static void stm32_sai_mclk_disable(struct clk_hw *hw)
 
        dev_dbg(&sai->pdev->dev, "Disable master clock\n");
 
-       regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0);
+       stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0);
 }
 
 static const struct clk_ops mclk_ops = {
@@ -446,15 +496,15 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
        unsigned int sr, imr, flags;
        snd_pcm_state_t status = SNDRV_PCM_STATE_RUNNING;
 
-       regmap_read(sai->regmap, STM_SAI_IMR_REGX, &imr);
-       regmap_read(sai->regmap, STM_SAI_SR_REGX, &sr);
+       stm32_sai_sub_reg_rd(sai, STM_SAI_IMR_REGX, &imr);
+       stm32_sai_sub_reg_rd(sai, STM_SAI_SR_REGX, &sr);
 
        flags = sr & imr;
        if (!flags)
                return IRQ_NONE;
 
-       regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK,
-                         SAI_XCLRFR_MASK);
+       stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK,
+                            SAI_XCLRFR_MASK);
 
        if (!sai->substream) {
                dev_err(&pdev->dev, "Device stopped. Spurious IRQ 0x%x\n", sr);
@@ -503,8 +553,8 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
        int ret;
 
        if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
-               ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                        SAI_XCR1_NODIV,
+               ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                          SAI_XCR1_NODIV,
                                         freq ? 0 : SAI_XCR1_NODIV);
                if (ret < 0)
                        return ret;
@@ -583,7 +633,7 @@ static int stm32_sai_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
 
        slotr_mask |= SAI_XSLOTR_SLOTEN_MASK;
 
-       regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, slotr_mask, slotr);
+       stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, slotr_mask, slotr);
 
        sai->slot_width = slot_width;
        sai->slots = slots;
@@ -665,7 +715,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
        cr1_mask |= SAI_XCR1_CKSTR;
        frcr_mask |= SAI_XFRCR_FSPOL;
 
-       regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
+       stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr);
 
        /* DAI clock master masks */
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -693,7 +743,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
        cr1_mask |= SAI_XCR1_SLAVE;
 
 conf_update:
-       ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+       ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
        if (ret < 0) {
                dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
                return ret;
@@ -730,12 +780,12 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
        }
 
        /* Enable ITs */
-       regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX,
-                         SAI_XCLRFR_MASK, SAI_XCLRFR_MASK);
+       stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX,
+                            SAI_XCLRFR_MASK, SAI_XCLRFR_MASK);
 
        imr = SAI_XIMR_OVRUDRIE;
        if (STM_SAI_IS_CAPTURE(sai)) {
-               regmap_read(sai->regmap, STM_SAI_CR2_REGX, &cr2);
+               stm32_sai_sub_reg_rd(sai, STM_SAI_CR2_REGX, &cr2);
                if (cr2 & SAI_XCR2_MUTECNT_MASK)
                        imr |= SAI_XIMR_MUTEDETIE;
        }
@@ -745,8 +795,8 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
        else
                imr |= SAI_XIMR_AFSDETIE | SAI_XIMR_LFSDETIE;
 
-       regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX,
-                          SAI_XIMR_MASK, imr);
+       stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX,
+                            SAI_XIMR_MASK, imr);
 
        return 0;
 }
@@ -763,10 +813,10 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai,
         * SAI fifo threshold is set to half fifo, to keep enough space
         * for DMA incoming bursts.
         */
-       regmap_write_bits(sai->regmap, STM_SAI_CR2_REGX,
-                         SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK,
-                         SAI_XCR2_FFLUSH |
-                         SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF));
+       stm32_sai_sub_reg_wr(sai, STM_SAI_CR2_REGX,
+                            SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK,
+                            SAI_XCR2_FFLUSH |
+                            SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF));
 
        /* DS bits in CR1 not set for SPDIF (size forced to 24 bits).*/
        if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
@@ -795,7 +845,7 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai,
        if ((sai->slots == 2) && (params_channels(params) == 1))
                cr1 |= SAI_XCR1_MONO;
 
-       ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+       ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
        if (ret < 0) {
                dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
                return ret;
@@ -809,7 +859,7 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai)
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        int slotr, slot_sz;
 
-       regmap_read(sai->regmap, STM_SAI_SLOTR_REGX, &slotr);
+       stm32_sai_sub_reg_rd(sai, STM_SAI_SLOTR_REGX, &slotr);
 
        /*
         * If SLOTSZ is set to auto in SLOTR, align slot width on data size
@@ -831,16 +881,16 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai)
                sai->slots = 2;
 
        /* The number of slots in the audio frame is equal to NBSLOT[3:0] + 1*/
-       regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX,
-                          SAI_XSLOTR_NBSLOT_MASK,
-                          SAI_XSLOTR_NBSLOT_SET((sai->slots - 1)));
+       stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX,
+                            SAI_XSLOTR_NBSLOT_MASK,
+                            SAI_XSLOTR_NBSLOT_SET((sai->slots - 1)));
 
        /* Set default slots mask if not already set from DT */
        if (!(slotr & SAI_XSLOTR_SLOTEN_MASK)) {
                sai->slot_mask = (1 << sai->slots) - 1;
-               regmap_update_bits(sai->regmap,
-                                  STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK,
-                                  SAI_XSLOTR_SLOTEN_SET(sai->slot_mask));
+               stm32_sai_sub_reg_up(sai,
+                                    STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK,
+                                    SAI_XSLOTR_SLOTEN_SET(sai->slot_mask));
        }
 
        dev_dbg(cpu_dai->dev, "Slots %d, slot width %d\n",
@@ -870,14 +920,14 @@ static void stm32_sai_set_frame(struct snd_soc_dai *cpu_dai)
        dev_dbg(cpu_dai->dev, "Frame length %d, frame active %d\n",
                sai->fs_length, fs_active);
 
-       regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
+       stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr);
 
        if ((sai->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_LSB) {
                offset = sai->slot_width - sai->data_size;
 
-               regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX,
-                                  SAI_XSLOTR_FBOFF_MASK,
-                                  SAI_XSLOTR_FBOFF_SET(offset));
+               stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX,
+                                    SAI_XSLOTR_FBOFF_MASK,
+                                    SAI_XSLOTR_FBOFF_SET(offset));
        }
 }
 
@@ -994,9 +1044,9 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
                                        return -EINVAL;
                                }
 
-                               regmap_update_bits(sai->regmap,
-                                                  STM_SAI_CR1_REGX,
-                                                  SAI_XCR1_OSR, cr1);
+                               stm32_sai_sub_reg_up(sai,
+                                                    STM_SAI_CR1_REGX,
+                                                    SAI_XCR1_OSR, cr1);
 
                                div = stm32_sai_get_clk_div(sai, sai_clk_rate,
                                                            sai->mclk_rate);
@@ -1058,12 +1108,12 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd,
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                dev_dbg(cpu_dai->dev, "Enable DMA and SAI\n");
 
-               regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                  SAI_XCR1_DMAEN, SAI_XCR1_DMAEN);
+               stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                    SAI_XCR1_DMAEN, SAI_XCR1_DMAEN);
 
                /* Enable SAI */
-               ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                        SAI_XCR1_SAIEN, SAI_XCR1_SAIEN);
+               ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                          SAI_XCR1_SAIEN, SAI_XCR1_SAIEN);
                if (ret < 0)
                        dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
                break;
@@ -1072,16 +1122,16 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd,
        case SNDRV_PCM_TRIGGER_STOP:
                dev_dbg(cpu_dai->dev, "Disable DMA and SAI\n");
 
-               regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX,
-                                  SAI_XIMR_MASK, 0);
+               stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX,
+                                    SAI_XIMR_MASK, 0);
 
-               regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                  SAI_XCR1_SAIEN,
-                                  (unsigned int)~SAI_XCR1_SAIEN);
+               stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                    SAI_XCR1_SAIEN,
+                                    (unsigned int)~SAI_XCR1_SAIEN);
 
-               ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
-                                        SAI_XCR1_DMAEN,
-                                        (unsigned int)~SAI_XCR1_DMAEN);
+               ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+                                          SAI_XCR1_DMAEN,
+                                          (unsigned int)~SAI_XCR1_DMAEN);
                if (ret < 0)
                        dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
 
@@ -1101,7 +1151,7 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        unsigned long flags;
 
-       regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
+       stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
 
        clk_disable_unprepare(sai->sai_ck);
 
@@ -1169,7 +1219,7 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
        cr1_mask |= SAI_XCR1_SYNCEN_MASK;
        cr1 |= SAI_XCR1_SYNCEN_SET(sai->sync);
 
-       return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+       return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
 }
 
 static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops = {
@@ -1322,8 +1372,13 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
        if (STM_SAI_HAS_PDM(sai) && STM_SAI_IS_SUB_A(sai))
                sai->regmap_config = &stm32_sai_sub_regmap_config_h7;
 
-       sai->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "sai_ck",
-                                               base, sai->regmap_config);
+       /*
+        * Do not manage peripheral clock through regmap framework as this
+        * can lead to a circular locking issue with the SAI master clock
+        * provider.
+        * Manage peripheral clock directly in driver instead.
+        */
+       sai->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+                                           sai->regmap_config);
        if (IS_ERR(sai->regmap)) {
                dev_err(&pdev->dev, "Failed to initialize MMIO\n");
                return PTR_ERR(sai->regmap);
@@ -1420,6 +1475,10 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
                return PTR_ERR(sai->sai_ck);
        }
 
+       ret = clk_prepare(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
+
        if (STM_SAI_IS_F4(sai->pdata))
                return 0;
 
@@ -1501,22 +1560,48 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int stm32_sai_sub_remove(struct platform_device *pdev)
+{
+       struct stm32_sai_sub_data *sai = dev_get_drvdata(&pdev->dev);
+
+       clk_unprepare(sai->pdata->pclk);
+
+       return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int stm32_sai_sub_suspend(struct device *dev)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
 
        regcache_cache_only(sai->regmap, true);
        regcache_mark_dirty(sai->regmap);
+
+       clk_disable(sai->pdata->pclk);
+
        return 0;
 }
 
 static int stm32_sai_sub_resume(struct device *dev)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_enable(sai->pdata->pclk);
+       if (ret < 0)
+               return ret;
 
        regcache_cache_only(sai->regmap, false);
-       return regcache_sync(sai->regmap);
+       ret = regcache_sync(sai->regmap);
+
+       clk_disable(sai->pdata->pclk);
+
+       return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
 
@@ -1531,6 +1616,7 @@ static struct platform_driver stm32_sai_sub_driver = {
                .pm = &stm32_sai_sub_pm_ops,
        },
        .probe = stm32_sai_sub_probe,
+       .remove = stm32_sai_sub_remove,
 };
 
 module_platform_driver(stm32_sai_sub_driver);
index 3fd28ee01675edcad8442d9bb2599a7b9c730f8c..3769d9ce5dbef9f546bd93c11c628d1c5c6b5439 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 
  * @slave_config: dma slave channel runtime config pointer
  * @phys_addr: SPDIFRX registers physical base address
  * @lock: synchronization enabling lock
+ * @irq_lock: prevent race condition with IRQ on stream state
  * @cs: channel status buffer
  * @ub: user data buffer
  * @irq: SPDIFRX interrupt line
@@ -240,6 +240,7 @@ struct stm32_spdifrx_data {
        struct dma_slave_config slave_config;
        dma_addr_t phys_addr;
        spinlock_t lock;  /* Sync enabling lock */
+       spinlock_t irq_lock; /* Prevent race condition on stream state */
        unsigned char cs[SPDIFRX_CS_BYTES_NB];
        unsigned char ub[SPDIFRX_UB_BYTES_NB];
        int irq;
@@ -320,6 +321,7 @@ static void stm32_spdifrx_dma_ctrl_stop(struct stm32_spdifrx_data *spdifrx)
 static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
 {
        int cr, cr_mask, imr, ret;
+       unsigned long flags;
 
        /* Enable IRQs */
        imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | SPDIFRX_IMR_PERRIE;
@@ -327,7 +329,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
        if (ret)
                return ret;
 
-       spin_lock(&spdifrx->lock);
+       spin_lock_irqsave(&spdifrx->lock, flags);
 
        spdifrx->refcount++;
 
@@ -362,7 +364,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
                                "Failed to start synchronization\n");
        }
 
-       spin_unlock(&spdifrx->lock);
+       spin_unlock_irqrestore(&spdifrx->lock, flags);
 
        return ret;
 }
@@ -370,11 +372,12 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
 static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
 {
        int cr, cr_mask, reg;
+       unsigned long flags;
 
-       spin_lock(&spdifrx->lock);
+       spin_lock_irqsave(&spdifrx->lock, flags);
 
        if (--spdifrx->refcount) {
-               spin_unlock(&spdifrx->lock);
+               spin_unlock_irqrestore(&spdifrx->lock, flags);
                return;
        }
 
@@ -393,7 +396,7 @@ static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
        regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, &reg);
        regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, &reg);
 
-       spin_unlock(&spdifrx->lock);
+       spin_unlock_irqrestore(&spdifrx->lock, flags);
 }
 
 static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
@@ -480,8 +483,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
        memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB);
        memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB);
 
-       pinctrl_pm_select_default_state(&spdifrx->pdev->dev);
-
        ret = stm32_spdifrx_dma_ctrl_start(spdifrx);
        if (ret < 0)
                return ret;
@@ -513,7 +514,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
 
 end:
        clk_disable_unprepare(spdifrx->kclk);
-       pinctrl_pm_select_sleep_state(&spdifrx->pdev->dev);
 
        return ret;
 }
@@ -665,7 +665,6 @@ static const struct regmap_config stm32_h7_spdifrx_regmap_conf = {
 static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
 {
        struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid;
-       struct snd_pcm_substream *substream = spdifrx->substream;
        struct platform_device *pdev = spdifrx->pdev;
        unsigned int cr, mask, sr, imr;
        unsigned int flags, sync_state;
@@ -745,14 +744,19 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
                        return IRQ_HANDLED;
                }
 
-               if (substream)
-                       snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+               spin_lock(&spdifrx->irq_lock);
+               if (spdifrx->substream)
+                       snd_pcm_stop(spdifrx->substream,
+                                    SNDRV_PCM_STATE_DISCONNECTED);
+               spin_unlock(&spdifrx->irq_lock);
 
                return IRQ_HANDLED;
        }
 
-       if (err_xrun && substream)
-               snd_pcm_stop_xrun(substream);
+       spin_lock(&spdifrx->irq_lock);
+       if (err_xrun && spdifrx->substream)
+               snd_pcm_stop_xrun(spdifrx->substream);
+       spin_unlock(&spdifrx->irq_lock);
 
        return IRQ_HANDLED;
 }
@@ -761,9 +765,12 @@ static int stm32_spdifrx_startup(struct snd_pcm_substream *substream,
                                 struct snd_soc_dai *cpu_dai)
 {
        struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&spdifrx->irq_lock, flags);
        spdifrx->substream = substream;
+       spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
 
        ret = clk_prepare_enable(spdifrx->kclk);
        if (ret)
@@ -839,8 +846,12 @@ static void stm32_spdifrx_shutdown(struct snd_pcm_substream *substream,
                                   struct snd_soc_dai *cpu_dai)
 {
        struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+       unsigned long flags;
 
+       spin_lock_irqsave(&spdifrx->irq_lock, flags);
        spdifrx->substream = NULL;
+       spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
+
        clk_disable_unprepare(spdifrx->kclk);
 }
 
@@ -944,6 +955,7 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
        spdifrx->pdev = pdev;
        init_completion(&spdifrx->cs_completion);
        spin_lock_init(&spdifrx->lock);
+       spin_lock_init(&spdifrx->irq_lock);
 
        platform_set_drvdata(pdev, spdifrx);
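
The new irq_lock closes a window in which the ISR could call snd_pcm_stop() or snd_pcm_stop_xrun() on a substream that startup/shutdown was concurrently publishing or tearing down. A user-space analogue of the publish/unpublish discipline, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *substream;  /* stands in for spdifrx->substream */

    static void isr_path(void)
    {
            pthread_mutex_lock(&irq_lock);
            if (substream)  /* non-NULL is only observable under the lock */
                    printf("stop stream %d\n", *substream);
            pthread_mutex_unlock(&irq_lock);
    }

    static void shutdown_path(void)
    {
            pthread_mutex_lock(&irq_lock);
            substream = NULL;  /* unpublish before the stream goes away */
            pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
            int stream = 42;

            substream = &stream;
            isr_path();        /* sees the stream */
            shutdown_path();
            isr_path();        /* no access after unpublish */
            return 0;
    }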
 
index 2991b9986f6653c2e741dfb97af2577b3b30416e..395403a2d33f82b81481ca2bdfb401f58454bfba 100644 (file)
@@ -145,6 +145,7 @@ struct snd_usb_substream {
        struct snd_usb_endpoint *sync_endpoint;
        unsigned long flags;
        bool need_setup_ep;             /* (re)configure EP at prepare? */
+       bool need_setup_fmt;            /* (re)configure fmt after resume? */
        unsigned int speed;             /* USB_SPEED_XXX */
 
        u64 formats;                    /* format bitmasks (all or'ed) */
index 9c8930bb00c8a5039804078f04bdedaf5853ab1a..0e4eab96e23e0fcc0cdf9213617cffd6914a2423 100644 (file)
@@ -370,7 +370,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
 add_sync_ep_from_ifnum:
        iface = usb_ifnum_to_if(dev, ifnum);
 
-       if (!iface || iface->num_altsetting == 0)
+       if (!iface || iface->num_altsetting < 2)
                return -EINVAL;
 
        alts = &iface->altsetting[1];
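
The tightened guard matters because the very next statement indexes altsetting[1]: a plain non-zero check still lets a one-element array through. The index/guard relationship in miniature:

    #include <assert.h>

    static int pick_second(const int *arr, int n)
    {
            if (n < 2)      /* the old check (n == 0) would still overread */
                    return -1;
            return arr[1];
    }

    int main(void)
    {
            int one[1] = { 7 };
            int two[2] = { 7, 8 };

            assert(pick_second(one, 1) == -1);
            assert(pick_second(two, 2) == 8);
            return 0;
    }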
@@ -506,15 +506,15 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        if (WARN_ON(!iface))
                return -EINVAL;
        alts = usb_altnum_to_altsetting(iface, fmt->altsetting);
-       altsd = get_iface_desc(alts);
-       if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting))
+       if (WARN_ON(!alts))
                return -EINVAL;
+       altsd = get_iface_desc(alts);
 
-       if (fmt == subs->cur_audiofmt)
+       if (fmt == subs->cur_audiofmt && !subs->need_setup_fmt)
                return 0;
 
        /* close the old interface */
-       if (subs->interface >= 0 && subs->interface != fmt->iface) {
+       if (subs->interface >= 0 && (subs->interface != fmt->iface || subs->need_setup_fmt)) {
                if (!subs->stream->chip->keep_iface) {
                        err = usb_set_interface(subs->dev, subs->interface, 0);
                        if (err < 0) {
@@ -528,6 +528,9 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
                subs->altset_idx = 0;
        }
 
+       if (subs->need_setup_fmt)
+               subs->need_setup_fmt = false;
+
        /* set interface */
        if (iface->cur_altsetting != alts) {
                err = snd_usb_select_mode_quirk(subs, fmt);
@@ -1728,6 +1731,13 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
                subs->data_endpoint->retire_data_urb = retire_playback_urb;
                subs->running = 0;
                return 0;
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+               if (subs->stream->chip->setup_fmt_after_resume_quirk) {
+                       stop_endpoints(subs, true);
+                       subs->need_setup_fmt = true;
+                       return 0;
+               }
+               break;
        }
 
        return -EINVAL;
@@ -1760,6 +1770,13 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
                subs->data_endpoint->retire_data_urb = retire_capture_urb;
                subs->running = 1;
                return 0;
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+               if (subs->stream->chip->setup_fmt_after_resume_quirk) {
+                       stop_endpoints(subs, true);
+                       subs->need_setup_fmt = true;
+                       return 0;
+               }
+               break;
        }
 
        return -EINVAL;
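
Both trigger handlers now intercept SNDRV_PCM_TRIGGER_SUSPEND when the quirk is set: endpoints are stopped and need_setup_fmt is latched so that set_format() (earlier hunk) redoes the full interface selection after resume even though the cached format still matches. Its decision condenses to roughly this sketch, modelling only the two fields involved:

    #include <assert.h>
    #include <stdbool.h>

    struct subs_sketch {
            const void *cur_audiofmt;  /* mirrors subs->cur_audiofmt */
            bool need_setup_fmt;       /* mirrors subs->need_setup_fmt */
    };

    static bool needs_full_setup(const struct subs_sketch *subs, const void *fmt)
    {
            /* A matching cached format no longer short-circuits while the
             * resume quirk is pending. */
            return !(fmt == subs->cur_audiofmt && !subs->need_setup_fmt);
    }

    int main(void)
    {
            int fmt;
            struct subs_sketch subs = { &fmt, false };

            assert(!needs_full_setup(&subs, &fmt)); /* unchanged: skip setup */
            subs.need_setup_fmt = true;
            assert(needs_full_setup(&subs, &fmt));  /* after suspend: redo */
            return 0;
    }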
index 70c338f3ae241b278255e37775d34e216b1de4e2..d187aa6d50db0cd1f810495cdc84318019f61f16 100644 (file)
@@ -3466,7 +3466,8 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                .vendor_name = "Dell",
                .product_name = "WD19 Dock",
                .profile_name = "Dell-WD15-Dock",
-               .ifnum = QUIRK_NO_INTERFACE
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_SETUP_FMT_AFTER_RESUME
        }
 },
 /* MOTU Microbook II */
index 349e1e52996d8924c812f0177998d45fee970950..82184036437b50d203e9d1581b35cd760cd63bda 100644 (file)
@@ -508,6 +508,16 @@ static int create_standard_mixer_quirk(struct snd_usb_audio *chip,
        return snd_usb_create_mixer(chip, quirk->ifnum, 0);
 }
 
+
+static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip,
+                                      struct usb_interface *iface,
+                                      struct usb_driver *driver,
+                                      const struct snd_usb_audio_quirk *quirk)
+{
+       chip->setup_fmt_after_resume_quirk = 1;
+       return 1;       /* Continue with creating streams and mixer */
+}
+
 /*
  * audio-interface quirks
  *
@@ -546,6 +556,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
                [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
                [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
                [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
+               [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk,
        };
 
        if (quirk->type < QUIRK_TYPE_COUNT) {
@@ -1386,6 +1397,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+       case USB_ID(0x05a7, 0x1020): /* Bose Companion 5 */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
        case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
        case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
index ff3cbf653de80df519891866969db4615a27e70e..6fe3ab582ec6ac31fe5c402a82c43e9db4b80b15 100644 (file)
@@ -33,7 +33,7 @@ struct snd_usb_audio {
        wait_queue_head_t shutdown_wait;
        unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
        unsigned int tx_length_quirk:1; /* Put length specifier in transfers */
-       
+       unsigned int setup_fmt_after_resume_quirk:1; /* set up the format to the interface after resume */
        int num_interfaces;
        int num_suspended_intf;
        int sample_rate_read_error;
@@ -98,6 +98,7 @@ enum quirk_type {
        QUIRK_AUDIO_EDIROL_UAXX,
        QUIRK_AUDIO_ALIGN_TRANSFER,
        QUIRK_AUDIO_STANDARD_MIXER,
+       QUIRK_SETUP_FMT_AFTER_RESUME,
 
        QUIRK_TYPE_COUNT
 };
index cd389d21219a769cc1914848c3d6422c10de8da2..cf258dd13050fdab578f89a9fa69b12550ce855c 100644 (file)
@@ -1755,7 +1755,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
                __func__, (unsigned int)res_mmio->start,
                (unsigned int)res_mmio->end);
 
-       card_ctx->mmio_start = ioremap_nocache(res_mmio->start,
+       card_ctx->mmio_start = ioremap(res_mmio->start,
                                               (size_t)(resource_size(res_mmio)));
        if (!card_ctx->mmio_start) {
                dev_err(&pdev->dev, "Could not get ioremap\n");
index 2769360f195cafe1ead78ea917495778a7a52c21..03cd7c19a683bb809ab7ad78e9e4916d7a2113ac 100644 (file)
@@ -131,8 +131,9 @@ struct kvm_vcpu_events {
        struct {
                __u8 serror_pending;
                __u8 serror_has_esr;
+               __u8 ext_dabt_pending;
                /* Align it to 8 bytes */
-               __u8 pad[6];
+               __u8 pad[5];
                __u64 serror_esr;
        } exception;
        __u32 reserved[12];
index 67c21f9bdbad2556be4114dfa09b8006a11e708e..820e5751ada71ab1ce0e08b90e026d4a9231dd36 100644 (file)
@@ -164,8 +164,9 @@ struct kvm_vcpu_events {
        struct {
                __u8 serror_pending;
                __u8 serror_has_esr;
+               __u8 ext_dabt_pending;
                /* Align it to 8 bytes */
-               __u8 pad[6];
+               __u8 pad[5];
                __u64 serror_esr;
        } exception;
        __u32 reserved[12];
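
In both the arm and arm64 copies of the header, the new ext_dabt_pending byte comes out of the padding, so the exception block keeps its 16-byte size and serror_esr keeps its 8-byte offset. A compile-time check of that invariant, with user-space integer types standing in for __u8/__u64:

    #include <stddef.h>
    #include <stdint.h>

    struct exception_block {
            uint8_t serror_pending;
            uint8_t serror_has_esr;
            uint8_t ext_dabt_pending;
            uint8_t pad[5];            /* was pad[6] before the new byte */
            uint64_t serror_esr;
    };

    _Static_assert(sizeof(struct exception_block) == 16, "size unchanged");
    _Static_assert(offsetof(struct exception_block, serror_esr) == 8,
                   "serror_esr offset unchanged");

    int main(void)
    {
            return 0;
    }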
@@ -323,6 +324,8 @@ struct kvm_vcpu_events {
 #define KVM_ARM_VCPU_TIMER_CTRL                1
 #define   KVM_ARM_VCPU_TIMER_IRQ_VTIMER                0
 #define   KVM_ARM_VCPU_TIMER_IRQ_PTIMER                1
+#define KVM_ARM_VCPU_PVTIME_CTRL       2
+#define   KVM_ARM_VCPU_PVTIME_IPA      0
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_VCPU2_SHIFT                28
index b0f72dea8b11ac689c990971dbf78c1601ef4e7a..264e266a85bf6a99c5b27b47733ad5f11b5e02d5 100644 (file)
@@ -667,6 +667,8 @@ struct kvm_ppc_cpu_char {
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES       1       /* 64-bit source attributes */
+#define KVM_DEV_XICS_GRP_CTRL          2
+#define   KVM_DEV_XICS_NR_SERVERS      1
 
 /* Layout of 64-bit source attribute values */
 #define  KVM_XICS_DESTINATION_SHIFT    0
@@ -683,6 +685,7 @@ struct kvm_ppc_cpu_char {
 #define KVM_DEV_XIVE_GRP_CTRL          1
 #define   KVM_DEV_XIVE_RESET           1
 #define   KVM_DEV_XIVE_EQ_SYNC         2
+#define   KVM_DEV_XIVE_NR_SERVERS      3
 #define KVM_DEV_XIVE_GRP_SOURCE                2       /* 64-bit source identifier */
 #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3       /* 64-bit source identifier */
 #define KVM_DEV_XIVE_GRP_EQ_CONFIG     4       /* 64-bit EQ identifier */
index 0652d3eed9bda96828657cba8250cdc9903a2469..e9b62498fe75a3f3fce3692678e5ff28bbb28880 100644 (file)
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_RDPRU              (13*32+ 4) /* Read processor register at user level */
 #define X86_FEATURE_WBNOINVD           (13*32+ 9) /* WBNOINVD instruction */
 #define X86_FEATURE_AMD_IBPB           (13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS           (13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_BUG_MDS                    X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY             X86_BUG(20) /* CPU is only affected by the  MSDBS variant of BUG_MDS */
 #define X86_BUG_SWAPGS                 X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA                    X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
+#define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 20ce682a2540f389dc62f29dbf07c0b3f32620ea..084e98da04a7ec3e102c26c71fb2f0234437a844 100644 (file)
                                                  * Microarchitectural Data
                                                  * Sampling (MDS) vulnerabilities.
                                                  */
+#define ARCH_CAP_PSCHANGE_MC_NO                BIT(6)   /*
+                                                 * The processor is not susceptible to a
+                                                 * machine check error due to modifying the
+                                                 * code page size along with either the
+                                                 * physical address or cache type
+                                                 * without TLB invalidation.
+                                                 */
+#define ARCH_CAP_TSX_CTRL_MSR          BIT(7)  /* MSR for TSX control is available. */
+#define ARCH_CAP_TAA_NO                        BIT(8)  /*
+                                                * Not susceptible to
+                                                * TSX Async Abort (TAA) vulnerabilities.
+                                                */
 
 #define MSR_IA32_FLUSH_CMD             0x0000010b
 #define L1D_FLUSH                      BIT(0)  /*
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
 
+#define MSR_IA32_TSX_CTRL              0x00000122
+#define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
+#define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
+
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
 #define MSR_IA32_SYSENTER_EIP          0x00000176
 #define MSR_AMD_PSTATE_DEF_BASE                0xc0010064
 #define MSR_AMD64_OSVW_ID_LENGTH       0xc0010140
 #define MSR_AMD64_OSVW_STATUS          0xc0010141
+#define MSR_AMD_PPIN_CTL               0xc00102f0
+#define MSR_AMD_PPIN                   0xc00102f1
 #define MSR_AMD64_LS_CFG               0xc0011020
 #define MSR_AMD64_DC_CFG               0xc0011022
 #define MSR_AMD64_BU_CFG2              0xc001102a
index 92748660ba51234f31a651d9181f50605418f0c9..df767afc690fea3c0eb66e0135f7437e37953e3a 100644 (file)
@@ -28,8 +28,8 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_LOCAL(memcpy)
        ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
                      "jmp memcpy_erms", X86_FEATURE_ERMS
 
@@ -41,8 +41,8 @@ ENTRY(memcpy)
        movl %edx, %ecx
        rep movsb
        ret
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END(memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
@@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy)
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
  * simpler than memcpy. Use memcpy_erms when possible.
  */
-ENTRY(memcpy_erms)
+SYM_FUNC_START(memcpy_erms)
        movq %rdi, %rax
        movq %rdx, %rcx
        rep movsb
        ret
-ENDPROC(memcpy_erms)
+SYM_FUNC_END(memcpy_erms)
 
-ENTRY(memcpy_orig)
+SYM_FUNC_START(memcpy_orig)
        movq %rdi, %rax
 
        cmpq $0x20, %rdx
@@ -182,7 +182,7 @@ ENTRY(memcpy_orig)
 
 .Lend:
        retq
-ENDPROC(memcpy_orig)
+SYM_FUNC_END(memcpy_orig)
 
 #ifndef CONFIG_UML
 
@@ -193,7 +193,7 @@ MCSAFE_TEST_CTL
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(__memcpy_mcsafe)
+SYM_FUNC_START(__memcpy_mcsafe)
        cmpl $8, %edx
        /* Less than 8 bytes? Go to byte copy loop */
        jb .L_no_whole_words
@@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe)
        xorl %eax, %eax
 .L_done:
        ret
-ENDPROC(__memcpy_mcsafe)
+SYM_FUNC_END(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 
        .section .fixup, "ax"
index f8f3dc0a6690e92f19ce5bc398f33451907a66cb..fd5d25a474b7c66a04955623f042c3bfb43f62cb 100644 (file)
@@ -18,8 +18,8 @@
  *
  * rax   original destination
  */
-ENTRY(memset)
-ENTRY(__memset)
+SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START(__memset)
        /*
         * Some CPUs support the enhanced REP MOVSB/STOSB feature. It is recommended
         * to use it when possible. If not available, use fast string instructions.
@@ -42,8 +42,8 @@ ENTRY(__memset)
        rep stosb
        movq %r9,%rax
        ret
-ENDPROC(memset)
-ENDPROC(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_END_ALIAS(memset)
 
 /*
  * ISO C memset - set a memory block to a byte value. This function uses
@@ -56,16 +56,16 @@ ENDPROC(__memset)
  *
  * rax   original destination
  */
-ENTRY(memset_erms)
+SYM_FUNC_START(memset_erms)
        movq %rdi,%r9
        movb %sil,%al
        movq %rdx,%rcx
        rep stosb
        movq %r9,%rax
        ret
-ENDPROC(memset_erms)
+SYM_FUNC_END(memset_erms)
 
-ENTRY(memset_orig)
+SYM_FUNC_START(memset_orig)
        movq %rdi,%r10
 
        /* expand byte value  */
@@ -136,4 +136,4 @@ ENTRY(memset_orig)
        subq %r8,%rdx
        jmp .Lafter_bad_alignment
 .Lfinal:
-ENDPROC(memset_orig)
+SYM_FUNC_END(memset_orig)
index d66131f696892065240e225e2c86d44278ea4e6e..397e5716ab6d869f51496d29bd218f1a30eadf10 100644 (file)
@@ -26,7 +26,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw,
                           bool is_plain_text)
 {
        if (is_plain_text)
-               jsonw_printf(jw, "%p", data);
+               jsonw_printf(jw, "%p", *(void **)data);
        else
                jsonw_printf(jw, "%lu", *(unsigned long *)data);
 }
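
The fix changes which pointer is printed: data is the address of the slot holding the pointer, so the value to show is *(void **)data. In isolation:

    #include <stdio.h>

    int main(void)
    {
            void *target = (void *)0xdeadbeefUL;
            void *data = &target;  /* the dumper receives a pointer to the slot */

            printf("wrong: %p\n", data);            /* address of the slot */
            printf("right: %p\n", *(void **)data);  /* the stored pointer */
            return 0;
    }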
index 4535c863d2cd9136ef542bd50883bd51f3fc5bba..2ce9c5ba1934767386c3a8ffe576d04b3eb3f466 100644 (file)
@@ -493,7 +493,7 @@ static int do_dump(int argc, char **argv)
 
        info = &info_linear->info;
        if (mode == DUMP_JITED) {
-               if (info->jited_prog_len == 0) {
+               if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
                        p_info("no instructions returned");
                        goto err_free;
                }
index 494d7ae3614dea003d2bd774917a3f27e4d119ca..5b91ee65a080260e857187a30b921687c1532ab4 100644 (file)
@@ -174,7 +174,7 @@ static const char *print_call(void *private_data,
        struct kernel_sym *sym;
 
        if (insn->src_reg == BPF_PSEUDO_CALL &&
-           (__u32) insn->imm < dd->nr_jited_ksyms)
+           (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms)
                address = dd->jited_ksyms[insn->imm];
 
        sym = kernel_syms_search(dd, address);
index f30a89046aa3b9d2eba12d5b0540a41824293ff1..7ac0d8088565ba203f36e4b22b111747eea63157 100644 (file)
@@ -197,7 +197,7 @@ $(OUTPUT)test-libcrypto.bin:
        $(BUILD) -lcrypto
 
 $(OUTPUT)test-gtk2.bin:
-       $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
+       $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) -Wno-deprecated-declarations
 
 $(OUTPUT)test-gtk2-infobar.bin:
        $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
index a2b3f092d2f022cd73e3c4d06005d16485c6a307..7d87075cd1c5dd6e3e0964a9c428aa6af53ce6ed 100644 (file)
@@ -1,9 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
+#include "clang/Basic/Version.h"
+#if CLANG_VERSION_MAJOR < 8
 #include "clang/Basic/VirtualFileSystem.h"
+#endif
 #include "clang/Driver/Driver.h"
 #include "clang/Frontend/TextDiagnosticPrinter.h"
 #include "llvm/ADT/IntrusiveRefCntPtr.h"
 #include "llvm/Support/ManagedStatic.h"
+#if CLANG_VERSION_MAJOR >= 8
+#include "llvm/Support/VirtualFileSystem.h"
+#endif
 #include "llvm/Support/raw_ostream.h"
 
 using namespace clang;
index f79b23582a1d0c846451cc7e04ee999dc1b11dd8..7427a5ee761b3757feaf04ff76bf3afd7a91a90c 100644 (file)
@@ -72,7 +72,7 @@ class BlkgIterator:
         name = BlkgIterator.blkcg_name(blkcg)
         path = parent_path + '/' + name if parent_path else name
         blkg = drgn.Object(prog, 'struct blkcg_gq',
-                           address=radix_tree_lookup(blkcg.blkg_tree, q_id))
+                           address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
         if not blkg.address_:
             return
 
@@ -228,7 +228,7 @@ q_id = None
 root_iocg = None
 ioc = None
 
-for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree):
+for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree.address_of_()):
     blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr)
     try:
         if devname == blkg.q.kobj.parent.name.string_().decode('utf-8'):
index 05dca5c203f39ae4a2e0c5217169de0a5a1dc7cf..477a1cae513f2f98ddf49d4bd5062e18603eed46 100644 (file)
@@ -15,6 +15,8 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
                 const unsigned long *bitmap2, int bits);
 int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                 const unsigned long *bitmap2, unsigned int bits);
+int __bitmap_equal(const unsigned long *bitmap1,
+                  const unsigned long *bitmap2, unsigned int bits);
 void bitmap_clear(unsigned long *map, unsigned int start, int len);
 
 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
@@ -123,6 +125,15 @@ static inline unsigned long *bitmap_alloc(int nbits)
        return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
 }
 
+/*
+ * bitmap_free - Free bitmap
+ * @bitmap: pointer to bitmap
+ */
+static inline void bitmap_free(unsigned long *bitmap)
+{
+       free(bitmap);
+}
+
 /*
  * bitmap_scnprintf - print bitmap list into buffer
  * @bitmap: bitmap
@@ -148,4 +159,23 @@ static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
        return __bitmap_and(dst, src1, src2, nbits);
 }
 
+#ifdef __LITTLE_ENDIAN
+#define BITMAP_MEM_ALIGNMENT 8
+#else
+#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+#endif
+#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+static inline int bitmap_equal(const unsigned long *src1,
+                       const unsigned long *src2, unsigned int nbits)
+{
+       if (small_const_nbits(nbits))
+               return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+       if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+           IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
+               return !memcmp(src1, src2, nbits / 8);
+       return __bitmap_equal(src1, src2, nbits);
+}
+
 #endif /* _PERF_BITOPS_H */
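
bitmap_equal() compares only the first nbits bits; anything beyond nbits in the last word is masked off. A stand-alone check mirroring the __bitmap_equal() slow path added in the bitmap.c hunk near the end of this section:

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITMAP_LAST_WORD_MASK(nbits) \
            (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

    static int bitmap_equal_ref(const unsigned long *b1,
                                const unsigned long *b2, unsigned int bits)
    {
            unsigned int k, lim = bits / BITS_PER_LONG;

            for (k = 0; k < lim; ++k)
                    if (b1[k] != b2[k])
                            return 0;
            if (bits % BITS_PER_LONG)
                    if ((b1[k] ^ b2[k]) & BITMAP_LAST_WORD_MASK(bits))
                            return 0;
            return 1;
    }

    int main(void)
    {
            unsigned long a[2] = { 0xffUL, 0x1UL };
            unsigned long b[2] = { 0xffUL, 0xfff1UL };

            /* Equal over BITS_PER_LONG + 1 bits: the differing high bits
             * of the second word sit past nbits and are ignored. */
            assert(bitmap_equal_ref(a, b, BITS_PER_LONG + 1));
            assert(!bitmap_equal_ref(a, b, 2 * BITS_PER_LONG));
            return 0;
    }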
index 980cb9266718ae754ac6a492dad82078c2dd53e9..5e9e781905edc526c94b07374a8ddefc3318ae2b 100644 (file)
@@ -17,7 +17,15 @@ int strtobool(const char *s, bool *res);
  * However uClibc headers also define __GLIBC__ hence the hack below
  */
 #if defined(__GLIBC__) && !defined(__UCLIBC__)
+// pragma diagnostic was introduced in gcc 4.6
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
+#endif
 extern size_t strlcpy(char *dest, const char *src, size_t size);
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic pop
+#endif
 #endif
 
 char *str_error_r(int errnum, char *buf, size_t buflen);
index 8a5b2f8f8eb98b0f1f170960239327d420971354..868bf7996c0f8920e589b58b192528c5a602a9fe 100644 (file)
@@ -778,11 +778,12 @@ struct drm_syncobj_array {
        __u32 pad;
 };
 
+#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
 struct drm_syncobj_timeline_array {
        __u64 handles;
        __u64 points;
        __u32 count_handles;
-       __u32 pad;
+       __u32 flags;
 };
 
 
index 469dc512cca3513f0a38400c5ed1598096147de9..5400d7e057f143abdeb124c229ccfe7cdeb58196 100644 (file)
@@ -611,6 +611,13 @@ typedef struct drm_i915_irq_wait {
  * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
  */
 #define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
+
+/*
+ * Revision of the i915-perf uAPI. The value returned helps determine what
+ * i915-perf features are available. See drm_i915_perf_property_id.
+ */
+#define I915_PARAM_PERF_REVISION       54
+
 /* Must be kept compact -- no holes and well documented */
 
 typedef struct drm_i915_getparam {
@@ -1565,6 +1572,21 @@ struct drm_i915_gem_context_param {
  *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
  */
 #define I915_CONTEXT_PARAM_ENGINES     0xa
+
+/*
+ * I915_CONTEXT_PARAM_PERSISTENCE:
+ *
+ * Allow the context and active rendering to survive the process until
+ * completion. Persistence allows fire-and-forget clients to queue up a
+ * bunch of work, hand the output over to a display server and then quit.
+ * If the context is marked as not persistent, upon closing (either via
+ * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
+ * or process termination), the context and any outstanding requests will be
+ * cancelled (and exported fences for cancelled requests marked as -EIO).
+ *
+ * By default, new contexts allow persistence.
+ */
+#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
 /* Must be kept compact -- no holes and well documented */
 
        __u64 value;
@@ -1844,23 +1866,31 @@ enum drm_i915_perf_property_id {
         * Open the stream for a specific context handle (as used with
         * execbuffer2). A stream opened for a specific context this way
         * won't typically require root privileges.
+        *
+        * This property is available in perf revision 1.
         */
        DRM_I915_PERF_PROP_CTX_HANDLE = 1,
 
        /**
         * A value of 1 requests the inclusion of raw OA unit reports as
         * part of stream samples.
+        *
+        * This property is available in perf revision 1.
         */
        DRM_I915_PERF_PROP_SAMPLE_OA,
 
        /**
         * The value specifies which set of OA unit metrics should be
         * configured, defining the contents of any OA unit reports.
+        *
+        * This property is available in perf revision 1.
         */
        DRM_I915_PERF_PROP_OA_METRICS_SET,
 
        /**
         * The value specifies the size and layout of OA unit reports.
+        *
+        * This property is available in perf revision 1.
         */
        DRM_I915_PERF_PROP_OA_FORMAT,
 
@@ -1870,9 +1900,22 @@ enum drm_i915_perf_property_id {
         * from this exponent as follows:
         *
         *   80ns * 2^(period_exponent + 1)
+        *
+        * This property is available in perf revision 1.
         */
        DRM_I915_PERF_PROP_OA_EXPONENT,
 
+       /**
+        * Specifying this property is only valid when specifying a context to
+        * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
+        * will hold preemption of the particular context we want to gather
+        * performance data about. The execbuf2 submissions must include a
+        * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
+        *
+        * This property is available in perf revision 3.
+        */
+       DRM_I915_PERF_PROP_HOLD_PREEMPTION,
+
        DRM_I915_PERF_PROP_MAX /* non-ABI */
 };
 
@@ -1901,6 +1944,8 @@ struct drm_i915_perf_open_param {
  * to close and re-open a stream with the same configuration.
  *
  * It's undefined whether any pending data for the stream will be lost.
+ *
+ * This ioctl is available in perf revision 1.
  */
 #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
 
@@ -1908,9 +1953,24 @@ struct drm_i915_perf_open_param {
  * Disable data capture for a stream.
  *
  * It is an error to try and read a stream that is disabled.
+ *
+ * This ioctl is available in perf revision 1.
  */
 #define I915_PERF_IOCTL_DISABLE        _IO('i', 0x1)
 
+/**
+ * Change metrics_set captured by a stream.
+ *
+ * If the stream is bound to a specific context, the configuration change
+ * will be performed inline with that context such that it takes effect before
+ * the next execbuf submission.
+ *
+ * Returns the previously bound metrics set id, or a negative error code.
+ *
+ * This ioctl is available in perf revision 2.
+ */
+#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
+
 /**
  * Common to all i915 perf records
  */
@@ -1984,6 +2044,7 @@ struct drm_i915_query_item {
        __u64 query_id;
 #define DRM_I915_QUERY_TOPOLOGY_INFO    1
 #define DRM_I915_QUERY_ENGINE_INFO     2
+#define DRM_I915_QUERY_PERF_CONFIG      3
 /* Must be kept compact -- no holes and well documented */
 
        /*
@@ -1995,9 +2056,18 @@ struct drm_i915_query_item {
        __s32 length;
 
        /*
-        * Unused for now. Must be cleared to zero.
+        * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+        *
+        * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
+        * following :
+        *         - DRM_I915_QUERY_PERF_CONFIG_LIST
+        *         - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+        *         - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
         */
        __u32 flags;
+#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3
 
        /*
         * Data will be written at the location pointed by data_ptr when the
@@ -2033,8 +2103,10 @@ struct drm_i915_query {
  *           (data[X / 8] >> (X % 8)) & 1
  *
  * - the subslice mask for each slice with one bit per subslice telling
- *   whether a subslice is available. The availability of subslice Y in slice
- *   X can be queried with the following formula :
+ *   whether a subslice is available. Gen12 has dual-subslices, which are
+ *   similar to two gen11 subslices. For gen12, this array represents dual-
+ *   subslices. The availability of subslice Y in slice X can be queried
+ *   with the following formula :
  *
  *           (data[subslice_offset +
  *                 X * subslice_stride +
@@ -2123,6 +2195,56 @@ struct drm_i915_query_engine_info {
        struct drm_i915_engine_info engines[];
 };
 
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+ */
+struct drm_i915_query_perf_config {
+       union {
+               /*
+                * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
+                * this field to the number of configurations available.
+                */
+               __u64 n_configs;
+
+               /*
+                * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
+                * i915 will use the value in this field as configuration
+                * identifier to decide what data to write into config_ptr.
+                */
+               __u64 config;
+
+               /*
+                * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
+                * i915 will use the value in this field as configuration
+                * identifier to decide what data to write into config_ptr.
+                *
+                * String formatted like "%08x-%04x-%04x-%04x-%012x"
+                */
+               char uuid[36];
+       };
+
+       /*
+        * Unused for now. Must be cleared to zero.
+        */
+       __u32 flags;
+
+       /*
+        * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
+        * write an array of __u64 of configuration identifiers.
+        *
+        * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+        * or DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a
+        * struct drm_i915_perf_oa_config. If the following fields of
+        * drm_i915_perf_oa_config are not set to 0, i915 will write into
+        * the associated pointers the values submitted when the
+        * configuration was created :
+        *
+        *         - n_mux_regs
+        *         - n_boolean_regs
+        *         - n_flex_regs
+        */
+       __u8 data[];
+};
+
 #if defined(__cplusplus)
 }
 #endif
index 39ccfe9311c387ba0f6c0f88e605c05b0189e02a..1beb174ad9505634151c5ac2896ae63bcced028e 100644 (file)
@@ -17,7 +17,8 @@
 #define FSCRYPT_POLICY_FLAGS_PAD_32            0x03
 #define FSCRYPT_POLICY_FLAGS_PAD_MASK          0x03
 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY         0x04
-#define FSCRYPT_POLICY_FLAGS_VALID             0x07
+#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64     0x08
+#define FSCRYPT_POLICY_FLAGS_VALID             0x0F
 
 /* Encryption algorithms */
 #define FSCRYPT_MODE_AES_256_XTS               1
index 52641d8ca9e83f25b983f3cc6be115c37bad2d98..f0a16b4adbbd63c421006f6ca9b0fd9a892f7a5d 100644 (file)
@@ -235,6 +235,7 @@ struct kvm_hyperv_exit {
 #define KVM_EXIT_S390_STSI        25
 #define KVM_EXIT_IOAPIC_EOI       26
 #define KVM_EXIT_HYPERV           27
+#define KVM_EXIT_ARM_NISV         28
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -394,6 +395,11 @@ struct kvm_run {
                } eoi;
                /* KVM_EXIT_HYPERV */
                struct kvm_hyperv_exit hyperv;
+               /* KVM_EXIT_ARM_NISV */
+               struct {
+                       __u64 esr_iss;
+                       __u64 fault_ipa;
+               } arm_nisv;
                /* Fix the size of the union. */
                char padding[256];
        };
@@ -1000,6 +1006,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PMU_EVENT_FILTER 173
 #define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
 #define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
+#define KVM_CAP_PPC_GUEST_DEBUG_SSTEP 176
+#define KVM_CAP_ARM_NISV_TO_USER 177
+#define KVM_CAP_ARM_INJECT_EXT_DABT 178
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1227,6 +1236,8 @@ enum kvm_device_type {
 #define KVM_DEV_TYPE_ARM_VGIC_ITS      KVM_DEV_TYPE_ARM_VGIC_ITS
        KVM_DEV_TYPE_XIVE,
 #define KVM_DEV_TYPE_XIVE              KVM_DEV_TYPE_XIVE
+       KVM_DEV_TYPE_ARM_PV_TIME,
+#define KVM_DEV_TYPE_ARM_PV_TIME       KVM_DEV_TYPE_ARM_PV_TIME
        KVM_DEV_TYPE_MAX,
 };
 
@@ -1337,6 +1348,7 @@ struct kvm_s390_ucas_mapping {
 #define KVM_PPC_GET_CPU_CHAR     _IOR(KVMIO,  0xb1, struct kvm_ppc_cpu_char)
 /* Available with KVM_CAP_PMU_EVENT_FILTER */
 #define KVM_SET_PMU_EVENT_FILTER  _IOW(KVMIO,  0xb2, struct kvm_pmu_event_filter)
+#define KVM_PPC_SVM_OFF                  _IO(KVMIO,  0xb3)
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE        _IOWR(KVMIO,  0xe0, struct kvm_create_device)
index 99335e1f4a275b1dbc86219a6dee7701b18fd624..4a02178324641f555336164316ba2e30805c3719 100644 (file)
 #define CLONE_NEWNET           0x40000000      /* New network namespace */
 #define CLONE_IO               0x80000000      /* Clone io context */
 
+/* Flags for the clone3() syscall. */
+#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
+
 #ifndef __ASSEMBLY__
 /**
  * struct clone_args - arguments for the clone3 syscall
- * @flags:       Flags for the new process as listed above.
- *               All flags are valid except for CSIGNAL and
- *               CLONE_DETACHED.
- * @pidfd:       If CLONE_PIDFD is set, a pidfd will be
- *               returned in this argument.
- * @child_tid:   If CLONE_CHILD_SETTID is set, the TID of the
- *               child process will be returned in the child's
- *               memory.
- * @parent_tid:  If CLONE_PARENT_SETTID is set, the TID of
- *               the child process will be returned in the
- *               parent's memory.
- * @exit_signal: The exit_signal the parent process will be
- *               sent when the child exits.
- * @stack:       Specify the location of the stack for the
- *               child process.
- * @stack_size:  The size of the stack for the child process.
- * @tls:         If CLONE_SETTLS is set, the tls descriptor
- *               is set to tls.
+ * @flags:        Flags for the new process as listed above.
+ *                All flags are valid except for CSIGNAL and
+ *                CLONE_DETACHED.
+ * @pidfd:        If CLONE_PIDFD is set, a pidfd will be
+ *                returned in this argument.
+ * @child_tid:    If CLONE_CHILD_SETTID is set, the TID of the
+ *                child process will be returned in the child's
+ *                memory.
+ * @parent_tid:   If CLONE_PARENT_SETTID is set, the TID of
+ *                the child process will be returned in the
+ *                parent's memory.
+ * @exit_signal:  The exit_signal the parent process will be
+ *                sent when the child exits.
+ * @stack:        Specify the location of the stack for the
+ *                child process.
+ *                Note, @stack is expected to point to the
+ *                lowest address. The stack direction will be
+ *                determined by the kernel and set up
+ *                appropriately based on @stack_size.
+ * @stack_size:   The size of the stack for the child process.
+ * @tls:          If CLONE_SETTLS is set, the tls descriptor
+ *                is set to tls.
+ * @set_tid:      Pointer to an array of type *pid_t. The size
+ *                of the array is defined using @set_tid_size.
+ *                This array is used to select PIDs/TIDs for
+ *                newly created processes. The first element in
+ *                this array defines the PID in the most nested
+ *                PID namespace. Each additional element in the
+ *                array defines the PID in the parent PID
+ *                namespace of the original PID namespace. If the
+ *                array has fewer entries than the number of
+ *                currently nested PID namespaces, only the PIDs
+ *                in the corresponding namespaces are set.
+ * @set_tid_size: This defines the size of the array referenced
+ *                in @set_tid. This cannot be larger than the
+ *                kernel's limit of nested PID namespaces.
  *
  * The structure is versioned by size and thus extensible.
  * New struct members must go at the end of the struct and
@@ -68,10 +89,13 @@ struct clone_args {
        __aligned_u64 stack;
        __aligned_u64 stack_size;
        __aligned_u64 tls;
+       __aligned_u64 set_tid;
+       __aligned_u64 set_tid_size;
 };
 #endif
 
 #define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
+#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
 
 /*
  * Scheduling policies
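The new @set_tid/@set_tid_size fields above let a sufficiently privileged
caller pick the PID of the child in one or more nested PID namespaces. A
minimal sketch of how they might be used (a hypothetical example, not part
of this patch; it assumes a kernel and libc headers new enough to carry the
new struct fields and __NR_clone3, and the privilege the kernel requires in
the owning PID namespace):

[source,c]
--
#define _GNU_SOURCE
#include <linux/sched.h>        /* struct clone_args */
#include <linux/types.h>        /* __u64 */
#include <sys/syscall.h>
#include <sys/types.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        pid_t tid = 1234;       /* desired PID, chosen for illustration */
        struct clone_args args = {
                /* unknown/unused fields must stay zeroed */
                .exit_signal  = SIGCHLD,
                .set_tid      = (__u64)(uintptr_t)&tid,
                .set_tid_size = 1,      /* innermost namespace only */
        };
        long pid = syscall(__NR_clone3, &args, sizeof(args));

        if (pid < 0)
                perror("clone3");
        else if (pid == 0)
                _exit(0);               /* child runs with PID 1234 */
        else
                printf("spawned child %ld\n", pid);
        return 0;
}
--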
index 7b35e98d3c58b1df9f4315e3f76a2564c25a106b..ad80a5c885d598231ccf207ac48d2d908eaf1b85 100644 (file)
@@ -167,8 +167,8 @@ struct statx {
 #define STATX_ATTR_APPEND              0x00000020 /* [I] File is append-only */
 #define STATX_ATTR_NODUMP              0x00000040 /* [I] File is not to be dumped */
 #define STATX_ATTR_ENCRYPTED           0x00000800 /* [I] File requires key to decrypt in fs */
-
 #define STATX_ATTR_AUTOMOUNT           0x00001000 /* Dir: Automount trigger */
+#define STATX_ATTR_VERITY              0x00100000 /* [I] Verity protected file */
 
 
 #endif /* _UAPI_LINUX_STAT_H */
index 11b3885e833ed8eb4cfd7f32ba981abcdf58e5cf..027b18f7ed8cfd169319c08db2e68239b9ab9333 100644 (file)
@@ -210,6 +210,7 @@ static bool fs__env_override(struct fs *fs)
        size_t name_len = strlen(fs->name);
        /* name + "_PATH" + '\0' */
        char upper_name[name_len + 5 + 1];
+
        memcpy(upper_name, fs->name, name_len);
        mem_toupper(upper_name, name_len);
        strcpy(&upper_name[name_len], "_PATH");
@@ -219,7 +220,8 @@ static bool fs__env_override(struct fs *fs)
                return false;
 
        fs->found = true;
-       strncpy(fs->path, override_path, sizeof(fs->path));
+       strncpy(fs->path, override_path, sizeof(fs->path) - 1);
+       fs->path[sizeof(fs->path) - 1] = '\0';
        return true;
 }
 
index 38494782be06e386d3df83d4ed262faf0c9c958f..5043747ef6c5f9f2d662f26add8736ddc275b461 100644 (file)
@@ -71,3 +71,18 @@ int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                           BITMAP_LAST_WORD_MASK(bits));
        return result != 0;
 }
+
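+/*
+ * Return 1 if the first @bits bits of @bitmap1 and @bitmap2 are
+ * identical, 0 otherwise.
+ */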
+int __bitmap_equal(const unsigned long *bitmap1,
+               const unsigned long *bitmap2, unsigned int bits)
+{
+       unsigned int k, lim = bits/BITS_PER_LONG;
+       for (k = 0; k < lim; ++k)
+               if (bitmap1[k] != bitmap2[k])
+                       return 0;
+
+       if (bits % BITS_PER_LONG)
+               if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+                       return 0;
+
+       return 1;
+}
index defae23a0169d8d4b653ab7305c1f3bf3c664742..97830e46d1a05e182b16b1baf049073638aa1ebf 100644 (file)
@@ -138,6 +138,7 @@ STATIC_OBJDIR       := $(OUTPUT)staticobjs/
 BPF_IN_SHARED  := $(SHARED_OBJDIR)libbpf-in.o
 BPF_IN_STATIC  := $(STATIC_OBJDIR)libbpf-in.o
 VERSION_SCRIPT := libbpf.map
+BPF_HELPER_DEFS        := $(OUTPUT)bpf_helper_defs.h
 
 LIB_TARGET     := $(addprefix $(OUTPUT),$(LIB_TARGET))
 LIB_FILE       := $(addprefix $(OUTPUT),$(LIB_FILE))
@@ -159,7 +160,7 @@ all: fixdep
 
 all_cmd: $(CMD_TARGETS) check
 
-$(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h
+$(BPF_IN_SHARED): force elfdep bpfdep $(BPF_HELPER_DEFS)
        @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
        (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -177,12 +178,12 @@ $(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
        $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
 
-$(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h
+$(BPF_IN_STATIC): force elfdep bpfdep $(BPF_HELPER_DEFS)
        $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
 
-bpf_helper_defs.h: $(srctree)/tools/include/uapi/linux/bpf.h
+$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
        $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header              \
-               --file $(srctree)/tools/include/uapi/linux/bpf.h > bpf_helper_defs.h
+               --file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS)
 
 $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
 
@@ -243,7 +244,7 @@ install_lib: all_cmd
                $(call do_install_mkdir,$(libdir_SQ)); \
                cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
 
-install_headers: bpf_helper_defs.h
+install_headers: $(BPF_HELPER_DEFS)
        $(call QUIET_INSTALL, headers) \
                $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
                $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
@@ -251,7 +252,7 @@ install_headers: bpf_helper_defs.h
                $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
                $(call do_install,xsk.h,$(prefix)/include/bpf,644); \
                $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
-               $(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \
+               $(call do_install,$(BPF_HELPER_DEFS),$(prefix)/include/bpf,644); \
                $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
                $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \
                $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644);
@@ -271,7 +272,7 @@ config-clean:
 clean:
        $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
                *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
-               *.pc LIBBPF-CFLAGS bpf_helper_defs.h \
+               *.pc LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
                $(SHARED_OBJDIR) $(STATIC_OBJDIR)
        $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
similarity index 100%
rename from tools/perf/lib/Build
rename to tools/lib/perf/Build
diff --git a/tools/lib/perf/Documentation/Makefile b/tools/lib/perf/Documentation/Makefile
new file mode 100644 (file)
index 0000000..9727540
--- /dev/null
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Most of this file is copied from tools/perf/Documentation/Makefile
+
+include ../../../scripts/Makefile.include
+include ../../../scripts/utilities.mak
+
+MAN3_TXT  = libperf.txt
+MAN7_TXT  = libperf-counting.txt libperf-sampling.txt
+MAN_EX    = examples/*.c
+
+MAN_TXT   = $(MAN3_TXT) $(MAN7_TXT)
+
+_MAN_XML  = $(patsubst %.txt,%.xml,$(MAN_TXT))
+_MAN_HTML = $(patsubst %.txt,%.html,$(MAN_TXT))
+_MAN_3    = $(patsubst %.txt,%.3,$(MAN3_TXT))
+_MAN_7    = $(patsubst %.txt,%.7,$(MAN7_TXT))
+
+MAN_XML   = $(addprefix $(OUTPUT),$(_MAN_XML))
+MAN_HTML  = $(addprefix $(OUTPUT),$(_MAN_HTML))
+MAN_3     = $(addprefix $(OUTPUT),$(_MAN_3))
+MAN_7     = $(addprefix $(OUTPUT),$(_MAN_7))
+MAN_X     = $(MAN_3) $(MAN_7)
+
+# Make the path relative to DESTDIR, not prefix
+ifndef DESTDIR
+  prefix ?=$(HOME)
+endif
+
+mandir  ?= $(prefix)/share/man
+man3dir  = $(mandir)/man3
+man7dir  = $(mandir)/man7
+
+docdir  ?= $(prefix)/share/doc/libperf
+htmldir  = $(docdir)/html
+exdir    = $(docdir)/examples
+
+ASCIIDOC        = asciidoc
+ASCIIDOC_EXTRA  = --unsafe -f asciidoc.conf
+ASCIIDOC_HTML   = xhtml11
+MANPAGE_XSL     = manpage-normal.xsl
+XMLTO_EXTRA     =
+XMLTO           =xmlto
+
+INSTALL ?= install
+RM      ?= rm -f
+
+# For asciidoc ...
+#      -7.1.2, no extra settings are needed.
+#      8.0-,   set ASCIIDOC8.
+#
+
+# For docbook-xsl ...
+#      -1.68.1,        set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0)
+#      1.69.0,         no extra settings are needed?
+#      1.69.1-1.71.0,  set DOCBOOK_SUPPRESS_SP?
+#      1.71.1,         no extra settings are needed?
+#      1.72.0,         set DOCBOOK_XSL_172.
+#      1.73.0-,        set ASCIIDOC_NO_ROFF
+
+# If you had been using DOCBOOK_XSL_172 in an attempt to get rid
+# of 'the ".ft C" problem' in your generated manpages, and you
+# instead ended up with weird characters around callouts, try
+# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8).
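+#
+# For example, with asciidoc 8.x and docbook-xsl 1.73.0 or later you
+# would typically build with (illustrative invocation, not from this
+# patch):
+#   make ASCIIDOC8=1 ASCIIDOC_NO_ROFF=1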
+
+ifdef ASCIIDOC8
+  ASCIIDOC_EXTRA += -a asciidoc7compatible
+endif
+ifdef DOCBOOK_XSL_172
+  ASCIIDOC_EXTRA += -a libperf-asciidoc-no-roff
+  MANPAGE_XSL = manpage-1.72.xsl
+else
+  ifdef ASCIIDOC_NO_ROFF
+    # docbook-xsl after 1.72 needs the regular XSL, but will not
+    # pass-thru raw roff codes from asciidoc.conf, so turn them off.
+    ASCIIDOC_EXTRA += -a libperf-asciidoc-no-roff
+  endif
+endif
+ifdef MAN_BOLD_LITERAL
+  XMLTO_EXTRA += -m manpage-bold-literal.xsl
+endif
+ifdef DOCBOOK_SUPPRESS_SP
+  XMLTO_EXTRA += -m manpage-suppress-sp.xsl
+endif
+
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+export DESTDIR DESTDIR_SQ
+
+# Please note that there is a minor bug in asciidoc.
+# The version after 6.0.3 _will_ include the patch found here:
+#   http://marc.theaimsgroup.com/?l=libtraceevent&m=111558757202243&w=2
+#
+# Until that version is released you may have to apply the patch
+# yourself - yes, all 6 characters of it!
+
+QUIET_SUBDIR0  = +$(MAKE) -C # space to separate -C and subdir
+QUIET_SUBDIR1  =
+
+ifneq ($(findstring $(MAKEFLAGS),w),w)
+  PRINT_DIR = --no-print-directory
+else # "make -w"
+  NO_SUBDIR = :
+endif
+
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+  ifneq ($(V),1)
+    QUIET_ASCIIDOC = @echo '  ASCIIDOC '$@;
+    QUIET_XMLTO    = @echo '  XMLTO    '$@;
+  endif
+endif
+
+all: $(MAN_X) $(MAN_HTML)
+
+$(MAN_HTML) $(MAN_X): asciidoc.conf
+
+install-man: all
+       $(call QUIET_INSTALL, man) \
+               $(INSTALL) -d -m 755 $(DESTDIR)$(man3dir); \
+               $(INSTALL) -m 644 $(MAN_3) $(DESTDIR)$(man3dir); \
+               $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir); \
+               $(INSTALL) -m 644 $(MAN_7) $(DESTDIR)$(man7dir);
+
+install-html:
+       $(call QUIET_INSTALL, html) \
+               $(INSTALL) -d -m 755 $(DESTDIR)$(htmldir); \
+               $(INSTALL) -m 644 $(MAN_HTML) $(DESTDIR)$(htmldir); \
+
+install-examples:
+       $(call QUIET_INSTALL, examples) \
+               $(INSTALL) -d -m 755 $(DESTDIR)$(exdir); \
+               $(INSTALL) -m 644 $(MAN_EX) $(DESTDIR)$(exdir); \
+
+CLEAN_FILES =                                  \
+       $(MAN_XML) $(addsuffix +,$(MAN_XML))    \
+       $(MAN_HTML) $(addsuffix +,$(MAN_HTML))  \
+       $(MAN_X)
+
+clean:
+       $(call QUIET_CLEAN, Documentation) $(RM) $(CLEAN_FILES)
+
+$(MAN_3): $(OUTPUT)%.3: %.xml
+       $(QUIET_XMLTO)$(XMLTO) -o $(OUTPUT). -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
+
+$(MAN_7): $(OUTPUT)%.7: %.xml
+       $(QUIET_XMLTO)$(XMLTO) -o $(OUTPUT). -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
+
+$(MAN_XML): $(OUTPUT)%.xml: %.txt
+       $(QUIET_ASCIIDOC)$(ASCIIDOC) -b docbook -d manpage \
+               $(ASCIIDOC_EXTRA) -alibperf_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+       mv $@+ $@
+
+$(MAN_HTML): $(OUTPUT)%.html: %.txt
+       $(QUIET_ASCIIDOC)$(ASCIIDOC) -b $(ASCIIDOC_HTML) -d manpage \
+       $(ASCIIDOC_EXTRA) -aperf_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+       mv $@+ $@
diff --git a/tools/lib/perf/Documentation/asciidoc.conf b/tools/lib/perf/Documentation/asciidoc.conf
new file mode 100644 (file)
index 0000000..9d5a5a5
--- /dev/null
@@ -0,0 +1,120 @@
+## linktep: macro
+#
+# Usage: linktep:command[manpage-section]
+#
+# Note, {0} is the manpage section, while {target} is the command.
+#
+# Show TEP link as: <command>(<section>) if section is defined; else just show
+# the command.
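+#
+# For example, "linktep:libperf[3]" renders as libperf(3) in the
+# docbook backend.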
+
+[macros]
+(?su)[\\]?(?P<name>linktep):(?P<target>\S*?)\[(?P<attrlist>.*?)\]=
+
+[attributes]
+asterisk=&#42;
+plus=&#43;
+caret=&#94;
+startsb=&#91;
+endsb=&#93;
+tilde=&#126;
+
+ifdef::backend-docbook[]
+[linktep-inlinemacro]
+{0%{target}}
+{0#<citerefentry>}
+{0#<refentrytitle>{target}</refentrytitle><manvolnum>{0}</manvolnum>}
+{0#</citerefentry>}
+endif::backend-docbook[]
+
+ifdef::backend-docbook[]
+ifndef::tep-asciidoc-no-roff[]
+# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this.
+# v1.72 breaks with this because it replaces dots not in roff requests.
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+ifdef::doctype-manpage[]
+&#10;.ft C&#10;
+endif::doctype-manpage[]
+|
+ifdef::doctype-manpage[]
+&#10;.ft&#10;
+endif::doctype-manpage[]
+</literallayout>
+{title#}</example>
+endif::tep-asciidoc-no-roff[]
+
+ifdef::tep-asciidoc-no-roff[]
+ifdef::doctype-manpage[]
+# The following two small workarounds insert a simple paragraph after screen
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+|
+</literallayout><simpara></simpara>
+{title#}</example>
+
+[verseblock]
+<formalpara{id? id="{id}"}><title>{title}</title><para>
+{title%}<literallayout{id? id="{id}"}>
+{title#}<literallayout>
+|
+</literallayout>
+{title#}</para></formalpara>
+{title%}<simpara></simpara>
+endif::doctype-manpage[]
+endif::tep-asciidoc-no-roff[]
+endif::backend-docbook[]
+
+ifdef::doctype-manpage[]
+ifdef::backend-docbook[]
+[header]
+template::[header-declarations]
+<refentry>
+<refmeta>
+<refentrytitle>{mantitle}</refentrytitle>
+<manvolnum>{manvolnum}</manvolnum>
+<refmiscinfo class="source">libperf</refmiscinfo>
+<refmiscinfo class="version">{libperf_version}</refmiscinfo>
+<refmiscinfo class="manual">libperf Manual</refmiscinfo>
+</refmeta>
+<refnamediv>
+  <refname>{manname1}</refname>
+  <refname>{manname2}</refname>
+  <refname>{manname3}</refname>
+  <refname>{manname4}</refname>
+  <refname>{manname5}</refname>
+  <refname>{manname6}</refname>
+  <refname>{manname7}</refname>
+  <refname>{manname8}</refname>
+  <refname>{manname9}</refname>
+  <refname>{manname10}</refname>
+  <refname>{manname11}</refname>
+  <refname>{manname12}</refname>
+  <refname>{manname13}</refname>
+  <refname>{manname14}</refname>
+  <refname>{manname15}</refname>
+  <refname>{manname16}</refname>
+  <refname>{manname17}</refname>
+  <refname>{manname18}</refname>
+  <refname>{manname19}</refname>
+  <refname>{manname20}</refname>
+  <refname>{manname21}</refname>
+  <refname>{manname22}</refname>
+  <refname>{manname23}</refname>
+  <refname>{manname24}</refname>
+  <refname>{manname25}</refname>
+  <refname>{manname26}</refname>
+  <refname>{manname27}</refname>
+  <refname>{manname28}</refname>
+  <refname>{manname29}</refname>
+  <refname>{manname30}</refname>
+  <refpurpose>{manpurpose}</refpurpose>
+</refnamediv>
+endif::backend-docbook[]
+endif::doctype-manpage[]
+
+ifdef::backend-xhtml11[]
+[linktep-inlinemacro]
+<a href="{target}.html">{target}{0?({0})}</a>
+endif::backend-xhtml11[]
diff --git a/tools/lib/perf/Documentation/examples/sampling.c b/tools/lib/perf/Documentation/examples/sampling.c
new file mode 100644 (file)
index 0000000..8e1a926
--- /dev/null
@@ -0,0 +1,119 @@
+#include <linux/perf_event.h>
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <perf/cpumap.h>
+#include <perf/threadmap.h>
+#include <perf/mmap.h>
+#include <perf/core.h>
+#include <perf/event.h>
+#include <stdio.h>
+#include <unistd.h>
+
+static int libperf_print(enum libperf_print_level level,
+                         const char *fmt, va_list ap)
+{
+       return vfprintf(stderr, fmt, ap);
+}
+
+union u64_swap {
+       __u64 val64;
+       __u32 val32[2];
+};
+
+int main(int argc, char **argv)
+{
+       struct perf_evlist *evlist;
+       struct perf_evsel *evsel;
+       struct perf_mmap *map;
+       struct perf_cpu_map *cpus;
+       struct perf_event_attr attr = {
+               .type        = PERF_TYPE_HARDWARE,
+               .config      = PERF_COUNT_HW_CPU_CYCLES,
+               .disabled    = 1,
+               .freq        = 1,
+               .sample_freq = 10,
+               .sample_type = PERF_SAMPLE_IP|PERF_SAMPLE_TID|PERF_SAMPLE_CPU|PERF_SAMPLE_PERIOD,
+       };
+       int err = -1;
+       union perf_event *event;
+
+       libperf_init(libperf_print);
+
+       cpus = perf_cpu_map__new(NULL);
+       if (!cpus) {
+               fprintf(stderr, "failed to create cpus\n");
+               return -1;
+       }
+
+       evlist = perf_evlist__new();
+       if (!evlist) {
+               fprintf(stderr, "failed to create evlist\n");
+               goto out_cpus;
+       }
+
+       evsel = perf_evsel__new(&attr);
+       if (!evsel) {
+               fprintf(stderr, "failed to create cycles\n");
+               goto out_cpus;
+       }
+
+       perf_evlist__add(evlist, evsel);
+
+       perf_evlist__set_maps(evlist, cpus, NULL);
+
+       err = perf_evlist__open(evlist);
+       if (err) {
+               fprintf(stderr, "failed to open evlist\n");
+               goto out_evlist;
+       }
+
+       err = perf_evlist__mmap(evlist, 4);
+       if (err) {
+               fprintf(stderr, "failed to mmap evlist\n");
+               goto out_evlist;
+       }
+
+       perf_evlist__enable(evlist);
+       sleep(3);
+       perf_evlist__disable(evlist);
+
+       perf_evlist__for_each_mmap(evlist, map, false) {
+               if (perf_mmap__read_init(map) < 0)
+                       continue;
+
+               while ((event = perf_mmap__read_event(map)) != NULL) {
+                       int cpu, pid, tid;
+                       __u64 ip, period, *array;
+                       union u64_swap u;
+
+                       array = event->sample.array;
+
+                       ip = *array;
+                       array++;
+
+                       u.val64 = *array;
+                       pid = u.val32[0];
+                       tid = u.val32[1];
+                       array++;
+
+                       u.val64 = *array;
+                       cpu = u.val32[0];
+                       array++;
+
+                       period = *array;
+
+                       fprintf(stdout, "cpu %3d, pid %6d, tid %6d, ip %20llx, period %20llu\n",
+                               cpu, pid, tid, ip, period);
+
+                       perf_mmap__consume(map);
+               }
+
+               perf_mmap__read_done(map);
+       }
+
+out_evlist:
+       perf_evlist__delete(evlist);
+out_cpus:
+       perf_cpu_map__put(cpus);
+       return err;
+}
diff --git a/tools/lib/perf/Documentation/libperf-counting.txt b/tools/lib/perf/Documentation/libperf-counting.txt
new file mode 100644 (file)
index 0000000..cae9757
--- /dev/null
@@ -0,0 +1,211 @@
+libperf-counting(7)
+===================
+
+NAME
+----
+libperf-counting - counting interface
+
+DESCRIPTION
+-----------
+The counting interface provides an API to measure and get counts for specific perf events.
+
+The following walkthrough explains counting using the `counting.c` example.
+
+It is by no means a complete guide to counting, but it shows the basic libperf API for counting.
+
+The `counting.c` example comes with the libperf package and can be compiled and run like:
+
+[source,bash]
+--
+$ gcc -o counting counting.c -lperf
+$ sudo ./counting
+count 176792, enabled 176944, run 176944
+count 176242, enabled 176242, run 176242
+--
+
+It requires root access, because of the `PERF_COUNT_SW_CPU_CLOCK` event,
+which is available only to root.
+
+The `counting.c` example monitors two events on the current process and displays their counts; in a nutshell it:
+
+* creates events
+* adds them to the event list
+* opens and enables events through the event list
+* does some workload
+* disables events
+* reads and displays event counts
+* destroys the event list
+
+The first thing you need to do before using libperf is to call the init function:
+
+[source,c]
+--
+  8 static int libperf_print(enum libperf_print_level level,
+  9                          const char *fmt, va_list ap)
+ 10 {
+ 11         return vfprintf(stderr, fmt, ap);
+ 12 }
+
+ 14 int main(int argc, char **argv)
+ 15 {
+ ...
+ 35         libperf_init(libperf_print);
+--
+
+This sets up the library and registers a function for its debug output.
+
+The `libperf_print` callback will receive any message with its debug level,
+defined as:
+
+[source,c]
+--
+enum libperf_print_level {
+        LIBPERF_ERR,
+        LIBPERF_WARN,
+        LIBPERF_INFO,
+        LIBPERF_DEBUG,
+        LIBPERF_DEBUG2,
+        LIBPERF_DEBUG3,
+};
+--
+
+Once the setup is complete, we start by defining specific events using `struct perf_event_attr`.
+
+We create software events for the CPU clock and the task clock:
+
+[source,c]
+--
+ 20         struct perf_event_attr attr1 = {
+ 21                 .type        = PERF_TYPE_SOFTWARE,
+ 22                 .config      = PERF_COUNT_SW_CPU_CLOCK,
+ 23                 .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING,
+ 24                 .disabled    = 1,
+ 25         };
+ 26         struct perf_event_attr attr2 = {
+ 27                 .type        = PERF_TYPE_SOFTWARE,
+ 28                 .config      = PERF_COUNT_SW_TASK_CLOCK,
+ 29                 .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING,
+ 30                 .disabled    = 1,
+ 31         };
+--
+
+The `read_format` setup tells perf to include timing details together with each count.
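+
+If the kernel had to multiplex the event with others, `run` can be smaller
+than `ena`; a scaled estimate can then be derived from the three values
+(a sketch using the `struct perf_counts_values` layout, with a
+hypothetical helper name):
+
+[source,c]
+--
+  /* Estimate the count, correcting for the fraction of time
+   * the event was actually running on the PMU. */
+  static uint64_t scaled_count(struct perf_counts_values *c)
+  {
+          return c->run ? (uint64_t)((double)c->val * c->ena / c->run) : 0;
+  }
+--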
+
+The next step is to prepare a threads map.
+
+In this case we will monitor the current process, so we create a threads map with a single pid (0):
+
+[source,c]
+--
+ 37         threads = perf_thread_map__new_dummy();
+ 38         if (!threads) {
+ 39                 fprintf(stderr, "failed to create threads\n");
+ 40                 return -1;
+ 41         }
+ 42
+ 43         perf_thread_map__set_pid(threads, 0, 0);
+--
+
+Now we create libperf's event list, which will serve as a holder for the events we want:
+
+[source,c]
+--
+ 45         evlist = perf_evlist__new();
+ 46         if (!evlist) {
+ 47                 fprintf(stderr, "failed to create evlist\n");
+ 48                 goto out_threads;
+ 49         }
+--
+
+We create libperf's events for the attributes we defined earlier and add them to the list:
+
+[source,c]
+--
+ 51         evsel = perf_evsel__new(&attr1);
+ 52         if (!evsel) {
+ 53                 fprintf(stderr, "failed to create evsel1\n");
+ 54                 goto out_evlist;
+ 55         }
+ 56
+ 57         perf_evlist__add(evlist, evsel);
+ 58
+ 59         evsel = perf_evsel__new(&attr2);
+ 60         if (!evsel) {
+ 61                 fprintf(stderr, "failed to create evsel2\n");
+ 62                 goto out_evlist;
+ 63         }
+ 64
+ 65         perf_evlist__add(evlist, evsel);
+--
+
+Configure the event list with the thread map and open the events:
+
+[source,c]
+--
+ 67         perf_evlist__set_maps(evlist, NULL, threads);
+ 68
+ 69         err = perf_evlist__open(evlist);
+ 70         if (err) {
+ 71                 fprintf(stderr, "failed to open evsel\n");
+ 72                 goto out_evlist;
+ 73         }
+--
+
+Both events are created as disabled (note the `disabled = 1` assignment above),
+so we need to enable the whole list explicitly (both events).
+
+From this moment the events are counting and we can do our workload.
+
+When we are done, we disable the events list.
+
+[source,c]
+--
+ 75         perf_evlist__enable(evlist);
+ 76
+ 77         while (count--);
+ 78
+ 79         perf_evlist__disable(evlist);
+--
+
+Now we need to get the counts from the events; the following code iterates through the events list and reads the counts:
+
+[source,c]
+--
+ 81         perf_evlist__for_each_evsel(evlist, evsel) {
+ 82                 perf_evsel__read(evsel, 0, 0, &counts);
+ 83                 fprintf(stdout, "count %llu, enabled %llu, run %llu\n",
+ 84                         counts.val, counts.ena, counts.run);
+ 85         }
+--
+
+And finally, cleanup.
+
+We close the whole events list (both events), delete it, and release the threads map:
+
+[source,c]
+--
+ 87         perf_evlist__close(evlist);
+ 88
+ 89 out_evlist:
+ 90         perf_evlist__delete(evlist);
+ 91 out_threads:
+ 92         perf_thread_map__put(threads);
+ 93         return err;
+ 94 }
+--
+
+REPORTING BUGS
+--------------
+Report bugs to <linux-perf-users@vger.kernel.org>.
+
+LICENSE
+-------
+libperf is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+SEE ALSO
+--------
+libperf(3), libperf-sampling(7)
diff --git a/tools/lib/perf/Documentation/libperf-sampling.txt b/tools/lib/perf/Documentation/libperf-sampling.txt
new file mode 100644 (file)
index 0000000..d71a7b4
--- /dev/null
@@ -0,0 +1,243 @@
+libperf-sampling(7)
+===================
+
+NAME
+----
+libperf-sampling - sampling interface
+
+
+DESCRIPTION
+-----------
+The sampling interface provides an API to set up sampling for specific perf events and read back the recorded samples.
+
+The following walkthrough explains sampling using the `sampling.c` example.
+
+It is by no means a complete guide to sampling, but it shows the basic libperf API for sampling.
+
+The `sampling.c` example comes with the libperf package and can be compiled and run like:
+
+[source,bash]
+--
+$ gcc -o sampling sampling.c -lperf
+$ sudo ./sampling
+cpu   0, pid      0, tid      0, ip     ffffffffad06c4e6, period                    1
+cpu   0, pid   4465, tid   4469, ip     ffffffffad118748, period             18322959
+cpu   0, pid      0, tid      0, ip     ffffffffad115722, period             33544846
+cpu   0, pid   4465, tid   4470, ip         7f84fe0cdad6, period             23687474
+cpu   0, pid      0, tid      0, ip     ffffffffad9e0349, period             34255790
+cpu   0, pid   4465, tid   4469, ip     ffffffffad136581, period             38664069
+cpu   0, pid      0, tid      0, ip     ffffffffad9e55e2, period             21922384
+cpu   0, pid   4465, tid   4470, ip         7f84fe0ebebf, period             17655175
+...
+--
+
+It requires root access, because it uses the hardware cycles event.
+
+The `sampling.c` example profiles/samples all CPUs with hardware cycles; in a nutshell it:
+
+- creates events
+- adds them to the event list
+- opens and enables events through the event list
+- sleeps for 3 seconds
+- disables events
+- reads and displays recorded samples
+- destroys the event list
+
+The first thing you need to do before using libperf is to call the init function:
+
+[source,c]
+--
+ 12 static int libperf_print(enum libperf_print_level level,
+ 13                          const char *fmt, va_list ap)
+ 14 {
+ 15         return vfprintf(stderr, fmt, ap);
+ 16 }
+
+ 23 int main(int argc, char **argv)
+ 24 {
+ ...
+ 40         libperf_init(libperf_print);
+--
+
+This sets up the library and registers a function for its debug output.
+
+The `libperf_print` callback will receive any message with its debug level,
+defined as:
+
+[source,c]
+--
+enum libperf_print_level {
+        LIBPERF_ERR,
+        LIBPERF_WARN,
+        LIBPERF_INFO,
+        LIBPERF_DEBUG,
+        LIBPERF_DEBUG2,
+        LIBPERF_DEBUG3,
+};
+--
+
+Once the setup is complete, we start by defining the cycles event using `struct perf_event_attr`:
+
+[source,c]
+--
+ 29         struct perf_event_attr attr = {
+ 30                 .type        = PERF_TYPE_HARDWARE,
+ 31                 .config      = PERF_COUNT_HW_CPU_CYCLES,
+ 32                 .disabled    = 1,
+ 33                 .freq        = 1,
+ 34                 .sample_freq = 10,
+ 35                 .sample_type = PERF_SAMPLE_IP|PERF_SAMPLE_TID|PERF_SAMPLE_CPU|PERF_SAMPLE_PERIOD,
+ 36         };
+--
+
+The next step is to prepare a cpus map.
+
+In this case we will monitor all the available CPUs:
+
+[source,c]
+--
+ 42         cpus = perf_cpu_map__new(NULL);
+ 43         if (!cpus) {
+ 44                 fprintf(stderr, "failed to create cpus\n");
+ 45                 return -1;
+ 46         }
+--
+
+Now we create libperf's event list, which will serve as a holder for the cycles event:
+
+[source,c]
+--
+ 48         evlist = perf_evlist__new();
+ 49         if (!evlist) {
+ 50                 fprintf(stderr, "failed to create evlist\n");
+ 51                 goto out_cpus;
+ 52         }
+--
+
+We create libperf's event for the cycles attribute we defined earlier and add it to the list:
+
+[source,c]
+--
+ 54         evsel = perf_evsel__new(&attr);
+ 55         if (!evsel) {
+ 56                 fprintf(stderr, "failed to create cycles\n");
+ 57                 goto out_cpus;
+ 58         }
+ 59
+ 60         perf_evlist__add(evlist, evsel);
+--
+
+Configure the event list with the cpus map and open the event:
+
+[source,c]
+--
+ 62         perf_evlist__set_maps(evlist, cpus, NULL);
+ 63
+ 64         err = perf_evlist__open(evlist);
+ 65         if (err) {
+ 66                 fprintf(stderr, "failed to open evlist\n");
+ 67                 goto out_evlist;
+ 68         }
+--
+
+Once the events list is open, we can create memory maps AKA perf ring buffers:
+
+[source,c]
+--
+ 70         err = perf_evlist__mmap(evlist, 4);
+ 71         if (err) {
+ 72                 fprintf(stderr, "failed to mmap evlist\n");
+ 73                 goto out_evlist;
+ 74         }
+--
+
+The event is created as disabled (note the `disabled = 1` assignment above),
+so we need to enable the events list explicitly.
+
+From this moment the cycles event is sampling.
+
+We will sleep for 3 seconds while the ring buffers get data from all CPUs, then we disable the events list.
+
+[source,c]
+--
+ 76         perf_evlist__enable(evlist);
+ 77         sleep(3);
+ 78         perf_evlist__disable(evlist);
+--
+
+The following code walks through the ring buffers and reads the stored events/samples:
+
+[source,c]
+--
+ 80         perf_evlist__for_each_mmap(evlist, map, false) {
+ 81                 if (perf_mmap__read_init(map) < 0)
+ 82                         continue;
+ 83
+ 84                 while ((event = perf_mmap__read_event(map)) != NULL) {
+
+                            /* process event */
+
+108                         perf_mmap__consume(map);
+109                 }
+110                 perf_mmap__read_done(map);
+111         }
+
+--
+
+Each sample then needs to be parsed; the layout of the parsed fields follows the `sample_type` bits set in the event attribute:
+
+[source,c]
+--
+ 85                         int cpu, pid, tid;
+ 86                         __u64 ip, period, *array;
+ 87                         union u64_swap u;
+ 88
+ 89                         array = event->sample.array;
+ 90
+ 91                         ip = *array;
+ 92                         array++;
+ 93
+ 94                         u.val64 = *array;
+ 95                         pid = u.val32[0];
+ 96                         tid = u.val32[1];
+ 97                         array++;
+ 98
+ 99                         u.val64 = *array;
+100                         cpu = u.val32[0];
+101                         array++;
+102
+103                         period = *array;
+104
+105                         fprintf(stdout, "cpu %3d, pid %6d, tid %6d, ip %20llx, period %20llu\n",
+106                                 cpu, pid, tid, ip, period);
+--
+
+And finally, cleanup.
+
+We delete the events list and release the cpus map:
+
+[source,c]
+--
+113 out_evlist:
+114         perf_evlist__delete(evlist);
+115 out_cpus:
+116         perf_cpu_map__put(cpus);
+117         return err;
+118 }
+--
+
+REPORTING BUGS
+--------------
+Report bugs to <linux-perf-users@vger.kernel.org>.
+
+LICENSE
+-------
+libperf is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+SEE ALSO
+--------
+libperf(3), libperf-counting(7)
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
new file mode 100644 (file)
index 0000000..5a6bb51
--- /dev/null
@@ -0,0 +1,246 @@
+libperf(3)
+==========
+
+NAME
+----
+libperf - Linux kernel perf event library
+
+
+SYNOPSIS
+--------
+*Generic API:*
+
+[source,c]
+--
+  #include <perf/core.h>
+
+  enum libperf_print_level {
+          LIBPERF_ERR,
+          LIBPERF_WARN,
+          LIBPERF_INFO,
+          LIBPERF_DEBUG,
+          LIBPERF_DEBUG2,
+          LIBPERF_DEBUG3,
+  };
+
+  typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
+                                    const char *, va_list ap);
+
+  void libperf_init(libperf_print_fn_t fn);
+--
+
+*API to handle cpu maps:*
+
+[source,c]
+--
+  #include <perf/cpumap.h>
+
+  struct perf_cpu_map;
+
+  struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+  struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
+  struct perf_cpu_map *perf_cpu_map__read(FILE *file);
+  struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
+  struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
+                                           struct perf_cpu_map *other);
+  void perf_cpu_map__put(struct perf_cpu_map *map);
+  int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+  int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
+  bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+  int perf_cpu_map__max(struct perf_cpu_map *map);
+
+  #define perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+--
+
+*API to handle thread maps:*
+
+[source,c]
+--
+  #include <perf/threadmap.h>
+
+  struct perf_thread_map;
+
+  struct perf_thread_map *perf_thread_map__new_dummy(void);
+
+  void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid);
+  char *perf_thread_map__comm(struct perf_thread_map *map, int thread);
+  int perf_thread_map__nr(struct perf_thread_map *threads);
+  pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread);
+
+  struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
+  void perf_thread_map__put(struct perf_thread_map *map);
+--
+
+*API to handle event lists:*
+
+[source,c]
+--
+  #include <perf/evlist.h>
+
+  struct perf_evlist;
+
+  void perf_evlist__add(struct perf_evlist *evlist,
+                        struct perf_evsel *evsel);
+  void perf_evlist__remove(struct perf_evlist *evlist,
+                           struct perf_evsel *evsel);
+  struct perf_evlist *perf_evlist__new(void);
+  void perf_evlist__delete(struct perf_evlist *evlist);
+  struct perf_evsel* perf_evlist__next(struct perf_evlist *evlist,
+                                       struct perf_evsel *evsel);
+  int perf_evlist__open(struct perf_evlist *evlist);
+  void perf_evlist__close(struct perf_evlist *evlist);
+  void perf_evlist__enable(struct perf_evlist *evlist);
+  void perf_evlist__disable(struct perf_evlist *evlist);
+
+  #define perf_evlist__for_each_evsel(evlist, pos)
+
+  void perf_evlist__set_maps(struct perf_evlist *evlist,
+                             struct perf_cpu_map *cpus,
+                             struct perf_thread_map *threads);
+  int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
+  int perf_evlist__filter_pollfd(struct perf_evlist *evlist,
+                                 short revents_and_mask);
+
+  int perf_evlist__mmap(struct perf_evlist *evlist, int pages);
+  void perf_evlist__munmap(struct perf_evlist *evlist);
+
+  struct perf_mmap *perf_evlist__next_mmap(struct perf_evlist *evlist,
+                                           struct perf_mmap *map,
+                                           bool overwrite);
+
+  #define perf_evlist__for_each_mmap(evlist, pos, overwrite)
+--
+
+*API to handle events:*
+
+[source,c]
+--
+  #include <perf/evsel.h>
+
+  struct perf_evsel;
+
+  struct perf_counts_values {
+          union {
+                  struct {
+                          uint64_t val;
+                          uint64_t ena;
+                          uint64_t run;
+                  };
+                  uint64_t values[3];
+          };
+  };
+
+  struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr);
+  void perf_evsel__delete(struct perf_evsel *evsel);
+  int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
+                       struct perf_thread_map *threads);
+  void perf_evsel__close(struct perf_evsel *evsel);
+  void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
+  int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+                       struct perf_counts_values *count);
+  int perf_evsel__enable(struct perf_evsel *evsel);
+  int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu);
+  int perf_evsel__disable(struct perf_evsel *evsel);
+  int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu);
+  struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
+  struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
+  struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
+--
+
+*API to handle maps (perf ring buffers):*
+
+[source,c]
+--
+  #include <perf/mmap.h>
+
+  struct perf_mmap;
+
+  void perf_mmap__consume(struct perf_mmap *map);
+  int perf_mmap__read_init(struct perf_mmap *map);
+  void perf_mmap__read_done(struct perf_mmap *map);
+  union perf_event *perf_mmap__read_event(struct perf_mmap *map);
+--
+
+*Structures to access perf API events:*
+
+[source,c]
+--
+  #include <perf/event.h>
+
+  struct perf_record_mmap;
+  struct perf_record_mmap2;
+  struct perf_record_comm;
+  struct perf_record_namespaces;
+  struct perf_record_fork;
+  struct perf_record_lost;
+  struct perf_record_lost_samples;
+  struct perf_record_read;
+  struct perf_record_throttle;
+  struct perf_record_ksymbol;
+  struct perf_record_bpf_event;
+  struct perf_record_sample;
+  struct perf_record_switch;
+  struct perf_record_header_attr;
+  struct perf_record_record_cpu_map;
+  struct perf_record_cpu_map_data;
+  struct perf_record_cpu_map;
+  struct perf_record_event_update_cpus;
+  struct perf_record_event_update_scale;
+  struct perf_record_event_update;
+  struct perf_trace_event_type;
+  struct perf_record_header_event_type;
+  struct perf_record_header_tracing_data;
+  struct perf_record_header_build_id;
+  struct perf_record_id_index;
+  struct perf_record_auxtrace_info;
+  struct perf_record_auxtrace;
+  struct perf_record_auxtrace_error;
+  struct perf_record_aux;
+  struct perf_record_itrace_start;
+  struct perf_record_thread_map_entry;
+  struct perf_record_thread_map;
+  struct perf_record_stat_config_entry;
+  struct perf_record_stat_config;
+  struct perf_record_stat;
+  struct perf_record_stat_round;
+  struct perf_record_time_conv;
+  struct perf_record_header_feature;
+  struct perf_record_compressed;
+--
+
+DESCRIPTION
+-----------
+The libperf library provides an API to access the Linux kernel perf
+events subsystem.
+
+The following objects are key to the libperf interface:
+
+[horizontal]
+
+struct perf_cpu_map:: Provides a cpu list abstraction.
+
+struct perf_thread_map:: Provides a thread list abstraction.
+
+struct perf_evsel:: Provides an abstraction for a single perf event.
+
+struct perf_evlist:: Gathers several struct perf_evsel objects and performs functions on all of them.
+
+struct perf_mmap:: Provides an abstraction for accessing a perf ring buffer.
+
+The exported API functions bind these objects together.
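+
+A minimal sketch tying them together for a single self-monitoring counter
+(error handling omitted; it assumes only the API listed above, see
+libperf-counting(7) for a complete example):
+
+[source,c]
+--
+  #include <linux/perf_event.h>
+  #include <stdint.h>
+  #include <perf/core.h>
+  #include <perf/threadmap.h>
+  #include <perf/evsel.h>
+
+  static uint64_t count_task_clock(void)
+  {
+          struct perf_event_attr attr = {
+                  .type   = PERF_TYPE_SOFTWARE,
+                  .config = PERF_COUNT_SW_TASK_CLOCK,
+          };
+          struct perf_thread_map *threads = perf_thread_map__new_dummy();
+          struct perf_evsel *evsel = perf_evsel__new(&attr);
+          struct perf_counts_values counts;
+
+          perf_thread_map__set_pid(threads, 0, 0);  /* current process */
+          perf_evsel__open(evsel, NULL, threads);   /* no cpu map needed */
+          perf_evsel__read(evsel, 0, 0, &counts);
+
+          perf_evsel__close(evsel);
+          perf_evsel__delete(evsel);
+          perf_thread_map__put(threads);
+          return counts.val;
+  }
+--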
+
+REPORTING BUGS
+--------------
+Report bugs to <linux-perf-users@vger.kernel.org>.
+
+LICENSE
+-------
+libperf is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+SEE ALSO
+--------
+libperf-sampling(7), libperf-counting(7)
diff --git a/tools/lib/perf/Documentation/manpage-1.72.xsl b/tools/lib/perf/Documentation/manpage-1.72.xsl
new file mode 100644 (file)
index 0000000..b4d315c
--- /dev/null
@@ -0,0 +1,14 @@
+<!-- manpage-1.72.xsl:
+     special settings for manpages rendered from asciidoc+docbook
+     handles peculiarities in docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the special values for the roff control characters
+     needed for docbook-xsl 1.72.0 -->
+<xsl:param name="git.docbook.backslash">&#x2593;</xsl:param>
+<xsl:param name="git.docbook.dot"      >&#x2302;</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Documentation/manpage-base.xsl b/tools/lib/perf/Documentation/manpage-base.xsl
new file mode 100644 (file)
index 0000000..a264fa6
--- /dev/null
@@ -0,0 +1,35 @@
+<!-- manpage-base.xsl:
+     special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<!-- these params silence some output from xmlto -->
+<xsl:param name="man.output.quietly" select="1"/>
+<xsl:param name="refentry.meta.get.quietly" select="1"/>
+
+<!-- convert asciidoc callouts to man page format;
+     git.docbook.backslash and git.docbook.dot params
+     must be supplied by another XSL file or other means -->
+<xsl:template match="co">
+       <xsl:value-of select="concat(
+                             $git.docbook.backslash,'fB(',
+                             substring-after(@id,'-'),')',
+                             $git.docbook.backslash,'fR')"/>
+</xsl:template>
+<xsl:template match="calloutlist">
+       <xsl:value-of select="$git.docbook.dot"/>
+       <xsl:text>sp&#10;</xsl:text>
+       <xsl:apply-templates/>
+       <xsl:text>&#10;</xsl:text>
+</xsl:template>
+<xsl:template match="callout">
+       <xsl:value-of select="concat(
+                             $git.docbook.backslash,'fB',
+                             substring-after(@arearefs,'-'),
+                             '. ',$git.docbook.backslash,'fR')"/>
+       <xsl:apply-templates/>
+       <xsl:value-of select="$git.docbook.dot"/>
+       <xsl:text>br&#10;</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Documentation/manpage-bold-literal.xsl b/tools/lib/perf/Documentation/manpage-bold-literal.xsl
new file mode 100644 (file)
index 0000000..608eb5d
--- /dev/null
@@ -0,0 +1,17 @@
+<!-- manpage-bold-literal.xsl:
+     special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<!-- render literal text as bold (instead of plain or monospace);
+     this makes literal text easier to distinguish in manpages
+     viewed on a tty -->
+<xsl:template match="literal">
+       <xsl:value-of select="$git.docbook.backslash"/>
+       <xsl:text>fB</xsl:text>
+       <xsl:apply-templates/>
+       <xsl:value-of select="$git.docbook.backslash"/>
+       <xsl:text>fR</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Documentation/manpage-normal.xsl b/tools/lib/perf/Documentation/manpage-normal.xsl
new file mode 100644 (file)
index 0000000..a48f5b1
--- /dev/null
@@ -0,0 +1,13 @@
+<!-- manpage-normal.xsl:
+     special settings for manpages rendered from asciidoc+docbook
+     handles anything we want to keep away from docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the normal values for the roff control characters -->
+<xsl:param name="git.docbook.backslash">\</xsl:param>
+<xsl:param name="git.docbook.dot"      >.</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/lib/perf/Documentation/manpage-suppress-sp.xsl b/tools/lib/perf/Documentation/manpage-suppress-sp.xsl
new file mode 100644 (file)
index 0000000..a63c763
--- /dev/null
@@ -0,0 +1,21 @@
+<!-- manpage-suppress-sp.xsl:
+     special settings for manpages rendered from asciidoc+docbook
+     handles erroneous, inline .sp in manpage output of some
+     versions of docbook-xsl -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<!-- attempt to work around spurious .sp at the tail of the line
+     that some versions of docbook stylesheets seem to add -->
+<xsl:template match="simpara">
+  <xsl:variable name="content">
+    <xsl:apply-templates/>
+  </xsl:variable>
+  <xsl:value-of select="normalize-space($content)"/>
+  <xsl:if test="not(ancestor::authorblurb) and
+                not(ancestor::personblurb)">
+    <xsl:text>&#10;&#10;</xsl:text>
+  </xsl:if>
+</xsl:template>
+
+</xsl:stylesheet>
similarity index 96%
rename from tools/perf/lib/Makefile
rename to tools/lib/perf/Makefile
index 0f233638ef1fb44de2617b21c428e74ee6141c96..3718d65cffac79ffbbf74917c14cc6b37073768e 100644 (file)
@@ -60,7 +60,7 @@ else
 endif
 
 INCLUDES = \
--I$(srctree)/tools/perf/lib/include \
+-I$(srctree)/tools/lib/perf/include \
 -I$(srctree)/tools/lib/ \
 -I$(srctree)/tools/include \
 -I$(srctree)/tools/arch/$(SRCARCH)/include/ \
@@ -181,7 +181,10 @@ install_pkgconfig: $(LIBPERF_PC)
        $(call QUIET_INSTALL, $(LIBPERF_PC)) \
                $(call do_install,$(LIBPERF_PC),$(libdir_SQ)/pkgconfig,644)
 
-install: install_lib install_headers install_pkgconfig
+install_doc:
+       $(Q)$(MAKE) -C Documentation install-man install-html install-examples
+
+install: install_lib install_headers install_pkgconfig install_doc
 
 FORCE:
 
similarity index 100%
rename from tools/perf/lib/core.c
rename to tools/lib/perf/core.c
similarity index 77%
rename from tools/perf/lib/cpumap.c
rename to tools/lib/perf/cpumap.c
index 2ca1fafa620dfca8ee992b87eb35fe5c36220d0f..f93f4e703e4c76e44a0341ca1273646fd53ece7d 100644 (file)
@@ -68,14 +68,28 @@ static struct perf_cpu_map *cpu_map__default_new(void)
        return cpus;
 }
 
+static int cmp_int(const void *a, const void *b)
+{
+       return *(const int *)a - *(const int*)b;
+}
+
 static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
 {
        size_t payload_size = nr_cpus * sizeof(int);
        struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
+       int i, j;
 
        if (cpus != NULL) {
-               cpus->nr = nr_cpus;
                memcpy(cpus->map, tmp_cpus, payload_size);
+               qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
+               /* Remove dups */
+               j = 0;
+               for (i = 0; i < nr_cpus; i++) {
+                       if (i == 0 || cpus->map[i] != cpus->map[i - 1])
+                               cpus->map[j++] = cpus->map[i];
+               }
+               cpus->nr = j;
+               assert(j <= nr_cpus);
                refcount_set(&cpus->refcnt, 1);
        }
 
@@ -272,3 +286,60 @@ int perf_cpu_map__max(struct perf_cpu_map *map)
 
        return max;
 }
+
+/*
+ * Merge two cpumaps
+ *
+ * orig either gets freed and replaced with a new map, or reused
+ * with no reference count change (similar to "realloc")
+ * other has its reference count increased.
+ */
+
+struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
+                                        struct perf_cpu_map *other)
+{
+       int *tmp_cpus;
+       int tmp_len;
+       int i, j, k;
+       struct perf_cpu_map *merged;
+
+       if (!orig && !other)
+               return NULL;
+       if (!orig) {
+               perf_cpu_map__get(other);
+               return other;
+       }
+       if (!other)
+               return orig;
+       if (orig->nr == other->nr &&
+           !memcmp(orig->map, other->map, orig->nr * sizeof(int)))
+               return orig;
+
+       tmp_len = orig->nr + other->nr;
+       tmp_cpus = malloc(tmp_len * sizeof(int));
+       if (!tmp_cpus)
+               return NULL;
+
+       /* Standard merge algorithm from wikipedia */
+       i = j = k = 0;
+       while (i < orig->nr && j < other->nr) {
+               if (orig->map[i] <= other->map[j]) {
+                       if (orig->map[i] == other->map[j])
+                               j++;
+                       tmp_cpus[k++] = orig->map[i++];
+               } else
+                       tmp_cpus[k++] = other->map[j++];
+       }
+
+       while (i < orig->nr)
+               tmp_cpus[k++] = orig->map[i++];
+
+       while (j < other->nr)
+               tmp_cpus[k++] = other->map[j++];
+       assert(k <= tmp_len);
+
+       merged = cpu_map__trim_new(k, tmp_cpus);
+       free(tmp_cpus);
+       perf_cpu_map__put(orig);
+       return merged;
+}
similarity index 99%
rename from tools/perf/lib/evlist.c
rename to tools/lib/perf/evlist.c
index 205ddbb80bc1fab0745406438add906e57671519..5b9f2ca50591dd2a5b5b79d24d4e4bee979c1d84 100644 (file)
@@ -54,6 +54,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 
        perf_thread_map__put(evsel->threads);
        evsel->threads = perf_thread_map__get(evlist->threads);
+       evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
 }
 
 static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
@@ -163,6 +164,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
                evlist->threads = perf_thread_map__get(threads);
        }
 
+       if (!evlist->all_cpus && cpus)
+               evlist->all_cpus = perf_cpu_map__get(cpus);
+
        perf_evlist__propagate_maps(evlist);
 }
 
similarity index 77%
rename from tools/perf/lib/evsel.c
rename to tools/lib/perf/evsel.c
index 5a89857b0381ce23aaceb9ddd2f44647b8bfb2af..4dc06289f4c75334619705ffa19bef32f6ebb504 100644 (file)
@@ -114,16 +114,23 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
        return err;
 }
 
+static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
+{
+       int thread;
+
+       for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
+               if (FD(evsel, cpu, thread) >= 0)
+                       close(FD(evsel, cpu, thread));
+               FD(evsel, cpu, thread) = -1;
+       }
+}
+
 void perf_evsel__close_fd(struct perf_evsel *evsel)
 {
-       int cpu, thread;
+       int cpu;
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
-               for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
-                       if (FD(evsel, cpu, thread) >= 0)
-                               close(FD(evsel, cpu, thread));
-                       FD(evsel, cpu, thread) = -1;
-               }
+               perf_evsel__close_fd_cpu(evsel, cpu);
 }
 
 void perf_evsel__free_fd(struct perf_evsel *evsel)
@@ -141,6 +148,14 @@ void perf_evsel__close(struct perf_evsel *evsel)
        perf_evsel__free_fd(evsel);
 }
 
+void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
+{
+       if (evsel->fd == NULL)
+               return;
+
+       perf_evsel__close_fd_cpu(evsel, cpu);
+}
+
 int perf_evsel__read_size(struct perf_evsel *evsel)
 {
        u64 read_format = evsel->attr.read_format;
@@ -183,38 +198,61 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
 }
 
 static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
-                                int ioc,  void *arg)
+                                int ioc,  void *arg,
+                                int cpu)
 {
-       int cpu, thread;
+       int thread;
 
-       for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
-               for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread),
-                           err = ioctl(fd, ioc, arg);
+       for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+               int fd = FD(evsel, cpu, thread),
+                   err = ioctl(fd, ioc, arg);
 
-                       if (err)
-                               return err;
-               }
+               if (err)
+                       return err;
        }
 
        return 0;
 }
 
+int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
+{
+       return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
+}
+
 int perf_evsel__enable(struct perf_evsel *evsel)
 {
-       return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);
+       int i;
+       int err = 0;
+
+       for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
+               err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
+       return err;
+}
+
+int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
+{
+       return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
 }
 
 int perf_evsel__disable(struct perf_evsel *evsel)
 {
-       return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);
+       int i;
+       int err = 0;
+
+       for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
+               err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
+       return err;
 }
 
 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
 {
-       return perf_evsel__run_ioctl(evsel,
+       int err = 0, i;
+
+       for (i = 0; i < evsel->cpus->nr && !err; i++)
+               err = perf_evsel__run_ioctl(evsel,
                                     PERF_EVENT_IOC_SET_FILTER,
-                                    (void *)filter);
+                                    (void *)filter, i);
+       return err;
 }
 
 struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
similarity index 99%
rename from tools/perf/lib/include/internal/evlist.h
rename to tools/lib/perf/include/internal/evlist.h
index a2fbccf1922f2894e8380234c740afe0952dfd20..74dc8c3f0b667f06f6f6c71d53b05b96e7ac11f1 100644 (file)
@@ -18,6 +18,7 @@ struct perf_evlist {
        int                      nr_entries;
        bool                     has_user_cpus;
        struct perf_cpu_map     *cpus;
+       struct perf_cpu_map     *all_cpus;
        struct perf_thread_map  *threads;
        int                      nr_mmaps;
        size_t                   mmap_len;
similarity index 89%
rename from tools/perf/lib/include/perf/cpumap.h
rename to tools/lib/perf/include/perf/cpumap.h
index ac9aa497f84ab3151799e1bc4689b5e0b0fd1dc4..6a17ad730cbc112b7e3e5938a4cc47b688d09440 100644 (file)
@@ -12,6 +12,8 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
+                                                    struct perf_cpu_map *other);
 LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
 LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
similarity index 84%
rename from tools/perf/lib/include/perf/evsel.h
rename to tools/lib/perf/include/perf/evsel.h
index 557f5815a9c968b21ed3c53e89cde7ccee435555..c82ec39a4ad064dc349fc65ce6feb7553442605b 100644 (file)
@@ -26,10 +26,13 @@ LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel);
 LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
                                 struct perf_thread_map *threads);
 LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel);
+LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
 LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
                                 struct perf_counts_values *count);
 LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel);
+LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu);
 LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel);
+LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu);
 LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
 LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
 LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
similarity index 100%
rename from tools/perf/lib/lib.c
rename to tools/lib/perf/lib.c
similarity index 100%
rename from tools/perf/lib/mmap.c
rename to tools/lib/perf/mmap.c
similarity index 93%
rename from tools/perf/lib/tests/Makefile
rename to tools/lib/perf/tests/Makefile
index a43cd08c5c037a99edc6a798bef994f8356536fb..96841775feaf2b6d697963e96c955e37fcfceea0 100644 (file)
@@ -16,7 +16,7 @@ all:
 
 include $(srctree)/tools/scripts/Makefile.include
 
-INCLUDE = -I$(srctree)/tools/perf/lib/include -I$(srctree)/tools/include -I$(srctree)/tools/lib
+INCLUDE = -I$(srctree)/tools/lib/perf/include -I$(srctree)/tools/include -I$(srctree)/tools/lib
 
 $(TESTS_A): FORCE
        $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -o $@ $(subst -a,.c,$@) ../libperf.a $(LIBAPI)
index f2ae1b87c71952146c8ba03604ad44672808d97c..f645343815de631e345244044baaa43b23753e15 100644 (file)
@@ -96,6 +96,10 @@ int strtobool(const char *s, bool *res)
  * If libc has strlcpy() then that version will override this
  * implementation:
  */
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wignored-attributes"
+#endif
 size_t __weak strlcpy(char *dest, const char *src, size_t size)
 {
        size_t ret = strlen(src);
@@ -107,6 +111,9 @@ size_t __weak strlcpy(char *dest, const char *src, size_t size)
        }
        return ret;
 }
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
 
 /**
  * skip_spaces - Removes leading whitespace from @str.
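
The push/pop pair confines the suppression to the one weak definition: some clang versions emit -Wignored-attributes for this __weak override arrangement when libc already declares strlcpy(), and silencing it file-wide would hide real instances elsewhere (reasoning inferred from the hunk; the commit message is not shown here).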
index cbb429f550625a645650786b63b3cddc6c8ad6d0..c874c017c636969ef8b430213e89e78d18f9732c 100644 (file)
@@ -39,11 +39,12 @@ DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
 ifeq ($(LP64), 1)
-  libdir_relative = lib64
+  libdir_relative_temp = lib64
 else
-  libdir_relative = lib
+  libdir_relative_temp = lib
 endif
 
+libdir_relative ?= $(libdir_relative_temp)
 prefix ?= /usr/local
 libdir = $(prefix)/$(libdir_relative)
 man_dir = $(prefix)/share/man
@@ -97,6 +98,7 @@ EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
 
 LIB_TARGET  = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION)
 LIB_INSTALL = libtraceevent.a libtraceevent.so*
+LIB_INSTALL := $(addprefix $(OUTPUT),$(LIB_INSTALL))
 
 INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
 
@@ -207,10 +209,11 @@ define do_install
        $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
 endef
 
-PKG_CONFIG_FILE = libtraceevent.pc
+PKG_CONFIG_SOURCE_FILE = libtraceevent.pc
+PKG_CONFIG_FILE := $(addprefix $(OUTPUT),$(PKG_CONFIG_SOURCE_FILE))
 define do_install_pkgconfig_file
        if [ -n "${pkgconfig_dir}" ]; then                                      \
-               cp -f ${PKG_CONFIG_FILE}.template ${PKG_CONFIG_FILE};           \
+               cp -f ${PKG_CONFIG_SOURCE_FILE}.template ${PKG_CONFIG_FILE};    \
                sed -i "s|INSTALL_PREFIX|${1}|g" ${PKG_CONFIG_FILE};            \
                sed -i "s|LIB_VERSION|${EVENT_PARSE_VERSION}|g" ${PKG_CONFIG_FILE}; \
                sed -i "s|LIB_DIR|${libdir}|g" ${PKG_CONFIG_FILE}; \
index f3cbf86e51acf6e46ce70423e76b767798248754..20eed719542e5fcb9388d33cb93635db11c30f5e 100644 (file)
@@ -1228,8 +1228,10 @@ filter_event(struct tep_event_filter *filter, struct tep_event *event,
        }
 
        filter_type = add_filter_type(filter, event->id);
-       if (filter_type == NULL)
+       if (filter_type == NULL) {
+               free_arg(arg);
                return TEP_ERRNO__MEM_ALLOC_FAILED;
+       }
 
        if (filter_type->filter)
                free_arg(filter_type->filter);
index f440989fa55e443d9355659ec6f08c09c8c501b3..349bb81482abbccf099ae803ba8fe16e0e97b358 100644 (file)
@@ -32,11 +32,12 @@ DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
 ifeq ($(LP64), 1)
-  libdir_relative = lib64
+  libdir_relative_tmp = lib64
 else
-  libdir_relative = lib
+  libdir_relative_tmp = lib
 endif
 
+libdir_relative ?= $(libdir_relative_tmp)
 prefix ?= /usr/local
 libdir = $(prefix)/$(libdir_relative)
 
index e8c972f89357d2dc47bbe57000e503f46d964f37..1b5042f134a8679bf0d6f0e707a839cd76bfce38 100644 (file)
@@ -112,6 +112,12 @@ OPTIONS
 --objdump=<path>::
         Path to objdump binary.
 
+--prefix=PREFIX::
+--prefix-strip=N::
+       Remove the first N entries from source file path names in executables
+       and add PREFIX. This allows displaying source code compiled on systems
+       with a different file system layout.
+
 --skip-missing::
        Skip symbols that cannot be annotated.
 
index 6a5bb2b170391da59b572f8c4230209fb04199e2..cf95baef7b61c1d2c116365d3135dd951fa43fad 100644 (file)
@@ -68,10 +68,11 @@ OPTIONS
 -------
 -i::
 --input=<path>::
-        Input file name.
+        Input file name, for the 'report', 'diff' and 'buildid-list' subcommands.
 -o::
 --output=<path>::
-        Output file name.
+        Output file name, for the 'record' subcommand. It does not work with
+        'report'; redirect the output to a file instead when using 'report'.
 --host::
         Collect host side performance profile.
 --guest::
index 8dbe2119686aab71600a670287ff052c0489a9ec..db61f16ffa56490c0b19cf732220b90380b1ade3 100644 (file)
@@ -367,6 +367,12 @@ OPTIONS
 --objdump=<path>::
         Path to objdump binary.
 
+--prefix=PREFIX::
+--prefix-strip=N::
+       Remove the first N entries from source file path names in executables
+       and add PREFIX. This allows displaying source code compiled on systems
+       with a different file system layout.
+
 --group::
        Show event group information together. It forces group output also
        if there are no groups defined in data file.
index 63f938b887dd135d31cc0e8928f650e23bbd3c0b..5fbe42bd599bfc479a8b012ca759527f39bd5f58 100644 (file)
@@ -110,6 +110,10 @@ OPTIONS for 'perf sched timehist'
 --max-stack::
        Maximum number of functions to display in backtrace, default 5.
 
+-C=::
+--cpu=::
+       Only show events for the given CPU(s) (comma separated list).
+
 -p=::
 --pid=::
        Only show events for given process ID (comma separated list).
index 5596129a71cf5d5136e2fba62114ebe9af411ea7..324b6b53c86b65d325dd6726bb35d82b3a03f823 100644 (file)
@@ -158,6 +158,12 @@ Default is to monitor all CPUS.
 -M::
 --disassembler-style=:: Set disassembler style for objdump.
 
+--prefix=PREFIX::
+--prefix-strip=N::
+        Remove the first N entries from source file path names in executables
+        and add PREFIX. This allows displaying source code compiled on systems
+        with a different file system layout.
+
 --source::
        Interleave source code with assembly code. Enabled by default,
        disable with --no-source.
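
Taken together with the identical additions to perf annotate and perf report above, the pair is meant to be used as one unit, along the lines of perf top --prefix=/home/user/linux --prefix-strip=2 (paths illustrative), mapping build-machine paths onto a local source checkout.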
index 4934edb5adfd141cbc058736254685b941e4f710..5d7b947320fbed5595f6d4314b3d1a9a95b435e8 100644 (file)
@@ -7,6 +7,7 @@ tools/lib/traceevent
 tools/lib/api
 tools/lib/bpf
 tools/lib/subcmd
+tools/lib/perf
 tools/lib/argv_split.c
 tools/lib/ctype.c
 tools/lib/hweight.c
index c90f4146e5a2753e86a1b502be1f7db081055a4e..80e55e796be9cbf3d9e9cac86ee3d0b7770e51c7 100644 (file)
@@ -286,7 +286,7 @@ ifeq ($(DEBUG),0)
   endif
 endif
 
-INC_FLAGS += -I$(src-perf)/lib/include
+INC_FLAGS += -I$(srctree)/tools/lib/perf/include
 INC_FLAGS += -I$(src-perf)/util/include
 INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
 INC_FLAGS += -I$(srctree)/tools/include/
index eae5d5e95952765c186de6b2488c40542c31508b..3eda9d4b88e7da2a12a0171e706426034185d47d 100644 (file)
@@ -230,7 +230,7 @@ LIB_DIR         = $(srctree)/tools/lib/api/
 TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
 BPF_DIR         = $(srctree)/tools/lib/bpf/
 SUBCMD_DIR      = $(srctree)/tools/lib/subcmd/
-LIBPERF_DIR     = $(srctree)/tools/perf/lib/
+LIBPERF_DIR     = $(srctree)/tools/lib/perf/
 
 # Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
 # Without this setting the output feature dump file misses some features, for
index 6e2495cc4517191656784a9bf61a43653901e7cc..4284307d78226f2f76fa6673c6d22339ccca5bc0 100644 (file)
@@ -37,7 +37,7 @@
 
 .text
 .type perf_regs_load,%function
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
        str r0, [r0, #R0]
        str r1, [r0, #R1]
        str r2, [r0, #R2]
@@ -56,4 +56,4 @@ ENTRY(perf_regs_load)
        str lr, [r0, #PC]       // store pc as lr in order to skip the call
                                //  to this function
        mov pc, lr
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
index 07042511dca925fc3cbf3ce9594476bce0d19b47..d49de40b6818021d1ba8a75522c4cbf2901fff76 100644 (file)
@@ -7,7 +7,7 @@
 #define LDR_REG(r)     ldr x##r, [x0, 8 * r]
 #define SP     (8 * 31)
 #define PC     (8 * 32)
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
        STR_REG(0)
        STR_REG(1)
        STR_REG(2)
@@ -44,4 +44,4 @@ ENTRY(perf_regs_load)
        str x30, [x0, #PC]
        LDR_REG(1)
        ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
index bbe5a0d16e51083d5432c30f6a45dc8fcec7211f..80f14f52e3f6072fcc1ca179e544a2b945fac5e5 100644 (file)
@@ -28,7 +28,7 @@
 
 .text
 #ifdef HAVE_ARCH_X86_64_SUPPORT
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
        movq %rax, AX(%rdi)
        movq %rbx, BX(%rdi)
        movq %rcx, CX(%rdi)
@@ -60,9 +60,9 @@ ENTRY(perf_regs_load)
        movq %r14, R14(%rdi)
        movq %r15, R15(%rdi)
        ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
 #else
-ENTRY(perf_regs_load)
+SYM_FUNC_START(perf_regs_load)
        push %edi
        movl 8(%esp), %edi
        movl %eax, AX(%edi)
@@ -88,7 +88,7 @@ ENTRY(perf_regs_load)
        movl $0, FS(%edi)
        movl $0, GS(%edi)
        ret
-ENDPROC(perf_regs_load)
+SYM_FUNC_END(perf_regs_load)
 #endif
 
 /*
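
These perf test assembly stubs (arm, arm64 and x86 above) follow the kernel-wide move from the old ENTRY()/ENDPROC() annotations to the SYM_FUNC_START()/SYM_FUNC_END() pairs of the newer asm annotation scheme; the conversion is mechanical and is not expected to change the generated code.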
index 5898662bc8fbc1bfaeacc04470f37139828e47ff..ff61795a4d13783011cd25682e4894b61da21643 100644 (file)
@@ -535,6 +535,10 @@ int cmd_annotate(int argc, const char **argv)
                    "Display raw encoding of assembly instructions (default)"),
        OPT_STRING('M', "disassembler-style", &annotate.opts.disassembler_style, "disassembler style",
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
+       OPT_STRING(0, "prefix", &annotate.opts.prefix, "prefix",
+                   "Add prefix to source file path names in programs (with --prefix-strip)"),
+       OPT_STRING(0, "prefix-strip", &annotate.opts.prefix_strip, "N",
+                   "Strip first N entries of source file path name in programs (with --prefix)"),
        OPT_STRING(0, "objdump", &annotate.opts.objdump_path, "path",
                   "objdump binary to use for disassembly and annotations"),
        OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
@@ -574,6 +578,9 @@ int cmd_annotate(int argc, const char **argv)
                annotate.sym_hist_filter = argv[0];
        }
 
+       if (annotate_check_args(&annotate.opts) < 0)
+               return -EINVAL;
+
        if (symbol_conf.show_nr_samples && annotate.use_gtk) {
                pr_err("--show-nr-samples is not available in --gtk mode at this time\n");
                return ret;
index e69f44941aad9a7bed7f4a3cbc0d218f84985081..246ac0b4d54fbfe851de11d4e412307bd915b9ca 100644 (file)
@@ -595,8 +595,8 @@ tot_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
 {
        struct c2c_hist_entry *c2c_left;
        struct c2c_hist_entry *c2c_right;
-       unsigned int tot_hitm_left;
-       unsigned int tot_hitm_right;
+       uint64_t tot_hitm_left;
+       uint64_t tot_hitm_right;
 
        c2c_left  = container_of(left, struct c2c_hist_entry, he);
        c2c_right = container_of(right, struct c2c_hist_entry, he);
@@ -629,7 +629,8 @@ __f ## _cmp(struct perf_hpp_fmt *fmt __maybe_unused,                        \
                                                                        \
        c2c_left  = container_of(left, struct c2c_hist_entry, he);      \
        c2c_right = container_of(right, struct c2c_hist_entry, he);     \
-       return c2c_left->stats.__f - c2c_right->stats.__f;              \
+       return (uint64_t) c2c_left->stats.__f -                         \
+              (uint64_t) c2c_right->stats.__f;                         \
 }
 
 #define STAT_FN(__f)           \
@@ -682,7 +683,8 @@ ld_llcmiss_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
        c2c_left  = container_of(left, struct c2c_hist_entry, he);
        c2c_right = container_of(right, struct c2c_hist_entry, he);
 
-       return llc_miss(&c2c_left->stats) - llc_miss(&c2c_right->stats);
+       return (uint64_t) llc_miss(&c2c_left->stats) -
+              (uint64_t) llc_miss(&c2c_right->stats);
 }
 
 static uint64_t total_records(struct c2c_stats *stats)
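
These comparators return a signed 64-bit result, so the unsigned 32-bit subtraction they previously performed zero-extended its wrapped difference instead of going negative, inverting the sort order whenever left < right. A standalone illustration (not perf code) of the effect the casts avoid:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int left = 1, right = 2;
            int64_t bad  = left - right;                      /* zero-extends to 4294967295 */
            int64_t good = (uint64_t)left - (uint64_t)right;  /* sign survives: -1 */

            printf("bad=%" PRId64 " good=%" PRId64 "\n", bad, good);
            return 0;
    }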
@@ -2384,7 +2386,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
        c2c_browser__update_nr_entries(browser);
 
        while (1) {
-               key = hist_browser__run(browser, "? - help", true);
+               key = hist_browser__run(browser, "? - help", true, 0);
 
                switch (key) {
                case 's':
@@ -2453,7 +2455,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
        c2c_browser__update_nr_entries(browser);
 
        while (1) {
-               key = hist_browser__run(browser, "? - help", true);
+               key = hist_browser__run(browser, "? - help", true, 0);
 
                switch (key) {
                case 'q':
index 9664a72a089daf4d4551a83227b1f63fa1fe22b8..7e124a7b8bfdc4e42ba48846caeab4f66a46c41e 100644 (file)
@@ -403,17 +403,6 @@ static int perf_event__repipe_tracing_data(struct perf_session *session,
        return err;
 }
 
-static int perf_event__repipe_id_index(struct perf_session *session,
-                                      union perf_event *event)
-{
-       int err;
-
-       perf_event__repipe_synth(session->tool, event);
-       err = perf_event__process_id_index(session, event);
-
-       return err;
-}
-
 static int dso__read_build_id(struct dso *dso)
 {
        if (dso->has_build_id)
@@ -651,7 +640,7 @@ static int __cmd_inject(struct perf_inject *inject)
                inject->tool.comm           = perf_event__repipe_comm;
                inject->tool.namespaces     = perf_event__repipe_namespaces;
                inject->tool.exit           = perf_event__repipe_exit;
-               inject->tool.id_index       = perf_event__repipe_id_index;
+               inject->tool.id_index       = perf_event__process_id_index;
                inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
                inject->tool.auxtrace       = perf_event__process_auxtrace;
                inject->tool.aux            = perf_event__drop_aux;
index b5063d3b6fd077fd8b5b4d35936f44cd6609bae7..4c301466101baea9803f7a236fa1da3ed0396a80 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/string.h>
 #include <linux/time64.h>
 #include <linux/zalloc.h>
+#include <linux/bitmap.h>
 
 struct switch_output {
        bool             enabled;
@@ -93,7 +94,7 @@ struct record {
        bool                    timestamp_boundary;
        struct switch_output    switch_output;
        unsigned long long      samples;
-       cpu_set_t               affinity_mask;
+       struct mmap_cpu_mask    affinity_mask;
        unsigned long           output_max_size;        /* = 0: unlimited */
 };
 
@@ -832,7 +833,7 @@ try_again:
                        if ((errno == EINVAL || errno == EBADF) &&
                            pos->leader != pos &&
                            pos->weak_group) {
-                               pos = perf_evlist__reset_weak_group(evlist, pos);
+                               pos = perf_evlist__reset_weak_group(evlist, pos, true);
                                goto try_again;
                        }
                        rc = -errno;
@@ -961,10 +962,15 @@ static struct perf_event_header finished_round_event = {
 static void record__adjust_affinity(struct record *rec, struct mmap *map)
 {
        if (rec->opts.affinity != PERF_AFFINITY_SYS &&
-           !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
-               CPU_ZERO(&rec->affinity_mask);
-               CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
-               sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+           !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
+                         rec->affinity_mask.nbits)) {
+               bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
+               bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
+                         map->affinity_mask.bits, rec->affinity_mask.nbits);
+               sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
+                                 (cpu_set_t *)rec->affinity_mask.bits);
+               if (verbose == 2)
+                       mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
        }
 }
 
@@ -2433,7 +2439,6 @@ int cmd_record(int argc, const char **argv)
 # undef REASON
 #endif
 
-       CPU_ZERO(&rec->affinity_mask);
        rec->opts.affinity = PERF_AFFINITY_SYS;
 
        rec->evlist = evlist__new();
@@ -2499,6 +2504,16 @@ int cmd_record(int argc, const char **argv)
 
        symbol__init(NULL);
 
+       if (rec->opts.affinity != PERF_AFFINITY_SYS) {
+               rec->affinity_mask.nbits = cpu__max_cpu();
+               rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
+               if (!rec->affinity_mask.bits) {
+                       pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
+                       return -ENOMEM;
+               }
+               pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
+       }
+
        err = record__auxtrace_init(rec);
        if (err)
                goto out;
@@ -2613,6 +2628,7 @@ int cmd_record(int argc, const char **argv)
 
        err = __cmd_record(&record, argc, argv);
 out:
+       bitmap_free(rec->affinity_mask.bits);
        evlist__delete(rec->evlist);
        symbol__exit();
        auxtrace_record__free(rec->itr);
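
The affinity hunks above drop the fixed-size cpu_set_t for a struct mmap_cpu_mask whose storage is sized at runtime from cpu__max_cpu(). The compare-and-copy idiom they rely on, distilled (helpers from tools/include/linux/bitmap.h, which the hunk now includes):

    #include <linux/bitmap.h>       /* bitmap_zero/or/equal */
    #include <stdbool.h>

    /* Hypothetical helper mirroring record__adjust_affinity() above. */
    static bool mask_update(unsigned long *dst, const unsigned long *src,
                            unsigned int nbits)
    {
            if (bitmap_equal(dst, src, nbits))
                    return false;           /* affinity already matches */
            bitmap_zero(dst, nbits);
            bitmap_or(dst, dst, src, nbits);
            return true;                    /* caller re-applies sched_setaffinity() */
    }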
index 830d563de8898f92559af9dd5bcf5803f948cefa..9483b3f0cae3f50004d0a6ea9e4ede717d185627 100644 (file)
@@ -388,6 +388,14 @@ static int report__setup_sample_type(struct report *rep)
                }
        }
 
+       if (sort__mode == SORT_MODE__MEMORY) {
+               if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
+                       ui__error("Selected --mem-mode but no mem data. "
+                                 "Did you call perf record without -d?\n");
+                       return -1;
+               }
+       }
+
        if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
                if ((sample_type & PERF_SAMPLE_REGS_USER) &&
                    (sample_type & PERF_SAMPLE_STACK_USER)) {
@@ -404,10 +412,10 @@ static int report__setup_sample_type(struct report *rep)
                                PERF_SAMPLE_BRANCH_ANY))
                rep->nonany_branch_mode = true;
 
-#ifndef HAVE_LIBUNWIND_SUPPORT
+#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
        if (dwarf_callchain_users) {
-               ui__warning("Please install libunwind development packages "
-                           "during the perf build.\n");
+               ui__warning("Please install libunwind or libdw "
+                           "development packages during the perf build.\n");
        }
 #endif
 
@@ -1068,6 +1076,7 @@ int cmd_report(int argc, const char **argv)
        struct stat st;
        bool has_br_stack = false;
        int branch_mode = -1;
+       int last_key = 0;
        bool branch_call_mode = false;
 #define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
        static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
@@ -1155,7 +1164,8 @@ int cmd_report(int argc, const char **argv)
                             report_callchain_help, &report_parse_callchain_opt,
                             callchain_default_opt),
        OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
-                   "Accumulate callchains of children and show total overhead as well"),
+                   "Accumulate callchains of children and show total overhead as well. "
+                   "Enabled by default, use --no-children to disable."),
        OPT_INTEGER(0, "max-stack", &report.max_stack,
                    "Set the maximum stack depth when parsing the callchain, "
                    "anything beyond the specified depth will be ignored. "
@@ -1198,6 +1208,10 @@ int cmd_report(int argc, const char **argv)
                    "Display raw encoding of assembly instructions (default)"),
        OPT_STRING('M', "disassembler-style", &report.annotation_opts.disassembler_style, "disassembler style",
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
+       OPT_STRING(0, "prefix", &report.annotation_opts.prefix, "prefix",
+                   "Add prefix to source file path names in programs (with --prefix-strip)"),
+       OPT_STRING(0, "prefix-strip", &report.annotation_opts.prefix_strip, "N",
+                   "Strip first N entries of source file path name in programs (with --prefix)"),
        OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
                    "Show a column with the sum of periods"),
        OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
@@ -1277,6 +1291,9 @@ int cmd_report(int argc, const char **argv)
                report.symbol_filter_str = argv[0];
        }
 
+       if (annotate_check_args(&report.annotation_opts) < 0)
+               return -EINVAL;
+
        if (report.mmaps_mode)
                report.tasks_mode = true;
 
@@ -1442,7 +1459,8 @@ repeat:
                sort_order = sort_tmp;
        }
 
-       if (setup_sorting(session->evlist) < 0) {
+       if ((last_key != K_SWITCH_INPUT_DATA) &&
+           (setup_sorting(session->evlist) < 0)) {
                if (sort_order)
                        parse_options_usage(report_usage, options, "s", 1);
                if (field_order)
@@ -1522,6 +1540,7 @@ repeat:
        ret = __cmd_report(&report);
        if (ret == K_SWITCH_INPUT_DATA) {
                perf_session__delete(session);
+               last_key = K_SWITCH_INPUT_DATA;
                goto repeat;
        } else
                ret = 0;
index 8a12d71364c309ee88cbf76061c90ca046118f38..82fcc2c15fe469b1dc20cd6ed8c516c1d8d0858e 100644 (file)
@@ -51,6 +51,9 @@
 #define SYM_LEN                        129
 #define MAX_PID                        1024000
 
+static const char *cpu_list;
+static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+
 struct sched_atom;
 
 struct task_desc {
@@ -2008,6 +2011,9 @@ static void timehist_print_sample(struct perf_sched *sched,
        char nstr[30];
        u64 wait_time;
 
+       if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+               return;
+
        timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
        printf("%15s [%04d] ", tstr, sample->cpu);
 
@@ -2994,6 +3000,12 @@ static int perf_sched__timehist(struct perf_sched *sched)
        if (IS_ERR(session))
                return PTR_ERR(session);
 
+       if (cpu_list) {
+               err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
+               if (err < 0)
+                       goto out;
+       }
+
        evlist = session->evlist;
 
        symbol__init(&session->header.env);
@@ -3429,6 +3441,7 @@ int cmd_sched(int argc, const char **argv)
                   "analyze events only for given process id(s)"),
        OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
                   "analyze events only for given thread id(s)"),
+       OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
        OPT_PARENT(sched_options)
        };
 
index 0a15253b438c5e413bfc01159fcb627f6c329650..a098c2ebf4eac474c69b554ac40a279fbfc8afa7 100644 (file)
@@ -65,6 +65,7 @@
 #include "util/target.h"
 #include "util/time-utils.h"
 #include "util/top.h"
+#include "util/affinity.h"
 #include "asm/bug.h"
 
 #include <linux/time64.h>
@@ -265,15 +266,10 @@ static int read_single_counter(struct evsel *counter, int cpu,
  * Read out the results of a single counter:
  * do not aggregate counts across CPUs in system-wide mode
  */
-static int read_counter(struct evsel *counter, struct timespec *rs)
+static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
 {
        int nthreads = perf_thread_map__nr(evsel_list->core.threads);
-       int ncpus, cpu, thread;
-
-       if (target__has_cpu(&target) && !target__has_per_thread(&target))
-               ncpus = perf_evsel__nr_cpus(counter);
-       else
-               ncpus = 1;
+       int thread;
 
        if (!counter->supported)
                return -ENOENT;
@@ -282,40 +278,38 @@ static int read_counter(struct evsel *counter, struct timespec *rs)
                nthreads = 1;
 
        for (thread = 0; thread < nthreads; thread++) {
-               for (cpu = 0; cpu < ncpus; cpu++) {
-                       struct perf_counts_values *count;
-
-                       count = perf_counts(counter->counts, cpu, thread);
-
-                       /*
-                        * The leader's group read loads data into its group members
-                        * (via perf_evsel__read_counter) and sets threir count->loaded.
-                        */
-                       if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
-                           read_single_counter(counter, cpu, thread, rs)) {
-                               counter->counts->scaled = -1;
-                               perf_counts(counter->counts, cpu, thread)->ena = 0;
-                               perf_counts(counter->counts, cpu, thread)->run = 0;
-                               return -1;
-                       }
+               struct perf_counts_values *count;
 
-                       perf_counts__set_loaded(counter->counts, cpu, thread, false);
+               count = perf_counts(counter->counts, cpu, thread);
 
-                       if (STAT_RECORD) {
-                               if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
-                                       pr_err("failed to write stat event\n");
-                                       return -1;
-                               }
-                       }
+               /*
+                * The leader's group read loads data into its group members
+                * (via perf_evsel__read_counter()) and sets their count->loaded.
+                */
+               if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
+                   read_single_counter(counter, cpu, thread, rs)) {
+                       counter->counts->scaled = -1;
+                       perf_counts(counter->counts, cpu, thread)->ena = 0;
+                       perf_counts(counter->counts, cpu, thread)->run = 0;
+                       return -1;
+               }
+
+               perf_counts__set_loaded(counter->counts, cpu, thread, false);
 
-                       if (verbose > 1) {
-                               fprintf(stat_config.output,
-                                       "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
-                                               perf_evsel__name(counter),
-                                               cpu,
-                                               count->val, count->ena, count->run);
+               if (STAT_RECORD) {
+                       if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
+                               pr_err("failed to write stat event\n");
+                               return -1;
                        }
                }
+
+               if (verbose > 1) {
+                       fprintf(stat_config.output,
+                               "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+                                       perf_evsel__name(counter),
+                                       cpu,
+                                       count->val, count->ena, count->run);
+               }
        }
 
        return 0;
@@ -324,15 +318,37 @@ static int read_counter(struct evsel *counter, struct timespec *rs)
 static void read_counters(struct timespec *rs)
 {
        struct evsel *counter;
-       int ret;
+       struct affinity affinity;
+       int i, ncpus, cpu;
+
+       if (affinity__setup(&affinity) < 0)
+               return;
+
+       ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
+       if (!target__has_cpu(&target) || target__has_per_thread(&target))
+               ncpus = 1;
+       evlist__for_each_cpu(evsel_list, i, cpu) {
+               if (i >= ncpus)
+                       break;
+               affinity__set(&affinity, cpu);
+
+               evlist__for_each_entry(evsel_list, counter) {
+                       if (evsel__cpu_iter_skip(counter, cpu))
+                               continue;
+                       if (!counter->err) {
+                               counter->err = read_counter_cpu(counter, rs,
+                                                               counter->cpu_iter - 1);
+                       }
+               }
+       }
+       affinity__cleanup(&affinity);
 
        evlist__for_each_entry(evsel_list, counter) {
-               ret = read_counter(counter, rs);
-               if (ret)
+               if (counter->err)
                        pr_debug("failed to read counter %s\n", counter->name);
-
-               if (ret == 0 && perf_stat_process_counter(&stat_config, counter))
+               if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
                        pr_warning("failed to process counter %s\n", counter->name);
+               counter->err = 0;
        }
 }
 
@@ -420,6 +436,62 @@ static bool is_target_alive(struct target *_target,
        return false;
 }
 
+enum counter_recovery {
+       COUNTER_SKIP,
+       COUNTER_RETRY,
+       COUNTER_FATAL,
+};
+
+static enum counter_recovery stat_handle_error(struct evsel *counter)
+{
+       char msg[BUFSIZ];
+       /*
+        * PPC returns ENXIO for HW counters until 2.6.37
+        * (behavior changed with commit b0a873e).
+        */
+       if (errno == EINVAL || errno == ENOSYS ||
+           errno == ENOENT || errno == EOPNOTSUPP ||
+           errno == ENXIO) {
+               if (verbose > 0)
+                       ui__warning("%s event is not supported by the kernel.\n",
+                                   perf_evsel__name(counter));
+               counter->supported = false;
+               /*
+                * errored is a sticky flag that means one of the counter's
+                * cpu event had a problem and needs to be reexamined.
+                */
+               counter->errored = true;
+
+               if ((counter->leader != counter) ||
+                   !(counter->leader->core.nr_members > 1))
+                       return COUNTER_SKIP;
+       } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
+               if (verbose > 0)
+                       ui__warning("%s\n", msg);
+               return COUNTER_RETRY;
+       } else if (target__has_per_thread(&target) &&
+                  evsel_list->core.threads &&
+                  evsel_list->core.threads->err_thread != -1) {
+               /*
+                * For global --per-thread case, skip current
+                * error thread.
+                */
+               if (!thread_map__remove(evsel_list->core.threads,
+                                       evsel_list->core.threads->err_thread)) {
+                       evsel_list->core.threads->err_thread = -1;
+                       return COUNTER_RETRY;
+               }
+       }
+
+       perf_evsel__open_strerror(counter, &target,
+                                 errno, msg, sizeof(msg));
+       ui__error("%s\n", msg);
+
+       if (child_pid != -1)
+               kill(child_pid, SIGTERM);
+       return COUNTER_FATAL;
+}
+
 static int __run_perf_stat(int argc, const char **argv, int run_idx)
 {
        int interval = stat_config.interval;
@@ -433,6 +505,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
        int status = 0;
        const bool forks = (argc > 0);
        bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
+       struct affinity affinity;
+       int i, cpu;
+       bool second_pass = false;
 
        if (interval) {
                ts.tv_sec  = interval / USEC_PER_MSEC;
@@ -457,61 +532,104 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
        if (group)
                perf_evlist__set_leader(evsel_list);
 
-       evlist__for_each_entry(evsel_list, counter) {
+       if (affinity__setup(&affinity) < 0)
+               return -1;
+
+       evlist__for_each_cpu (evsel_list, i, cpu) {
+               affinity__set(&affinity, cpu);
+
+               evlist__for_each_entry(evsel_list, counter) {
+                       if (evsel__cpu_iter_skip(counter, cpu))
+                               continue;
+                       if (counter->reset_group || counter->errored)
+                               continue;
 try_again:
-               if (create_perf_stat_counter(counter, &stat_config, &target) < 0) {
-
-                       /* Weak group failed. Reset the group. */
-                       if ((errno == EINVAL || errno == EBADF) &&
-                           counter->leader != counter &&
-                           counter->weak_group) {
-                               counter = perf_evlist__reset_weak_group(evsel_list, counter);
-                               goto try_again;
-                       }
+                       if (create_perf_stat_counter(counter, &stat_config, &target,
+                                                    counter->cpu_iter - 1) < 0) {
 
-                       /*
-                        * PPC returns ENXIO for HW counters until 2.6.37
-                        * (behavior changed with commit b0a873e).
-                        */
-                       if (errno == EINVAL || errno == ENOSYS ||
-                           errno == ENOENT || errno == EOPNOTSUPP ||
-                           errno == ENXIO) {
-                               if (verbose > 0)
-                                       ui__warning("%s event is not supported by the kernel.\n",
-                                                   perf_evsel__name(counter));
-                               counter->supported = false;
-
-                               if ((counter->leader != counter) ||
-                                   !(counter->leader->core.nr_members > 1))
-                                       continue;
-                       } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
-                                if (verbose > 0)
-                                        ui__warning("%s\n", msg);
-                                goto try_again;
-                       } else if (target__has_per_thread(&target) &&
-                                  evsel_list->core.threads &&
-                                  evsel_list->core.threads->err_thread != -1) {
                                /*
-                                * For global --per-thread case, skip current
-                                * error thread.
+                                * Weak group failed. We cannot just undo this here
+                                * because earlier CPUs might be in group mode, and the kernel
+                                * doesn't support mixing group and non-group reads. Defer
+                                * it to later.
+                                * Don't close here because we're in the wrong affinity.
                                 */
-                               if (!thread_map__remove(evsel_list->core.threads,
-                                                       evsel_list->core.threads->err_thread)) {
-                                       evsel_list->core.threads->err_thread = -1;
+                               if ((errno == EINVAL || errno == EBADF) &&
+                                   counter->leader != counter &&
+                                   counter->weak_group) {
+                                       perf_evlist__reset_weak_group(evsel_list, counter, false);
+                                       assert(counter->reset_group);
+                                       second_pass = true;
+                                       continue;
+                               }
+
+                               switch (stat_handle_error(counter)) {
+                               case COUNTER_FATAL:
+                                       return -1;
+                               case COUNTER_RETRY:
                                        goto try_again;
+                               case COUNTER_SKIP:
+                                       continue;
+                               default:
+                                       break;
                                }
+
                        }
+                       counter->supported = true;
+               }
+       }
 
-                       perf_evsel__open_strerror(counter, &target,
-                                                 errno, msg, sizeof(msg));
-                       ui__error("%s\n", msg);
+       if (second_pass) {
+               /*
+                * Now redo all the weak group after closing them,
+                * and also close errored counters.
+                */
 
-                       if (child_pid != -1)
-                               kill(child_pid, SIGTERM);
+               evlist__for_each_cpu(evsel_list, i, cpu) {
+                       affinity__set(&affinity, cpu);
+                       /* First close errored or weak retry */
+                       evlist__for_each_entry(evsel_list, counter) {
+                               if (!counter->reset_group && !counter->errored)
+                                       continue;
+                               if (evsel__cpu_iter_skip_no_inc(counter, cpu))
+                                       continue;
+                               perf_evsel__close_cpu(&counter->core, counter->cpu_iter);
+                       }
+                       /* Now reopen weak */
+                       evlist__for_each_entry(evsel_list, counter) {
+                               if (!counter->reset_group && !counter->errored)
+                                       continue;
+                               if (evsel__cpu_iter_skip(counter, cpu))
+                                       continue;
+                               if (!counter->reset_group)
+                                       continue;
+try_again_reset:
+                               pr_debug2("reopening weak %s\n", perf_evsel__name(counter));
+                               if (create_perf_stat_counter(counter, &stat_config, &target,
+                                                            counter->cpu_iter - 1) < 0) {
+
+                                       switch (stat_handle_error(counter)) {
+                                       case COUNTER_FATAL:
+                                               return -1;
+                                       case COUNTER_RETRY:
+                                               goto try_again_reset;
+                                       case COUNTER_SKIP:
+                                               continue;
+                                       default:
+                                               break;
+                                       }
+                               }
+                               counter->supported = true;
+                       }
+               }
+       }
+       affinity__cleanup(&affinity);
 
-                       return -1;
+       evlist__for_each_entry(evsel_list, counter) {
+               if (!counter->supported) {
+                       perf_evsel__free_fd(&counter->core);
+                       continue;
                }
-               counter->supported = true;
 
                l = strlen(counter->unit);
                if (l > stat_config.unit_width)
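
The whole rework hinges on one loop shape: bind the thread's affinity once per CPU in the outer loop, then visit only the events active on that CPU, instead of bouncing affinity around from inside each event. Distilled from the hunks above (names as in the diff; error handling elided):

    evlist__for_each_cpu(evsel_list, i, cpu) {
            affinity__set(&affinity, cpu);          /* migrate once per CPU */
            evlist__for_each_entry(evsel_list, counter) {
                    if (evsel__cpu_iter_skip(counter, cpu))
                            continue;               /* evsel not mapped on this CPU */
                    /* per-CPU work against index counter->cpu_iter - 1 */
            }
    }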
index dc80044bc46f48cdd7123eced665a33017bd6343..8affcab756043dc4c31c43ccc43443446afaf37a 100644 (file)
@@ -1512,6 +1512,10 @@ int cmd_top(int argc, const char **argv)
                    "objdump binary to use for disassembly and annotations"),
        OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
+       OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
+                   "Add prefix to source file path names in programs (with --prefix-strip)"),
+       OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
+                   "Strip first N entries of source file path name in programs (with --prefix)"),
        OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
        OPT_CALLBACK(0, "percent-limit", &top, "percent",
                     "Don't show entries under that percent", parse_percent_limit),
@@ -1568,9 +1572,13 @@ int cmd_top(int argc, const char **argv)
         */
        status = perf_env__read_cpuid(&perf_env);
        if (status) {
-               pr_err("Couldn't read the cpuid for this machine: %s\n",
-                      str_error_r(errno, errbuf, sizeof(errbuf)));
-               goto out_delete_evlist;
+               /*
+                * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
+                * warn the user explicitly.
+                */
+               eprintf(status == ENOSYS ? 1 : 0, verbose,
+                       "Couldn't read the cpuid for this machine: %s\n",
+                       str_error_r(errno, errbuf, sizeof(errbuf)));
        }
        top.evlist->env = &perf_env;
 
@@ -1578,6 +1586,9 @@ int cmd_top(int argc, const char **argv)
        if (argc)
                usage_with_options(top_usage, options);
 
+       if (annotate_check_args(&top.annotation_opts) < 0)
+               goto out_delete_evlist;
+
        if (!top.evlist->core.nr_entries &&
            perf_evlist__add_default(top.evlist) < 0) {
                pr_err("Not enough memory for event selector list\n");
index a1dc1672435201513b3e3ad3a7bbbab59b4a32ec..68039a96c1dcaa7a2b9c5f54882ce39b98c71b0e 100755 (executable)
@@ -110,8 +110,8 @@ for i in $FILES; do
 done
 
 # diff with extra ignore lines
-check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
-check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
+check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
+check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
 check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
 check include/uapi/linux/mman.h       '-I "^#include <\(uapi/\)*asm/mman.h>"'
 check include/linux/ctype.h          '-I "isdigit("'
index b9c203219691a4758eff6578bd00ee7c09bec10b..e6b6181c6dc63b29abf7463c7bee52c048e5e112 100644 (file)
 
 #include <bpf.h>
 
-int probe(hrtimer_nanosleep, rqtp->tv_sec)(void *ctx, int err, long sec)
+#define NSEC_PER_SEC   1000000000L
+
+int probe(hrtimer_nanosleep, rqtp)(void *ctx, int err, long long sec)
 {
-       return sec == 5;
+       return sec / NSEC_PER_SEC == 5ULL;
 }
 
 license(GPL);
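
The probe previously dereferenced rqtp->tv_sec; matching now on sec / NSEC_PER_SEC suggests the traced hrtimer_nanosleep() argument has become a raw nanosecond count rather than a timespec pointer (an inference from this hunk alone; the kernel-side signature change is not part of this diff).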
diff --git a/tools/perf/lib/Documentation/Makefile b/tools/perf/lib/Documentation/Makefile
deleted file mode 100644 (file)
index 586425a..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-       rst2man man/libperf.rst > man/libperf.7
-       rst2pdf tutorial/tutorial.rst
-
-clean:
-       rm -f man/libperf.7
-       rm -f tutorial/tutorial.pdf
diff --git a/tools/perf/lib/Documentation/man/libperf.rst b/tools/perf/lib/Documentation/man/libperf.rst
deleted file mode 100644 (file)
index 09a270f..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-libperf
-
-The libperf library provides an API to access the linux kernel perf
-events subsystem. It provides the following high level objects:
-
-  - struct perf_cpu_map
-  - struct perf_thread_map
-  - struct perf_evlist
-  - struct perf_evsel
-
-reference
-=========
-Function reference by header files:
-
-perf/core.h
------------
-.. code-block:: c
-
-  typedef int (\*libperf_print_fn_t)(enum libperf_print_level level,
-                                     const char \*, va_list ap);
-
-  void libperf_set_print(libperf_print_fn_t fn);
-
-perf/cpumap.h
--------------
-.. code-block:: c
-
-  struct perf_cpu_map \*perf_cpu_map__dummy_new(void);
-  struct perf_cpu_map \*perf_cpu_map__new(const char \*cpu_list);
-  struct perf_cpu_map \*perf_cpu_map__read(FILE \*file);
-  struct perf_cpu_map \*perf_cpu_map__get(struct perf_cpu_map \*map);
-  void perf_cpu_map__put(struct perf_cpu_map \*map);
-  int perf_cpu_map__cpu(const struct perf_cpu_map \*cpus, int idx);
-  int perf_cpu_map__nr(const struct perf_cpu_map \*cpus);
-  perf_cpu_map__for_each_cpu(cpu, idx, cpus)
-
-perf/threadmap.h
-----------------
-.. code-block:: c
-
-  struct perf_thread_map \*perf_thread_map__new_dummy(void);
-  void perf_thread_map__set_pid(struct perf_thread_map \*map, int thread, pid_t pid);
-  char \*perf_thread_map__comm(struct perf_thread_map \*map, int thread);
-  struct perf_thread_map \*perf_thread_map__get(struct perf_thread_map \*map);
-  void perf_thread_map__put(struct perf_thread_map \*map);
-
-perf/evlist.h
--------------
-.. code-block::
-
-  void perf_evlist__init(struct perf_evlist \*evlist);
-  void perf_evlist__add(struct perf_evlist \*evlist,
-                      struct perf_evsel \*evsel);
-  void perf_evlist__remove(struct perf_evlist \*evlist,
-                         struct perf_evsel \*evsel);
-  struct perf_evlist \*perf_evlist__new(void);
-  void perf_evlist__delete(struct perf_evlist \*evlist);
-  struct perf_evsel\* perf_evlist__next(struct perf_evlist \*evlist,
-                                     struct perf_evsel \*evsel);
-  int perf_evlist__open(struct perf_evlist \*evlist);
-  void perf_evlist__close(struct perf_evlist \*evlist);
-  void perf_evlist__enable(struct perf_evlist \*evlist);
-  void perf_evlist__disable(struct perf_evlist \*evlist);
-  perf_evlist__for_each_evsel(evlist, pos)
-  void perf_evlist__set_maps(struct perf_evlist \*evlist,
-                           struct perf_cpu_map \*cpus,
-                           struct perf_thread_map \*threads);
-
-perf/evsel.h
-------------
-.. code-block:: c
-
-  struct perf_counts_values {
-        union {
-                struct {
-                        uint64_t val;
-                        uint64_t ena;
-                        uint64_t run;
-                };
-                uint64_t values[3];
-        };
-  };
-
-  void perf_evsel__init(struct perf_evsel \*evsel,
-                      struct perf_event_attr \*attr);
-  struct perf_evsel \*perf_evsel__new(struct perf_event_attr \*attr);
-  void perf_evsel__delete(struct perf_evsel \*evsel);
-  int perf_evsel__open(struct perf_evsel \*evsel, struct perf_cpu_map \*cpus,
-                     struct perf_thread_map \*threads);
-  void perf_evsel__close(struct perf_evsel \*evsel);
-  int perf_evsel__read(struct perf_evsel \*evsel, int cpu, int thread,
-                     struct perf_counts_values \*count);
-  int perf_evsel__enable(struct perf_evsel \*evsel);
-  int perf_evsel__disable(struct perf_evsel \*evsel);
-  int perf_evsel__apply_filter(struct perf_evsel \*evsel, const char \*filter);
-  struct perf_cpu_map \*perf_evsel__cpus(struct perf_evsel \*evsel);
-  struct perf_thread_map \*perf_evsel__threads(struct perf_evsel \*evsel);
-  struct perf_event_attr \*perf_evsel__attr(struct perf_evsel \*evsel);
diff --git a/tools/perf/lib/Documentation/tutorial/tutorial.rst b/tools/perf/lib/Documentation/tutorial/tutorial.rst
deleted file mode 100644 (file)
index 7be7bc2..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-libperf tutorial
-================
-
-Compile and install libperf from kernel sources
-===============================================
-.. code-block:: bash
-
-  git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-  cd linux/tools/perf/lib
-  make
-  sudo make install prefix=/usr
-
-Libperf object
-==============
-The libperf library provides several high level objects:
-
-struct perf_cpu_map
-  Provides a cpu list abstraction.
-
-struct perf_thread_map
-  Provides a thread list abstraction.
-
-struct perf_evsel
-  Provides an abstraction for single a perf event.
-
-struct perf_evlist
-  Gathers several struct perf_evsel object and performs functions on all of them.
-
-The exported API binds these objects together,
-for full reference see the libperf.7 man page.
-
-Examples
-========
-Examples aim to explain libperf functionality on simple use cases.
-They are based in on a checked out linux kernel git tree:
-
-.. code-block:: bash
-
-  $ cd tools/perf/lib/Documentation/tutorial/
-  $ ls -d  ex-*
-  ex-1-compile  ex-2-evsel-stat  ex-3-evlist-stat
-
-ex-1-compile example
-====================
-This example shows the basic usage of *struct perf_cpu_map*,
-how to create it and display its cpus:
-
-.. code-block:: bash
-
-  $ cd ex-1-compile/
-  $ make
-  gcc -o test test.c -lperf
-  $ ./test
-  0 1 2 3 4 5 6 7
-
-
-The full code listing is here:
-
-.. code-block:: c
-
-   1 #include <perf/cpumap.h>
-   2
-   3 int main(int argc, char **Argv)
-   4 {
-   5         struct perf_cpu_map *cpus;
-   6         int cpu, tmp;
-   7
-   8         cpus = perf_cpu_map__new(NULL);
-   9
-  10         perf_cpu_map__for_each_cpu(cpu, tmp, cpus)
-  11                 fprintf(stdout, "%d ", cpu);
-  12
-  13         fprintf(stdout, "\n");
-  14
-  15         perf_cpu_map__put(cpus);
-  16         return 0;
-  17 }
-
-
-First you need to include the proper header to have *struct perf_cpumap*
-declaration and functions:
-
-.. code-block:: c
-
-   1 #include <perf/cpumap.h>
-
-
-The *struct perf_cpumap* object is created by *perf_cpu_map__new* call.
-The *NULL* argument asks it to populate the object with the current online CPUs list:
-
-.. code-block:: c
-
-   8         cpus = perf_cpu_map__new(NULL);
-
-This is paired with a *perf_cpu_map__put*, that drops its reference at the end, possibly deleting it.
-
-.. code-block:: c
-
-  15         perf_cpu_map__put(cpus);
-
-The iteration through the *struct perf_cpumap* CPUs is done using the *perf_cpu_map__for_each_cpu*
-macro which requires 3 arguments:
-
-- cpu  - the cpu numer
-- tmp  - iteration helper variable
-- cpus - the *struct perf_cpumap* object
-
-.. code-block:: c
-
-  10         perf_cpu_map__for_each_cpu(cpu, tmp, cpus)
-  11                 fprintf(stdout, "%d ", cpu);
-
-ex-2-evsel-stat example
-=======================
-
-TBD
-
-ex-3-evlist-stat example
-========================
-
-TBD
index 436ce33f1182e7d1721e760016f3b21c442df4ba..5da8296b667e753a032d9c7eed7fd974f1ea7a9b 100644 (file)
@@ -32,7 +32,7 @@
                "EventCode": "132",
                "EventName": "DTLB1_GPAGE_WRITES",
                "BriefDescription": "DTLB1 Two-Gigabyte Page Writes",
-               "PublicDescription": "Counter:132       Name:DTLB1_GPAGE_WRITES A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page."
+               "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page."
        },
        {
                "Unit": "CPU-M-CF",
index 68618152ea2c62578368767fa1ddfd550b076015..89e070727e1bd388bc057d99c4a998c7dc6a701d 100644 (file)
@@ -4,7 +4,7 @@
                "EventCode": "128",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
-               "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+               "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
        },
        {
                "Unit": "CPU-M-CF",
index bc7151d639d7ee7f7a7997b7edbeb2ae9738eea9..45a34ce4fe89f6d7250bc3eca62ab09a72edad9d 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index 49c5f123d81154283b7a998ac73453af676352b1..961fe4395758e0f0893e55d03cd0826765b04419 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index 113d19e9267840aeaf6dd0995461efedc710de3b..746734ce09be7fc32717fa3a0205a85e3b811e75 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index 2ba32af9bc366a2cf72bb568989c8d253b541726..f94653229dd46ee26127ad1f0d6e2e840b68d810 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index c80f16fde6d0ee9248935cfc59d605f0a7717a37..5402cd3120f9d1a1f44e25f721941536e667a40c 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index e501729c3dd117d0a86d2606a487914a38fff429..832f3cb40b34f23cc82279025aef98c733dec7a2 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index e2446966b65137cf32d80a65ff3765fee73da0ad..d69b2a8fc0bc7fb601968c86b3b038f78d074dc1 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index 9294769dec64e9880283307f59bfbae40353cc1e..5f465fd81315d90a8b6684cd2006f56c37836b7a 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index 603ff9c2e9a17295ea6868cc3575313ee90a51b8..3e909b30600351b25fa90acccf1b19f957f8cdd0 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index c6b485b3a2cbad74eeea7cbf8611cfe954129abb..50c053235752042bd68adf7595bb948c46cf1634 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index 0ca539bb60f62b631faafbb6d897873d29c70247..e7feb60f9fa995b3179a51581e810fd3697a4137 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index 047d7e11aa6f83aef22c5e54a13fbe46cf9e7375..21d7a0c2c2e83c76500f8189e94671b21334ce16 100644 (file)
     },
     {
         "BriefDescription": "Fraction of cycles spent in Kernel mode",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
index a3c595fba9434df3e1269b5f1cd5148ba04f88db..1692529639b0df28324735498dcac49a51891e63 100644 (file)
@@ -54,6 +54,7 @@ perf-y += unit_number__scnprintf.o
 perf-y += mem2node.o
 perf-y += maps.o
 perf-y += time-utils-test.o
+perf-y += genelf.o
 
 $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
        $(call rule_mkdir)
index 415903b48578f28290f43399f568182e7b32f26d..da8ec1e8e06481f9296c5ac90741071d07e89776 100644 (file)
@@ -263,20 +263,20 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
                if (count1 == 11)
                        pr_debug("failed: RF EFLAG recursion issue detected\n");
                else
-                       pr_debug("failed: wrong count for bp1%lld\n", count1);
+                       pr_debug("failed: wrong count for bp1: %lld, expected 1\n", count1);
        }
 
        if (overflows != 3)
-               pr_debug("failed: wrong overflow hit\n");
+               pr_debug("failed: wrong overflow (%d) hit, expected 3\n", overflows);
 
        if (overflows_2 != 3)
-               pr_debug("failed: wrong overflow_2 hit\n");
+               pr_debug("failed: wrong overflow_2 (%d) hit, expected 3\n", overflows_2);
 
        if (count2 != 3)
-               pr_debug("failed: wrong count for bp2\n");
+               pr_debug("failed: wrong count for bp2 (%lld), expected 3\n", count2);
 
        if (count3 != 2)
-               pr_debug("failed: wrong count for bp3\n");
+               pr_debug("failed: wrong count for bp3 (%lld), expected 2\n", count3);
 
        return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
                TEST_OK : TEST_FAIL;
index 7115aa32a51ee4b0ee4d2a270eeae5420fe277be..5f05db75cdd866be44b0702b440369df93cb6a68 100644 (file)
@@ -259,6 +259,11 @@ static struct test generic_tests[] = {
                .desc = "Print cpu map",
                .func = test__cpu_map_print,
        },
+       {
+               .desc = "Merge cpu map",
+               .func = test__cpu_map_merge,
+       },
+
        {
                .desc = "Probe SDT events",
                .func = test__sdt_event,
@@ -296,6 +301,10 @@ static struct test generic_tests[] = {
                .desc = "time utils",
                .func = test__time_utils,
        },
+       {
+               .desc = "Test jit_write_elf",
+               .func = test__jit_write_elf,
+       },
        {
                .desc = "maps__merge_in",
                .func = test__maps__merge_in,
index 8a0d236202b05ba179199b2709080b949c5d52d3..4ac56741ac5fe6375fb4e0a1171b7bbd5a7f7c42 100644 (file)
@@ -120,3 +120,19 @@ int test__cpu_map_print(struct test *test __maybe_unused, int subtest __maybe_un
        TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1-10,12-20,22-30,32-40"));
        return 0;
 }
+
+int test__cpu_map_merge(struct test *test __maybe_unused, int subtest __maybe_unused)
+{
+       struct perf_cpu_map *a = perf_cpu_map__new("4,2,1");
+       struct perf_cpu_map *b = perf_cpu_map__new("4,5,7");
+       struct perf_cpu_map *c = perf_cpu_map__merge(a, b);
+       char buf[100];
+
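+       /* Merging "4,2,1" with "4,5,7" must yield the sorted union 1,2,4,5,7. */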
+       TEST_ASSERT_VAL("failed to merge map: bad nr", c->nr == 5);
+       cpu_map__snprint(c, buf, sizeof(buf));
+       TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7"));
+       perf_cpu_map__put(a);
+       perf_cpu_map__put(b);
+       perf_cpu_map__put(c);
+       return 0;
+}
index 1ee8704e22849726dd30a0aa74642515fd07f059..1e8a9f5c356dd623226c5fb7dee5e4f30b002b6f 100644 (file)
@@ -125,7 +125,7 @@ static int attach__cpu_disabled(struct evlist *evlist)
 
        evsel->core.attr.disabled = 1;
 
-       err = perf_evsel__open_per_cpu(evsel, cpus);
+       err = perf_evsel__open_per_cpu(evsel, cpus, -1);
        if (err) {
                if (err == -EACCES)
                        return TEST_SKIP;
@@ -152,7 +152,7 @@ static int attach__cpu_enabled(struct evlist *evlist)
                return -1;
        }
 
-       err = perf_evsel__open_per_cpu(evsel, cpus);
+       err = perf_evsel__open_per_cpu(evsel, cpus, -1);
        if (err == -EACCES)
                return TEST_SKIP;
 
diff --git a/tools/perf/tests/genelf.c b/tools/perf/tests/genelf.c
new file mode 100644 (file)
index 0000000..f797f98
--- /dev/null
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/compiler.h>
+
+#include "debug.h"
+#include "tests.h"
+
+#ifdef HAVE_JITDUMP
+#include <libelf.h>
+#include "../util/genelf.h"
+#endif
+
+#define TEMPL "/tmp/perf-test-XXXXXX"
+
+int test__jit_write_elf(struct test *test __maybe_unused,
+                       int subtest __maybe_unused)
+{
+#ifdef HAVE_JITDUMP
+       static unsigned char x86_code[] = {
+               0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */
+               0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */
+               0xCD, 0x80            /* int $0x80 */
+       };
+       char path[PATH_MAX];
+       int fd, ret;
+
+       strcpy(path, TEMPL);
+
+       fd = mkstemp(path);
+       if (fd < 0) {
+               perror("mkstemp failed");
+               return TEST_FAIL;
+       }
+
+       pr_info("Writing jit code to: %s\n", path);
+
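+       /*
+        * Write out a minimal ELF image holding the JITted "main" symbol
+        * at code address 0, with no debug or unwinding info.
+        */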
+       ret = jit_write_elf(fd, 0, "main", x86_code, sizeof(x86_code),
+                       NULL, 0, NULL, 0, 0);
+       close(fd);
+
+       unlink(path);
+
+       return ret ? TEST_FAIL : 0;
+#else
+       return TEST_SKIP;
+#endif
+}
index 25aea387e2bf0ed13afde728df4ad906556557a8..9a160fef47c9c91d032b00701413e9521dae25ea 100644 (file)
@@ -98,6 +98,7 @@ int test__event_update(struct test *test, int subtest);
 int test__event_times(struct test *test, int subtest);
 int test__backward_ring_buffer(struct test *test, int subtest);
 int test__cpu_map_print(struct test *test, int subtest);
+int test__cpu_map_merge(struct test *test, int subtest);
 int test__sdt_event(struct test *test, int subtest);
 int test__is_printable_array(struct test *test, int subtest);
 int test__bitmap_print(struct test *test, int subtest);
@@ -109,6 +110,7 @@ int test__unit_number__scnprint(struct test *test, int subtest);
 int test__mem2node(struct test *t, int subtest);
 int test__maps__merge_in(struct test *t, int subtest);
 int test__time_utils(struct test *t, int subtest);
+int test__jit_write_elf(struct test *test, int subtest);
 
 bool test__bp_signal_is_supported(void);
 bool test__bp_account_is_supported(void);
index 1a8d3be2030e97220a698e8a1f8d99db55570240..062ca849c8fda9c0d032c154c380ddf067a34a26 100644 (file)
@@ -45,6 +45,7 @@ static size_t clone__scnprintf_flags(unsigned long flags, char *bf, size_t size,
        P_FLAG(NEWPID);
        P_FLAG(NEWNET);
        P_FLAG(IO);
+       P_FLAG(CLEAR_SIGHAND);
 #undef P_FLAG
 
        if (flags)
index 173c8f760763048a46bbbf9cafa0be7c52319fb9..e0c13e6a5788a8e6e824bb9bee9ab42137639a50 100644 (file)
@@ -72,5 +72,5 @@ size_t syscall_arg__scnprintf_sockaddr(char *bf, size_t size, struct syscall_arg
        if (arg->augmented.args)
                return syscall_arg__scnprintf_augmented_sockaddr(arg, bf, size);
 
-       return scnprintf(bf, size, "%#x", arg->val);
+       return scnprintf(bf, size, "%#lx", arg->val);
 }
index d4d3558fdef427f825a7d80c146c541fffc8e8d7..f36dee4993201a4493830fd58f79a506891c3d3d 100644 (file)
@@ -18,7 +18,9 @@
 #include "../../util/evlist.h"
 #include "../../util/header.h"
 #include "../../util/hist.h"
+#include "../../util/machine.h"
 #include "../../util/map.h"
+#include "../../util/maps.h"
 #include "../../util/symbol.h"
 #include "../../util/map_symbol.h"
 #include "../../util/branch.h"
@@ -391,6 +393,57 @@ static void hist_entry__init_have_children(struct hist_entry *he)
        he->init_have_children = true;
 }
 
+static bool hist_browser__selection_has_children(struct hist_browser *browser)
+{
+       struct hist_entry *he = browser->he_selection;
+       struct map_symbol *ms = browser->selection;
+
+       if (!he || !ms)
+               return false;
+
+       if (ms == &he->ms)
+               return he->has_children;
+
+       return container_of(ms, struct callchain_list, ms)->has_children;
+}
+
+static bool hist_browser__he_selection_unfolded(struct hist_browser *browser)
+{
+       return browser->he_selection ? browser->he_selection->unfolded : false;
+}
+
+static bool hist_browser__selection_unfolded(struct hist_browser *browser)
+{
+       struct hist_entry *he = browser->he_selection;
+       struct map_symbol *ms = browser->selection;
+
+       if (!he || !ms)
+               return false;
+
+       if (ms == &he->ms)
+               return he->unfolded;
+
+       return container_of(ms, struct callchain_list, ms)->unfolded;
+}
+
+static char *hist_browser__selection_sym_name(struct hist_browser *browser, char *bf, size_t size)
+{
+       struct hist_entry *he = browser->he_selection;
+       struct map_symbol *ms = browser->selection;
+       struct callchain_list *callchain_entry;
+
+       if (!he || !ms)
+               return NULL;
+
+       if (ms == &he->ms) {
+               hist_entry__sym_snprintf(he, bf, size, 0);
+               return bf + 4; // skip the level, e.g. '[k] '
+       }
+
+       callchain_entry = container_of(ms, struct callchain_list, ms);
+       return callchain_list__sym_name(callchain_entry, bf, size, browser->show_dso);
+}
+
 static bool hist_browser__toggle_fold(struct hist_browser *browser)
 {
        struct hist_entry *he = browser->he_selection;
@@ -624,10 +677,81 @@ static int hist_browser__title(struct hist_browser *browser, char *bf, size_t si
        return browser->title ? browser->title(browser, bf, size) : 0;
 }
 
+static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_lost_event, char *title, size_t size, int key)
+{
+       switch (key) {
+       case K_TIMER: {
+               struct hist_browser_timer *hbt = browser->hbt;
+               u64 nr_entries;
+
+               WARN_ON_ONCE(!hbt);
+
+               if (hbt)
+                       hbt->timer(hbt->arg);
+
+               if (hist_browser__has_filter(browser) || symbol_conf.report_hierarchy)
+                       hist_browser__update_nr_entries(browser);
+
+               nr_entries = hist_browser__nr_entries(browser);
+               ui_browser__update_nr_entries(&browser->b, nr_entries);
+
+               if (warn_lost_event &&
+                   (browser->hists->stats.nr_lost_warned !=
+                   browser->hists->stats.nr_events[PERF_RECORD_LOST])) {
+                       browser->hists->stats.nr_lost_warned =
+                               browser->hists->stats.nr_events[PERF_RECORD_LOST];
+                       ui_browser__warn_lost_events(&browser->b);
+               }
+
+               hist_browser__title(browser, title, size);
+               ui_browser__show_title(&browser->b, title);
+               break;
+       }
+       case 'D': { /* Debug */
+               struct hist_entry *h = rb_entry(browser->b.top, struct hist_entry, rb_node);
+               static int seq;
+
+               ui_helpline__pop();
+               ui_helpline__fpush("%d: nr_ent=(%d,%d), etl: %d, rows=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
+                                  seq++, browser->b.nr_entries, browser->hists->nr_entries,
+                                  browser->b.extra_title_lines, browser->b.rows,
+                                  browser->b.index, browser->b.top_idx, h->row_offset, h->nr_rows);
+       }
+               break;
+       case 'C':
+               /* Collapse the whole world. */
+               hist_browser__set_folding(browser, false);
+               break;
+       case 'c':
+               /* Collapse the selected entry. */
+               hist_browser__set_folding_selected(browser, false);
+               break;
+       case 'E':
+               /* Expand the whole world. */
+               hist_browser__set_folding(browser, true);
+               break;
+       case 'e':
+               /* Expand the selected entry. */
+               hist_browser__set_folding_selected(browser, !hist_browser__he_selection_unfolded(browser));
+               break;
+       case 'H':
+               browser->show_headers = !browser->show_headers;
+               hist_browser__update_rows(browser);
+               break;
+       case '+':
+               if (hist_browser__toggle_fold(browser))
+                       break;
+               /* fall thru */
+       default:
+               return -1;
+       }
+
+       return 0;
+}
+
 int hist_browser__run(struct hist_browser *browser, const char *help,
-                     bool warn_lost_event)
+                     bool warn_lost_event, int key)
 {
-       int key;
        char title[160];
        struct hist_browser_timer *hbt = browser->hbt;
        int delay_secs = hbt ? hbt->refresh : 0;
@@ -640,79 +764,14 @@ int hist_browser__run(struct hist_browser *browser, const char *help,
        if (ui_browser__show(&browser->b, title, "%s", help) < 0)
                return -1;
 
+       if (key && hist_browser__handle_hotkey(browser, warn_lost_event, title, sizeof(title), key))
+               goto out;
+
        while (1) {
                key = ui_browser__run(&browser->b, delay_secs);
 
-               switch (key) {
-               case K_TIMER: {
-                       u64 nr_entries;
-
-                       WARN_ON_ONCE(!hbt);
-
-                       if (hbt)
-                               hbt->timer(hbt->arg);
-
-                       if (hist_browser__has_filter(browser) ||
-                           symbol_conf.report_hierarchy)
-                               hist_browser__update_nr_entries(browser);
-
-                       nr_entries = hist_browser__nr_entries(browser);
-                       ui_browser__update_nr_entries(&browser->b, nr_entries);
-
-                       if (warn_lost_event &&
-                           (browser->hists->stats.nr_lost_warned !=
-                           browser->hists->stats.nr_events[PERF_RECORD_LOST])) {
-                               browser->hists->stats.nr_lost_warned =
-                                       browser->hists->stats.nr_events[PERF_RECORD_LOST];
-                               ui_browser__warn_lost_events(&browser->b);
-                       }
-
-                       hist_browser__title(browser, title, sizeof(title));
-                       ui_browser__show_title(&browser->b, title);
-                       continue;
-               }
-               case 'D': { /* Debug */
-                       static int seq;
-                       struct hist_entry *h = rb_entry(browser->b.top,
-                                                       struct hist_entry, rb_node);
-                       ui_helpline__pop();
-                       ui_helpline__fpush("%d: nr_ent=(%d,%d), etl: %d, rows=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
-                                          seq++, browser->b.nr_entries,
-                                          browser->hists->nr_entries,
-                                          browser->b.extra_title_lines,
-                                          browser->b.rows,
-                                          browser->b.index,
-                                          browser->b.top_idx,
-                                          h->row_offset, h->nr_rows);
-               }
-                       break;
-               case 'C':
-                       /* Collapse the whole world. */
-                       hist_browser__set_folding(browser, false);
-                       break;
-               case 'c':
-                       /* Collapse the selected entry. */
-                       hist_browser__set_folding_selected(browser, false);
+               if (hist_browser__handle_hotkey(browser, warn_lost_event, title, sizeof(title), key))
                        break;
-               case 'E':
-                       /* Expand the whole world. */
-                       hist_browser__set_folding(browser, true);
-                       break;
-               case 'e':
-                       /* Expand the selected entry. */
-                       hist_browser__set_folding_selected(browser, true);
-                       break;
-               case 'H':
-                       browser->show_headers = !browser->show_headers;
-                       hist_browser__update_rows(browser);
-                       break;
-               case K_ENTER:
-                       if (hist_browser__toggle_fold(browser))
-                               break;
-                       /* fall thru */
-               default:
-                       goto out;
-               }
        }
 out:
        ui_browser__hide(&browser->b);
@@ -2339,7 +2398,7 @@ close_file_and_continue:
        closedir(pwd_dir);
 
        if (nr_options) {
-               choice = ui__popup_menu(nr_options, options);
+               choice = ui__popup_menu(nr_options, options, NULL);
                if (choice < nr_options && choice >= 0) {
                        tmp = strdup(abs_path[choice]);
                        if (tmp) {
@@ -2411,7 +2470,8 @@ add_annotate_opt(struct hist_browser *browser __maybe_unused,
                 struct popup_action *act, char **optstr,
                 struct map_symbol *ms)
 {
-       if (ms->sym == NULL || ms->map->dso->annotate_warned)
+       if (ms->sym == NULL || ms->map->dso->annotate_warned ||
+           symbol__annotation(ms->sym)->src == NULL)
                return 0;
 
        if (asprintf(optstr, "Annotate %s", ms->sym->name) < 0)
@@ -2484,11 +2544,8 @@ add_thread_opt(struct hist_browser *browser, struct popup_action *act,
        return 1;
 }
 
-static int
-do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
+static int hists_browser__zoom_map(struct hist_browser *browser, struct map *map)
 {
-       struct map *map = act->ms.map;
-
        if (!hists__has(browser->hists, dso) || map == NULL)
                return 0;
 
@@ -2510,6 +2567,12 @@ do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
        return 0;
 }
 
+static int
+do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
+{
+       return hists_browser__zoom_map(browser, act->ms.map);
+}
+
 static int
 add_dso_opt(struct hist_browser *browser, struct popup_action *act,
            char **optstr, struct map *map)
@@ -2517,7 +2580,7 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act,
        if (!hists__has(browser->hists, dso) || map == NULL)
                return 0;
 
-       if (asprintf(optstr, "Zoom %s %s DSO",
+       if (asprintf(optstr, "Zoom %s %s DSO (use the 'k' hotkey to zoom directly into the kernel)",
                     browser->hists->dso_filter ? "out of" : "into",
                     __map__is_kernel(map) ? "the Kernel" : map->dso->short_name) < 0)
                return 0;
@@ -2527,6 +2590,28 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act,
        return 1;
 }
 
+static int do_toggle_callchain(struct hist_browser *browser, struct popup_action *act __maybe_unused)
+{
+       hist_browser__toggle_fold(browser);
+       return 0;
+}
+
+static int add_callchain_toggle_opt(struct hist_browser *browser, struct popup_action *act, char **optstr)
+{
+       char sym_name[512];
+
+       if (!hist_browser__selection_has_children(browser))
+               return 0;
+
+       if (asprintf(optstr, "%s [%s] callchain (one level, same as '+' hotkey, use 'e'/'c' for the whole main level entry)",
+                    hist_browser__selection_unfolded(browser) ? "Collapse" : "Expand",
+                    hist_browser__selection_sym_name(browser, sym_name, sizeof(sym_name))) < 0)
+               return 0;
+
+       act->fn = do_toggle_callchain;
+       return 1;
+}
+
 static int
 do_browse_map(struct hist_browser *browser __maybe_unused,
              struct popup_action *act)
@@ -2858,12 +2943,15 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
        "For symbolic views (--sort has sym):\n\n"                      \
        "ENTER         Zoom into DSO/Threads & Annotate current symbol\n" \
        "ESC           Zoom out\n"                                      \
+       "+             Expand/Collapse one callchain level\n"           \
        "a             Annotate current symbol\n"                       \
        "C             Collapse all callchains\n"                       \
        "d             Zoom into current DSO\n"                         \
+       "e             Expand/Collapse main entry callchains\n" \
        "E             Expand all callchains\n"                         \
        "F             Toggle percentage of filtered entries\n"         \
        "H             Display column headers\n"                        \
+       "k             Zoom into the kernel map\n"                      \
        "L             Change percent limit\n"                          \
        "m             Display context menu\n"                          \
        "S             Zoom into current Processor Socket\n"            \
@@ -2914,13 +3002,13 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
        while (1) {
                struct thread *thread = NULL;
                struct map *map = NULL;
-               int choice = 0;
+               int choice;
                int socked_id = -1;
 
-               nr_options = 0;
-
-               key = hist_browser__run(browser, helpline,
-                                       warn_lost_event);
+               key = 0; // reset key
+do_hotkey:              // key came straight from options ui__popup_menu()
+               choice = nr_options = 0;
+               key = hist_browser__run(browser, helpline, warn_lost_event, key);
 
                if (browser->he_selection != NULL) {
                        thread = hist_browser__selected_thread(browser);
@@ -2950,6 +3038,14 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
                            browser->selection->map->dso->annotate_warned)
                                continue;
 
+                       if (symbol__annotation(browser->selection->sym)->src == NULL) {
+                               ui_browser__warning(&browser->b, delay_secs * 2,
+                                                   "No samples for the \"%s\" symbol.\n\n"
+                                                   "It probably appeared only in a callchain",
+                                                   browser->selection->sym->name);
+                               continue;
+                       }
+
                        actions->ms.map = browser->selection->map;
                        actions->ms.sym = browser->selection->sym;
                        do_annotate(browser, actions);
@@ -2961,6 +3057,10 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
                        actions->ms.map = map;
                        do_zoom_dso(browser, actions);
                        continue;
+               case 'k':
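+                       /* Zoom into the kernel map, like the 'd' DSO zoom. */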
+                       if (browser->selection != NULL)
+                               hists_browser__zoom_map(browser, browser->selection->maps->machine->vmlinux_map);
+                       continue;
                case 'V':
                        verbose = (verbose + 1) % 4;
                        browser->show_dso = verbose > 0;
@@ -3062,6 +3162,7 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
 
                                continue;
                        }
+                       actions->ms.map = map;
                        top = pstack__peek(browser->pstack);
                        if (top == &browser->hists->dso_filter) {
                                /*
@@ -3135,6 +3236,7 @@ skip_annotation:
                                             &options[nr_options], thread);
                nr_options += add_dso_opt(browser, &actions[nr_options],
                                          &options[nr_options], map);
+               nr_options += add_callchain_toggle_opt(browser, &actions[nr_options], &options[nr_options]);
                nr_options += add_map_opt(browser, &actions[nr_options],
                                          &options[nr_options],
                                          browser->selection ?
@@ -3193,10 +3295,13 @@ skip_scripting:
                do {
                        struct popup_action *act;
 
-                       choice = ui__popup_menu(nr_options, options);
-                       if (choice == -1 || choice >= nr_options)
+                       choice = ui__popup_menu(nr_options, options, &key);
+                       if (choice == -1)
                                break;
 
+                       if (choice == nr_options)
+                               goto do_hotkey;
+
                        act = &actions[choice];
                        key = act->fn(browser, act);
                } while (key == 1);
@@ -3492,7 +3597,7 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
        memset(&action, 0, sizeof(action));
 
        while (1) {
-               key = hist_browser__run(browser, "? - help", true);
+               key = hist_browser__run(browser, "? - help", true, 0);
 
                switch (key) {
                case 'q':
index 078f2f2c7abd2658403208d0b1d0229a3ad48778..1e938d9ffa5ee26177152840acdf73072db6c289 100644 (file)
@@ -34,7 +34,7 @@ struct hist_browser {
 struct hist_browser *hist_browser__new(struct hists *hists);
 void hist_browser__delete(struct hist_browser *browser);
 int hist_browser__run(struct hist_browser *browser, const char *help,
-                     bool warn_lost_event);
+                     bool warn_lost_event, int key);
 void hist_browser__init(struct hist_browser *browser,
                        struct hists *hists);
 #endif /* _PERF_UI_BROWSER_HISTS_H_ */
index 76d356a1879063956c50dae3010a94364920f314..7cb2d6678039749813c405ac1f515c7796ce8b53 100644 (file)
@@ -56,7 +56,7 @@ int res_sample_browse(struct res_sample *res_samples, int num_res,
                        return -1;
                }
        }
-       choice = ui__popup_menu(num_res, names);
+       choice = ui__popup_menu(num_res, names, NULL);
        for (i = 0; i < num_res; i++)
                zfree(&names[i]);
        free(names);
index fc733a6354d4dc4fc6f21e138dd7cbbe4378c20a..47d2c7a8cbe13cba1a3f9d46fd3c720cf0d2149c 100644 (file)
@@ -126,7 +126,7 @@ static int list_scripts(char *script_name, bool *custom,
                        SCRIPT_FULLPATH_LEN);
        if (num < 0)
                num = 0;
-       choice = ui__popup_menu(num + max_std, (char * const *)names);
+       choice = ui__popup_menu(num + max_std, (char * const *)names, NULL);
        if (choice < 0) {
                ret = -1;
                goto out;
index ec22e899a2240f2e86add99bfe2b203913a4259a..eef708c502f4346d7d55114bb0c0dbb51b0fd070 100644 (file)
@@ -1,4 +1,4 @@
-CFLAGS_gtk += -fPIC $(GTK_CFLAGS)
+CFLAGS_gtk += -fPIC $(GTK_CFLAGS) -Wno-deprecated-declarations
 
 gtk-y += browser.o
 gtk-y += hists.o
@@ -7,3 +7,8 @@ gtk-y += util.o
 gtk-y += helpline.o
 gtk-y += progress.o
 gtk-y += annotate.o
+gtk-y += zalloc.o
+
+$(OUTPUT)ui/gtk/zalloc.o: ../lib/zalloc.c FORCE
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
index b98dd0e31dc1aee049c3fabcf6dcee7649547a5f..0f562e2cb1e881181976ad1e1bbad77aa939c580 100644 (file)
@@ -23,7 +23,7 @@ static void ui_browser__argv_write(struct ui_browser *browser,
        ui_browser__write_nstring(browser, *arg, browser->width);
 }
 
-static int popup_menu__run(struct ui_browser *menu)
+static int popup_menu__run(struct ui_browser *menu, int *keyp)
 {
        int key;
 
@@ -45,6 +45,11 @@ static int popup_menu__run(struct ui_browser *menu)
                        key = -1;
                        break;
                default:
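+                       /*
+                        * An otherwise unhandled key: when the caller asked
+                        * for hotkeys, report it and return nr_entries to
+                        * signal that no menu entry was picked.
+                        */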
+                       if (keyp) {
+                               *keyp = key;
+                               key = menu->nr_entries;
+                               break;
+                       }
                        continue;
                }
 
@@ -55,7 +60,7 @@ static int popup_menu__run(struct ui_browser *menu)
        return key;
 }
 
-int ui__popup_menu(int argc, char * const argv[])
+int ui__popup_menu(int argc, char * const argv[], int *keyp)
 {
        struct ui_browser menu = {
                .entries    = (void *)argv,
@@ -64,8 +69,7 @@ int ui__popup_menu(int argc, char * const argv[])
                .write      = ui_browser__argv_write,
                .nr_entries = argc,
        };
-
-       return popup_menu__run(&menu);
+       return popup_menu__run(&menu, keyp);
 }
 
 int ui_browser__input_window(const char *title, const char *text, char *input,
index 40891942f465dc4737b89fbd77d6646bcf81b49d..e30cea807564f92ff26c316fad580323f70ad240 100644 (file)
@@ -5,7 +5,7 @@
 #include <stdarg.h>
 
 int ui__getch(int delay_secs);
-int ui__popup_menu(int argc, char * const argv[]);
+int ui__popup_menu(int argc, char * const argv[], int *keyp);
 int ui__help_window(const char *text);
 int ui__dialog_yesno(const char *msg);
 void __ui__info_window(const char *title, const char *text, const char *exit_msg);
index f5e77ed237e8f5d478abaef1dc3b9051f536b43f..ca73fb74ad03273464abe6bb86455140495542ca 100644 (file)
@@ -1966,14 +1966,20 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
        err = asprintf(&command,
                 "%s %s%s --start-address=0x%016" PRIx64
                 " --stop-address=0x%016" PRIx64
-                " -l -d %s %s -C \"$1\"",
+                " -l -d %s %s %s %c%s%c %s%s -C \"$1\"",
                 opts->objdump_path ?: "objdump",
                 opts->disassembler_style ? "-M " : "",
                 opts->disassembler_style ?: "",
                 map__rip_2objdump(map, sym->start),
                 map__rip_2objdump(map, sym->end),
                 opts->show_asm_raw ? "" : "--no-show-raw-insn",
-                opts->annotate_src ? "-S" : "");
+                opts->annotate_src ? "-S" : "",
+                opts->prefix ? "--prefix " : "",
+                opts->prefix ? '"' : ' ',
+                opts->prefix ?: "",
+                opts->prefix ? '"' : ' ',
+                opts->prefix_strip ? "--prefix-strip=" : "",
+                opts->prefix_strip ?: "");
 
        if (err < 0) {
                pr_err("Failure allocating memory for the command to run\n");
@@ -3204,3 +3210,12 @@ out:
        free(str1);
        return err;
 }
+
+int annotate_check_args(struct annotation_options *args)
+{
+       if (args->prefix_strip && !args->prefix) {
+               pr_err("--prefix-strip requires --prefix\n");
+               return -1;
+       }
+       return 0;
+}
index 7075d98f69d91189eeb855ed3fc95c52a96e6a90..455403e8feded864661094b4c5fbb26fe8626492 100644 (file)
@@ -94,6 +94,8 @@ struct annotation_options {
        int  context;
        const char *objdump_path;
        const char *disassembler_style;
+       const char *prefix;
+       const char *prefix_strip;
        unsigned int percent_type;
 };
 
@@ -415,4 +417,7 @@ void annotation_config__init(void);
 
 int annotate_parse_percent_type(const struct option *opt, const char *_str,
                                int unset);
+
+int annotate_check_args(struct annotation_options *args);
+
 #endif /* __PERF_ANNOTATE_H */
index fc361c3f8570b3fe92bbfdd815d0fdbd32470eee..c8885dfa36671f625d661c487a0d956c09278bb2 100644 (file)
@@ -71,7 +71,11 @@ getModuleFromSource(llvm::opt::ArgStringList CFlags,
        CompilerInstance Clang;
        Clang.createDiagnostics();
 
+#if CLANG_VERSION_MAJOR < 9
        Clang.setVirtualFileSystem(&*VFS);
+#else
+       Clang.createFileManager(&*VFS);
+#endif
 
 #if CLANG_VERSION_MAJOR < 4
        IntrusiveRefCntPtr<CompilerInvocation> CI =
index 57943f3685f8c84226a0cd970e9cbb448d8766e5..3a442f0214684cd85e4591e55aad713839a1947d 100644 (file)
@@ -63,4 +63,5 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
 
 int cpu_map__cpu(struct perf_cpu_map *cpus, int idx);
 bool cpu_map__has(struct perf_cpu_map *cpus, int cpu);
+
 #endif /* __PERF_CPUMAP_H */
index fdce590d22786ae21d65494fc67a3dc5a4ecb60b..1548237b6558d9105dcc07ae8094fb7f06dcc2b4 100644 (file)
@@ -18,6 +18,7 @@
 #include "debug.h"
 #include "units.h"
 #include <internal/lib.h> // page_size
+#include "affinity.h"
 #include "../perf.h"
 #include "asm/bug.h"
 #include "bpf-event.h"
@@ -342,14 +343,63 @@ static int perf_evlist__nr_threads(struct evlist *evlist,
                return perf_thread_map__nr(evlist->core.threads);
 }
 
+void evlist__cpu_iter_start(struct evlist *evlist)
+{
+       struct evsel *pos;
+
+       /*
+        * Reset the per evsel cpu_iter. This is needed because
+        * each evsel's cpumap may have a different index space,
+        * and some operations need the index to modify
+        * the FD xyarray (e.g. open, close).
+        */
+       evlist__for_each_entry(evlist, pos)
+               pos->cpu_iter = 0;
+}
+
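+/*
+ * True when this evsel has exhausted its own cpu map or, for cpu >= 0,
+ * when its next cpu map entry is not 'cpu'. Does not advance the
+ * per-evsel iterator.
+ */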
+bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
+{
+       if (ev->cpu_iter >= ev->core.cpus->nr)
+               return true;
+       if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
+               return true;
+       return false;
+}
+
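+/* As above, but advance the per-evsel iterator when this cpu does match. */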
+bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
+{
+       if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
+               ev->cpu_iter++;
+               return false;
+       }
+       return true;
+}
+
 void evlist__disable(struct evlist *evlist)
 {
        struct evsel *pos;
+       struct affinity affinity;
+       int cpu, i;
+
+       if (affinity__setup(&affinity) < 0)
+               return;
 
+       evlist__for_each_cpu(evlist, i, cpu) {
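+               /*
+                * Pin this thread to 'cpu' so the per-event ioctls below
+                * run locally, avoiding cross-CPU IPIs for events bound
+                * to that CPU.
+                */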
+               affinity__set(&affinity, cpu);
+
+               evlist__for_each_entry(evlist, pos) {
+                       if (evsel__cpu_iter_skip(pos, cpu))
+                               continue;
+                       if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
+                               continue;
+                       evsel__disable_cpu(pos, pos->cpu_iter - 1);
+               }
+       }
+       affinity__cleanup(&affinity);
        evlist__for_each_entry(evlist, pos) {
-               if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
+               if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
                        continue;
-               evsel__disable(pos);
+               pos->disabled = true;
        }
 
        evlist->enabled = false;
@@ -358,11 +408,28 @@ void evlist__disable(struct evlist *evlist)
 void evlist__enable(struct evlist *evlist)
 {
        struct evsel *pos;
+       struct affinity affinity;
+       int cpu, i;
+
+       if (affinity__setup(&affinity) < 0)
+               return;
 
+       evlist__for_each_cpu(evlist, i, cpu) {
+               affinity__set(&affinity, cpu);
+
+               evlist__for_each_entry(evlist, pos) {
+                       if (evsel__cpu_iter_skip(pos, cpu))
+                               continue;
+                       if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
+                               continue;
+                       evsel__enable_cpu(pos, pos->cpu_iter - 1);
+               }
+       }
+       affinity__cleanup(&affinity);
        evlist__for_each_entry(evlist, pos) {
                if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
                        continue;
-               evsel__enable(pos);
+               pos->disabled = false;
        }
 
        evlist->enabled = true;
@@ -1137,9 +1204,35 @@ void perf_evlist__set_selected(struct evlist *evlist,
 void evlist__close(struct evlist *evlist)
 {
        struct evsel *evsel;
+       struct affinity affinity;
+       int cpu, i;
 
-       evlist__for_each_entry_reverse(evlist, evsel)
-               evsel__close(evsel);
+       /*
+        * With perf record core.cpus is usually NULL.
+        * Use the old method to handle this for now.
+        */
+       if (!evlist->core.cpus) {
+               evlist__for_each_entry_reverse(evlist, evsel)
+                       evsel__close(evsel);
+               return;
+       }
+
+       if (affinity__setup(&affinity) < 0)
+               return;
+       evlist__for_each_cpu(evlist, i, cpu) {
+               affinity__set(&affinity, cpu);
+
+               evlist__for_each_entry_reverse(evlist, evsel) {
+                       if (evsel__cpu_iter_skip(evsel, cpu))
+                               continue;
+                       perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
+               }
+       }
+       affinity__cleanup(&affinity);
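+       /* The per-CPU fds were all closed above; free the fd arrays and ids. */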
+       evlist__for_each_entry_reverse(evlist, evsel) {
+               perf_evsel__free_fd(&evsel->core);
+               perf_evsel__free_id(&evsel->core);
+       }
 }
 
 static int perf_evlist__create_syswide_maps(struct evlist *evlist)
@@ -1577,7 +1670,8 @@ void perf_evlist__force_leader(struct evlist *evlist)
 }
 
 struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
-                                                struct evsel *evsel)
+                                                struct evsel *evsel,
+                                               bool close)
 {
        struct evsel *c2, *leader;
        bool is_open = true;
@@ -1594,10 +1688,15 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
                if (c2 == evsel)
                        is_open = false;
                if (c2->leader == leader) {
-                       if (is_open)
+                       if (is_open && close)
                                perf_evsel__close(&c2->core);
                        c2->leader = c2;
                        c2->core.nr_members = 0;
+                       /*
+                        * Set this for all former members of the group
+                        * to indicate they get reopened.
+                        */
+                       c2->reset_group = true;
                }
        }
        return leader;
index 3655b9ebb1473fccd594276f093603f9998b06f4..f5bd5c386df1138423313860c34ab331d34024a4 100644 (file)
@@ -334,9 +334,17 @@ void perf_evlist__to_front(struct evlist *evlist,
 #define evlist__for_each_entry_safe(evlist, tmp, evsel) \
        __evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel)
 
+#define evlist__for_each_cpu(evlist, index, cpu)       \
+       evlist__cpu_iter_start(evlist);                 \
+       perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus)
+
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                                     struct evsel *tracking_evsel);
 
+void evlist__cpu_iter_start(struct evlist *evlist);
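+/* Helpers that walk each evsel's own cpu map in step with evlist__for_each_cpu(). */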
+bool evsel__cpu_iter_skip(struct evsel *ev, int cpu);
+bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu);
+
 struct evsel *
 perf_evlist__find_evsel_by_str(struct evlist *evlist, const char *str);
 
@@ -348,5 +356,6 @@ bool perf_evlist__exclude_kernel(struct evlist *evlist);
 void perf_evlist__force_leader(struct evlist *evlist);
 
 struct evsel *perf_evlist__reset_weak_group(struct evlist *evlist,
-                                                struct evsel *evsel);
+                                                struct evsel *evsel,
+                                               bool close);
 #endif /* __PERF_EVLIST_H */
index f4dea055b0808941229374cf6a93d7992888b406..a69e64236120a7d4c8bf4ceb84434a439b51f6ac 100644 (file)
@@ -1223,16 +1223,27 @@ int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter)
        return perf_evsel__append_filter(evsel, "%s,%s", filter);
 }
 
+/* Caller has to clear disabled after going through all CPUs. */
+int evsel__enable_cpu(struct evsel *evsel, int cpu)
+{
+       return perf_evsel__enable_cpu(&evsel->core, cpu);
+}
+
 int evsel__enable(struct evsel *evsel)
 {
        int err = perf_evsel__enable(&evsel->core);
 
        if (!err)
                evsel->disabled = false;
-
        return err;
 }
 
+/* Caller has to set disabled after going through all CPUs. */
+int evsel__disable_cpu(struct evsel *evsel, int cpu)
+{
+       return perf_evsel__disable_cpu(&evsel->core, cpu);
+}
+
 int evsel__disable(struct evsel *evsel)
 {
        int err = perf_evsel__disable(&evsel->core);
@@ -1587,8 +1598,9 @@ static int perf_event_open(struct evsel *evsel,
        return fd;
 }
 
-int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
-               struct perf_thread_map *threads)
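+/* Open the events only on the [start_cpu, end_cpu) slice of the cpu map. */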
+static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
+               struct perf_thread_map *threads,
+               int start_cpu, int end_cpu)
 {
        int cpu, thread, nthreads;
        unsigned long flags = PERF_FLAG_FD_CLOEXEC;
@@ -1665,7 +1677,7 @@ retry_sample_id:
 
        display_attr(&evsel->core.attr);
 
-       for (cpu = 0; cpu < cpus->nr; cpu++) {
+       for (cpu = start_cpu; cpu < end_cpu; cpu++) {
 
                for (thread = 0; thread < nthreads; thread++) {
                        int fd, group_fd;
@@ -1843,6 +1855,12 @@ out_close:
        return err;
 }
 
+int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
+               struct perf_thread_map *threads)
+{
+       return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1);
+}
+
 void evsel__close(struct evsel *evsel)
 {
        perf_evsel__close(&evsel->core);
@@ -1850,9 +1868,14 @@ void evsel__close(struct evsel *evsel)
 }
 
 int perf_evsel__open_per_cpu(struct evsel *evsel,
-                            struct perf_cpu_map *cpus)
+                            struct perf_cpu_map *cpus,
+                            int cpu)
 {
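+       /* cpu == -1 opens the events on all CPUs in the map, as before. */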
-       return evsel__open(evsel, cpus, NULL);
+       if (cpu == -1)
+               return evsel__open_cpu(evsel, cpus, NULL, 0,
+                                       cpus ? cpus->nr : 1);
+
+       return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1);
 }
 
 int perf_evsel__open_per_thread(struct evsel *evsel,
index ddc5ee6f6592bed23532aff20f75c7a76db53361..dc14f4a823cd6a1cee5e2f9edae77cb2ac2e92bb 100644 (file)
@@ -86,6 +86,7 @@ struct evsel {
        struct list_head        config_terms;
        struct bpf_object       *bpf_obj;
        int                     bpf_fd;
+       int                     err;
        bool                    auto_merge_stats;
        bool                    merged_stat;
        const char *            metric_expr;
@@ -94,7 +95,10 @@ struct evsel {
        struct evsel            *metric_leader;
        bool                    collect_stat;
        bool                    weak_group;
+       bool                    reset_group;
+       bool                    errored;
        bool                    percore;
+       int                     cpu_iter;
        const char              *pmu_name;
        struct {
                perf_evsel__sb_cb_t     *cb;
@@ -218,11 +222,14 @@ int perf_evsel__set_filter(struct evsel *evsel, const char *filter);
 int perf_evsel__append_tp_filter(struct evsel *evsel, const char *filter);
 int perf_evsel__append_addr_filter(struct evsel *evsel,
                                   const char *filter);
+int evsel__enable_cpu(struct evsel *evsel, int cpu);
 int evsel__enable(struct evsel *evsel);
 int evsel__disable(struct evsel *evsel);
+int evsel__disable_cpu(struct evsel *evsel, int cpu);
 
 int perf_evsel__open_per_cpu(struct evsel *evsel,
-                            struct perf_cpu_map *cpus);
+                            struct perf_cpu_map *cpus,
+                            int cpu);
 int perf_evsel__open_per_thread(struct evsel *evsel,
                                struct perf_thread_map *threads);
 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
index f9a20a39b64adbacbf2ba1ca1aac8ed5f8f37499..7d226241f1d77854c9ac31f0c1b723ba79cd3f65 100644 (file)
@@ -12,7 +12,8 @@
 #define MAXIDLEN 256
 %}
 
-%pure-parser
+%define api.pure full
+
 %parse-param { double *final_val }
 %parse-param { struct parse_ctx *ctx }
 %parse-param { const char **pp }
index f9f18b8b1df9a2e3973aadb6938cb2c20c025b79..aed49806a09bab8f9a835fc965bca33fec796c2c 100644 (file)
@@ -8,15 +8,12 @@
  */
 
 #include <sys/types.h>
-#include <stdio.h>
-#include <getopt.h>
 #include <stddef.h>
 #include <libelf.h>
 #include <string.h>
 #include <stdlib.h>
 #include <unistd.h>
 #include <inttypes.h>
-#include <limits.h>
 #include <fcntl.h>
 #include <err.h>
 #ifdef HAVE_DWARF_SUPPORT
@@ -31,8 +28,6 @@
 #define NT_GNU_BUILD_ID 3
 #endif
 
-#define JVMTI
-
 #define BUILD_ID_URANDOM /* different uuid for each run */
 
 #ifdef HAVE_LIBCRYPTO
@@ -511,44 +506,3 @@ error:
 
        return retval;
 }
-
-#ifndef JVMTI
-
-static unsigned char x86_code[] = {
-    0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */
-    0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */
-    0xCD, 0x80            /* int $0x80 */
-};
-
-static struct options options;
-
-int main(int argc, char **argv)
-{
-       int c, fd, ret;
-
-       while ((c = getopt(argc, argv, "o:h")) != -1) {
-               switch (c) {
-               case 'o':
-                       options.output = optarg;
-                       break;
-               case 'h':
-                       printf("Usage: genelf -o output_file [-h]\n");
-                       return 0;
-               default:
-                       errx(1, "unknown option");
-               }
-       }
-
-       fd = open(options.output, O_CREAT|O_TRUNC|O_RDWR, 0666);
-       if (fd == -1)
-               err(1, "cannot create file %s", options.output);
-
-       ret = jit_write_elf(fd, "main", x86_code, sizeof(x86_code));
-       close(fd);
-
-       if (ret != 0)
-               unlink(options.output);
-
-       return ret;
-}
-#endif
index becc2d1094237580901dfe0578ad63900cc77635..4246e7447e54a1247aad4e79f3a5de37616ec5ad 100644 (file)
@@ -850,7 +850,7 @@ int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
  */
 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
 {
-       return -1;
+       return ENOSYS; /* Not implemented */
 }
 
 static int write_cpuid(struct feat_fd *ff,
@@ -1089,21 +1089,18 @@ static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
        fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
 }
 
-static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
+#define MAX_CACHE_LVL 4
+
+static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
 {
        u32 i, cnt = 0;
-       long ncpus;
        u32 nr, cpu;
        u16 level;
 
-       ncpus = sysconf(_SC_NPROCESSORS_CONF);
-       if (ncpus < 0)
-               return -1;
-
-       nr = (u32)(ncpus & UINT_MAX);
+       nr = cpu__max_cpu();
 
        for (cpu = 0; cpu < nr; cpu++) {
-               for (level = 0; level < 10; level++) {
+               for (level = 0; level < MAX_CACHE_LVL; level++) {
                        struct cpu_cache_level c;
                        int err;
 
@@ -1123,18 +1120,12 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
                                caches[cnt++] = c;
                        else
                                cpu_cache_level__free(&c);
-
-                       if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
-                               goto out;
                }
        }
- out:
        *cntp = cnt;
        return 0;
 }
 
-#define MAX_CACHE_LVL 4
-
 static int write_cache(struct feat_fd *ff,
                       struct evlist *evlist __maybe_unused)
 {
@@ -1143,7 +1134,7 @@ static int write_cache(struct feat_fd *ff,
        u32 cnt = 0, i, version = 1;
        int ret;
 
-       ret = build_caches(caches, max_caches, &cnt);
+       ret = build_caches(caches, &cnt);
        if (ret)
                goto out;
 
@@ -2931,7 +2922,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
        if (ret == -1)
                return -1;
 
-       stctime = st.st_ctime;
+       stctime = st.st_mtime;
        fprintf(fp, "# captured on    : %s", ctime(&stctime));
 
        fprintf(fp, "# header version : %u\n", header->version);
index 45286900aacbff35c0ecb2d333f38ab0b81f3eb3..0aa63aeb58ecf048e67401846986000b27b12f34 100644 (file)
@@ -339,10 +339,10 @@ static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
        list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list)
 
 #define hists__for_each_format(hists, format) \
-       perf_hpp_list__for_each_format((hists)->hpp_list, fmt)
+       perf_hpp_list__for_each_format((hists)->hpp_list, format)
 
 #define hists__for_each_sort_list(hists, format) \
-       perf_hpp_list__for_each_sort_list((hists)->hpp_list, fmt)
+       perf_hpp_list__for_each_sort_list((hists)->hpp_list, format)
 
 extern struct perf_hpp_fmt perf_hpp__format[];
 
index f01d48a8d707987de71244cec8df09353c592129..b8a5159361b4123651509d9510e851a51f6c1a42 100644 (file)
@@ -5,10 +5,93 @@
 
 /* linkage.h ... for including arch/x86/lib/memcpy_64.S */
 
-#define ENTRY(name)                            \
-       .globl name;                            \
+/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+#ifndef ASM_NL
+#define ASM_NL          ;
+#endif
+
+#ifndef __ALIGN
+#define __ALIGN                .align 4,0x90
+#define __ALIGN_STR    ".align 4,0x90"
+#endif
+
+/* SYM_T_FUNC -- type used by assembler to mark functions */
+#ifndef SYM_T_FUNC
+#define SYM_T_FUNC                             STT_FUNC
+#endif
+
+/* SYM_A_* -- align the symbol? */
+#define SYM_A_ALIGN                            ALIGN
+
+/* SYM_L_* -- linkage of symbols */
+#define SYM_L_GLOBAL(name)                     .globl name
+#define SYM_L_LOCAL(name)                      /* nothing */
+
+#define ALIGN __ALIGN
+
+/* === generic annotations === */
+
+/* SYM_ENTRY -- use only if you have to for non-paired symbols */
+#ifndef SYM_ENTRY
+#define SYM_ENTRY(name, linkage, align...)             \
+       linkage(name) ASM_NL                            \
+       align ASM_NL                                    \
        name:
+#endif
+
+/* SYM_START -- use only if you have to */
+#ifndef SYM_START
+#define SYM_START(name, linkage, align...)             \
+       SYM_ENTRY(name, linkage, align)
+#endif
+
+/* SYM_END -- use only if you have to */
+#ifndef SYM_END
+#define SYM_END(name, sym_type)                                \
+       .type name sym_type ASM_NL                      \
+       .size name, .-name
+#endif
+
+/*
+ * SYM_FUNC_START_ALIAS -- use where there are two global names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_ALIAS
+#define SYM_FUNC_START_ALIAS(name)                     \
+       SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+/*
+ * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
+ * later.
+ */
+#define SYM_FUNC_START(name)                           \
+       SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_START_LOCAL(name)                     \
+       SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
+#ifndef SYM_FUNC_END_ALIAS
+#define SYM_FUNC_END_ALIAS(name)                       \
+       SYM_END(name, SYM_T_FUNC)
+#endif
 
-#define ENDPROC(name)
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_END(name)                             \
+       SYM_END(name, SYM_T_FUNC)
+#endif
 
 #endif /* PERF_LINUX_LINKAGE_H_ */
index 416d174d223c522e7690e3efd4eec74bb7576440..c8c5410315e817c2b9533eb8df718dc1bf98915f 100644 (file)
@@ -2446,6 +2446,7 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
 
        list_for_each_entry(ilist, &inline_node->val, list) {
                struct map_symbol ilist_ms = {
+                       .maps = ms->maps,
                        .map = map,
                        .sym = ilist->symbol,
                };
index 6a4d350d5cdbab9f0fbabe8ac8bc53f3cefbc6fc..02aee946b6c14c1819eebf329e9536c43f9edc51 100644 (file)
@@ -103,8 +103,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
                if (!strcmp(ev->name, ids[i])) {
                        if (!metric_events[i])
                                metric_events[i] = ev;
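+                       /* This id matched; move on, stopping once all ids are found. */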
+                       i++;
+                       if (i == idnum)
+                               break;
                } else {
-                       if (++i == idnum) {
+                       if (i + 1 == idnum) {
                                /* Discard the whole match and start again */
                                i = 0;
                                memset(metric_events, 0,
@@ -124,7 +127,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
                }
        }
 
-       if (i != idnum - 1) {
+       if (i != idnum) {
                /* Not whole match */
                return NULL;
        }
index 063d1b93c53d1821f8cd7db409309ceb722aa30d..3b664fa673a6ce031b492e84ad2964c3814fb833 100644 (file)
 #include "mmap.h"
 #include "../perf.h"
 #include <internal/lib.h> /* page_size */
+#include <linux/bitmap.h>
+
+#define MASK_SIZE 1023
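+
+/* Print the mask as a hex bitmap string through pr_debug(), prefixed by tag. */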
+void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
+{
+       char buf[MASK_SIZE + 1];
+       size_t len;
+
+       len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
+       buf[len] = '\0';
+       pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
+}
 
 size_t mmap__mmap_len(struct mmap *map)
 {
@@ -207,6 +219,8 @@ static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
 
 void mmap__munmap(struct mmap *map)
 {
+       bitmap_free(map->affinity_mask.bits);
+
        perf_mmap__aio_munmap(map);
        if (map->data != NULL) {
                munmap(map->data, mmap__mmap_len(map));
@@ -215,7 +229,7 @@ void mmap__munmap(struct mmap *map)
        auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
 
-static void build_node_mask(int node, cpu_set_t *mask)
+static void build_node_mask(int node, struct mmap_cpu_mask *mask)
 {
        int c, cpu, nr_cpus;
        const struct perf_cpu_map *cpu_map = NULL;
@@ -228,17 +242,23 @@ static void build_node_mask(int node, cpu_set_t *mask)
        for (c = 0; c < nr_cpus; c++) {
                cpu = cpu_map->map[c]; /* map c index to online cpu index */
                if (cpu__get_node(cpu) == node)
-                       CPU_SET(cpu, mask);
+                       set_bit(cpu, mask->bits);
        }
 }
 
-static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
+static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
 {
-       CPU_ZERO(&map->affinity_mask);
+       map->affinity_mask.nbits = cpu__max_cpu();
+       map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
+       if (!map->affinity_mask.bits)
+               return -1;
+
        if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
                build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
        else if (mp->affinity == PERF_AFFINITY_CPU)
-               CPU_SET(map->core.cpu, &map->affinity_mask);
+               set_bit(map->core.cpu, map->affinity_mask.bits);
+
+       return 0;
 }
 
 int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
@@ -249,7 +269,15 @@ int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
                return -1;
        }
 
-       perf_mmap__setup_affinity_mask(map, mp);
+       if (mp->affinity != PERF_AFFINITY_SYS &&
+               perf_mmap__setup_affinity_mask(map, mp)) {
+               pr_debug2("failed to alloc mmap affinity mask, error %d\n",
+                         errno);
+               return -1;
+       }
+
+       if (verbose == 2)
+               mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
 
        map->core.flush = mp->flush;
 
index bee4e83f7109d33ebc495658f5534bc74a75a215..9d5f589f02ae70e187539ad08202157c08801771 100644 (file)
 #include "event.h"
 
 struct aiocb;
+
+struct mmap_cpu_mask {
+       unsigned long *bits;
+       size_t nbits;
+};
+
+#define MMAP_CPU_MASK_BYTES(m) \
+       (BITS_TO_LONGS(((struct mmap_cpu_mask *)m)->nbits) * sizeof(unsigned long))
+
 /**
  * struct mmap - perf's ring buffer mmap details
  *
@@ -31,7 +40,7 @@ struct mmap {
                int              nr_cblocks;
        } aio;
 #endif
-       cpu_set_t       affinity_mask;
+       struct mmap_cpu_mask    affinity_mask;
        void            *data;
        int             comp_level;
 };
@@ -52,4 +61,6 @@ int perf_mmap__push(struct mmap *md, void *to,
 
 size_t mmap__mmap_len(struct mmap *map);
 
+void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag);
+
 #endif /*__PERF_MMAP_H */
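
Taken together, these hunks replace the fixed-size cpu_set_t with a heap-allocated bitmap whose length tracks the real CPU count, which is why mmap__munmap() now frees it and mmap__mmap() can fail on allocation. A minimal userspace approximation of the pattern (BITS_TO_LONGS and the set-bit helper are written out locally; the kernel-tools bitmap_alloc()/set_bit() helpers behave equivalently):

    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct cpu_mask {
            unsigned long *bits;
            size_t nbits;
    };

    static int cpu_mask_alloc(struct cpu_mask *m, size_t nbits)
    {
            m->nbits = nbits;
            m->bits = calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
            return m->bits ? 0 : -1;           /* caller bails out, as mmap__mmap() does */
    }

    static void cpu_mask_set(struct cpu_mask *m, size_t cpu)
    {
            m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
    }

    int main(void)
    {
            struct cpu_mask m;

            if (cpu_mask_alloc(&m, 64))
                    return 1;
            cpu_mask_set(&m, 3);               /* PERF_AFFINITY_CPU case */
            printf("mask[%zu]: %lx\n", m.nbits, m.bits[0]); /* mask[64]: 8 */
            free(m.bits);
            return 0;
    }
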
index e2eea4e601b41cbb59e3cbc10e574e05d1feeac6..94f8bcd835826de88b83c8df50141bafdffce7f2 100644 (file)
@@ -1,4 +1,4 @@
-%pure-parser
+%define api.pure full
 %parse-param {void *_parse_state}
 %parse-param {void *scanner}
 %lex-param {void* scanner}
index 345b5ccc90f68bfd353339988bae29a3f8f080b2..ab0cfd790ad00dddbf06a5e6b0baafe573614f9a 100644 (file)
@@ -324,8 +324,7 @@ static int _hist_entry__sym_snprintf(struct map_symbol *ms,
        return ret;
 }
 
-static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
-                                   size_t size, unsigned int width)
+int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
 {
        return _hist_entry__sym_snprintf(&he->ms, he->ip,
                                         he->level, bf, size, width);
@@ -2681,12 +2680,12 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
                        ret = sort_dimension__add(list, tok, evlist, level);
                        if (ret == -EINVAL) {
                                if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
-                                       pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
+                                       ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
                                else
-                                       pr_err("Invalid --sort key: `%s'", tok);
+                                       ui__error("Invalid --sort key: `%s'", tok);
                                break;
                        } else if (ret == -ESRCH) {
-                               pr_err("Unknown --sort key: `%s'", tok);
+                               ui__error("Unknown --sort key: `%s'", tok);
                                break;
                        }
                }
@@ -2743,7 +2742,7 @@ static int setup_sort_order(struct evlist *evlist)
                return 0;
 
        if (sort_order[1] == '\0') {
-               pr_err("Invalid --sort key: `+'");
+               ui__error("Invalid --sort key: `+'");
                return -EINVAL;
        }
 
@@ -2959,6 +2958,9 @@ int output_field_add(struct perf_hpp_list *list, char *tok)
                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;
 
+               if (sort__mode != SORT_MODE__MEMORY)
+                       return -EINVAL;
+
                return __sort_dimension__add_output(list, sd);
        }
 
@@ -2968,6 +2970,9 @@ int output_field_add(struct perf_hpp_list *list, char *tok)
                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;
 
+               if (sort__mode != SORT_MODE__BRANCH)
+                       return -EINVAL;
+
                return __sort_dimension__add_output(list, sd);
        }
 
@@ -3034,7 +3039,7 @@ static int __setup_output_field(void)
                strp++;
 
        if (!strlen(strp)) {
-               pr_err("Invalid --fields key: `+'");
+               ui__error("Invalid --fields key: `+'");
                goto out;
        }
 
index 5aff9542d9b79fc1d95735b101fd30c03fc088ba..6c862d62d05258c5105ef795c3b2d6089d4ddecf 100644 (file)
@@ -164,6 +164,8 @@ static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
        return he->callchain_size != 0;
 }
 
+int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width);
+
 static inline bool hist_entry__has_pairs(struct hist_entry *he)
 {
        return !list_empty(&he->pairs.node);
index 332cb730785bd3defe9b8e29c8d6080c6551cd68..5f26137b8d6028fa0fc00e772ebacf43ba3e007e 100644 (file)
@@ -464,7 +464,8 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
 
 int create_perf_stat_counter(struct evsel *evsel,
                             struct perf_stat_config *config,
-                            struct target *target)
+                            struct target *target,
+                            int cpu)
 {
        struct perf_event_attr *attr = &evsel->core.attr;
        struct evsel *leader = evsel->leader;
@@ -518,7 +519,7 @@ int create_perf_stat_counter(struct evsel *evsel,
        }
 
        if (target__has_cpu(target) && !target__has_per_thread(target))
-               return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel));
+               return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);
 
        return perf_evsel__open_per_thread(evsel, evsel->core.threads);
 }
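
Passing a cpu index down to perf_evsel__open_per_cpu() ends, at the bottom of the stack, in a perf_event_open(2) call with an explicit CPU argument. A hedged standalone illustration of opening one hardware counter pinned to a single CPU (Linux-only; error handling abbreviated):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr;
            int fd, cpu = 0;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;

            /* pid = -1, cpu = 0: count cycles on CPU 0 for all tasks */
            fd = syscall(SYS_perf_event_open, &attr, -1, cpu, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            close(fd);
            return 0;
    }
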
index bfa9aaf36ce6fc6027853bc6e6c90e4f2496bdc1..fb990efa54a8a83d8960aa0cedde11301f819ca2 100644 (file)
@@ -214,7 +214,8 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
 
 int create_perf_stat_counter(struct evsel *evsel,
                             struct perf_stat_config *config,
-                            struct target *target);
+                            struct target *target,
+                            int cpu);
 void
 perf_evlist__print_counters(struct evlist *evlist,
                            struct perf_stat_config *config,
index 6658fbf196e6a36b578ddd1a6245c00456fd36aa..1965aefccb022b98f8362da8b03815a07d5c250c 100644 (file)
@@ -920,6 +920,9 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
                if (curr_map == NULL)
                        return -1;
 
+               if (curr_dso->kernel)
+                       map__kmap(curr_map)->kmaps = kmaps;
+
                if (adjust_kernel_syms) {
                        curr_map->start  = shdr->sh_addr + ref_reloc(kmap);
                        curr_map->end    = curr_map->start + shdr->sh_size;
index 0111d246d1ca250e59b5e828cfad2167045905ad..54a2857c2510aeec691a03f3fde21c43c6d0b913 100644 (file)
@@ -15,7 +15,7 @@ include $(srctree)/../../scripts/Makefile.include
 
 OUTPUT=$(srctree)/
 ifeq ("$(origin O)", "command line")
-       OUTPUT := $(O)/power/acpi/
+       OUTPUT := $(O)/tools/power/acpi/
 endif
 #$(info Determined 'OUTPUT' to be $(OUTPUT))
 
index 28c11c6b4d0683064247f15c8a967f38fe38eb7d..d1d18ff5c9114b182036881b8bc84d647e8a3cd0 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: cfsize - Common get file size function
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 6b41d8b64a0092dceb35e42865248f099e222b8f..c3708f30ab3a3aa19ebc4b67f2e5f32de9e4e295 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: getopt
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d1f3d44e315eb71720cb8c9fbeb0fab87de15ae3..5aaddc79adf7718b5baa62f3795667dfbf140d5f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: oslinuxtbl - Linux OSL for obtaining ACPI tables
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 30913f124dd5870b8a644f667ce2467156e5bdb5..fd05ddee240f9ad9326ba3a2fcc53ca2b4f3480c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: osunixdir - Unix directory access interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 29dfb47adfeb6e3897d2df209423a69a35ce645a..c565546e85bce7eeb2c908b1d53804a03b1e245b 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: osunixmap - Unix OSL for file mappings
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 83d3b3b829b8d550885c07978165abf4dc33b6e6..5b2fd968535fd6b28a71f04ac2d39dd19df21797 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: osunixxf - UNIX OSL interfaces
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 2eb0aaa4f4624f826acf792733b632999aac7deb..26a5eae9f87fb8727890b0726c026db3d0a72010 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: acpidump.h - Include file for acpi_dump utility
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 820baeb5092bd9d8bf19f1fcda31394058b57838..76433296055d7c1fd2a180fbfe52b9560c32ab53 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: apdump - Dump routines for ACPI tables (acpidump)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 16d919bd133b6824f80d533a8f4c490b199023f5..a682bae4e6f6bbeba6ae877b76441551657ef64a 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: apfiles - File-related functions for acpidump utility
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index d8f1b57537d30764f6bd0860b44aa8e3c4ae7152..046e6b8d6baab28771323670226f9d9dfc235a0f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Module Name: apmain - Main module for the acpidump utility
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
 
index 944183f9ed5a3783d2d37e30ff8cb515a43d60cb..2b2b8167c65be14594b7838d9bbc3b9c6780d3c7 100644 (file)
@@ -15,7 +15,7 @@ struct process_cmd_struct {
        int arg;
 };
 
-static const char *version_str = "v1.1";
+static const char *version_str = "v1.2";
 static const int supported_api_ver = 1;
 static struct isst_if_platform_info isst_platform_info;
 static char *progname;
@@ -1384,14 +1384,10 @@ static void set_pbf_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                goto disp_result;
        }
 
-       if (auto_mode) {
-               if (status) {
-                       ret = set_pbf_core_power(cpu);
-                       if (ret)
-                               goto disp_result;
-               } else {
-                       isst_pm_qos_config(cpu, 0, 0);
-               }
+       if (auto_mode && status) {
+               ret = set_pbf_core_power(cpu);
+               if (ret)
+                       goto disp_result;
        }
 
        ret = isst_set_pbf_fact_status(cpu, 1, status);
@@ -1408,6 +1404,9 @@ static void set_pbf_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                }
        }
 
+       if (auto_mode && !status)
+               isst_pm_qos_config(cpu, 0, 0);
+
 disp_result:
        if (status)
                isst_display_result(cpu, outf, "base-freq", "enable",
@@ -1496,14 +1495,10 @@ static void set_fact_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
        int ret;
        int status = *(int *)arg4;
 
-       if (auto_mode) {
-               if (status) {
-                       ret = isst_pm_qos_config(cpu, 1, 1);
-                       if (ret)
-                               goto disp_results;
-               } else {
-                       isst_pm_qos_config(cpu, 0, 0);
-               }
+       if (auto_mode && status) {
+               ret = isst_pm_qos_config(cpu, 1, 1);
+               if (ret)
+                       goto disp_results;
        }
 
        ret = isst_set_pbf_fact_status(cpu, 0, status);
@@ -1524,6 +1519,9 @@ static void set_fact_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                        ret = isst_set_trl(cpu, fact_trl);
                if (ret && auto_mode)
                        isst_pm_qos_config(cpu, 0, 0);
+       } else {
+               if (auto_mode)
+                       isst_pm_qos_config(cpu, 0, 0);
        }
 
 disp_results:
@@ -1638,7 +1636,7 @@ static void set_fact_enable(int arg)
                        if (ret)
                                goto error_disp;
                }
-               isst_display_result(i, outf, "turbo-freq --auto", "enable", 0);
+               isst_display_result(-1, outf, "turbo-freq --auto", "enable", 0);
        }
 
        return;
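
Both hunks in this file enforce the same ordering for the --auto side effects: the SST-CP core-power configuration is applied before a feature is enabled, but only torn down after the disable path has run. A compressed sketch of that shape (stub functions; the real isst helpers also report errors per CPU):

    #include <stdio.h>

    static int  set_core_power(int cpu)      { printf("cp on  cpu%d\n", cpu); return 0; }
    static void clear_core_power(int cpu)    { printf("cp off cpu%d\n", cpu); }
    static int  set_feature(int cpu, int on) { printf("feat=%d cpu%d\n", on, cpu); return 0; }

    static int set_feature_auto(int cpu, int status)
    {
            if (status && set_core_power(cpu))   /* bring up dependencies first */
                    return -1;

            if (set_feature(cpu, status))
                    return -1;

            if (!status)                         /* tear dependencies down last */
                    clear_core_power(cpu);
            return 0;
    }

    int main(void)
    {
            set_feature_auto(0, 1);   /* cp on, then feature on   */
            set_feature_auto(0, 0);   /* feature off, then cp off */
            return 0;
    }
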
index d14c7bcd327af1c17ae947027c39684ac10bbb98..81a119f688a3a04b4c82bd29289043a9627764f0 100644 (file)
@@ -6,6 +6,44 @@
 
 #include "isst.h"
 
+int isst_write_pm_config(int cpu, int cp_state)
+{
+       unsigned int req, resp;
+       int ret;
+
+       if (cp_state)
+               req = BIT(16);
+       else
+               req = 0;
+
+       ret = isst_send_mbox_command(cpu, WRITE_PM_CONFIG, PM_FEATURE, 0, req,
+                                    &resp);
+       if (ret)
+               return ret;
+
+       debug_printf("cpu:%d WRITE_PM_CONFIG resp:%x\n", cpu, resp);
+
+       return 0;
+}
+
+int isst_read_pm_config(int cpu, int *cp_state, int *cp_cap)
+{
+       unsigned int resp;
+       int ret;
+
+       ret = isst_send_mbox_command(cpu, READ_PM_CONFIG, PM_FEATURE, 0, 0,
+                                    &resp);
+       if (ret)
+               return ret;
+
+       debug_printf("cpu:%d READ_PM_CONFIG resp:%x\n", cpu, resp);
+
+       *cp_state = resp & BIT(16);
+       *cp_cap = resp & BIT(0) ? 1 : 0;
+
+       return 0;
+}
+
 int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev)
 {
        unsigned int resp;
@@ -36,6 +74,7 @@ int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev)
 int isst_get_ctdp_control(int cpu, int config_index,
                          struct isst_pkg_ctdp_level_info *ctdp_level)
 {
+       int cp_state, cp_cap;
        unsigned int resp;
        int ret;
 
@@ -50,6 +89,15 @@ int isst_get_ctdp_control(int cpu, int config_index,
        ctdp_level->fact_enabled = !!(resp & BIT(16));
        ctdp_level->pbf_enabled = !!(resp & BIT(17));
 
+       ret = isst_read_pm_config(cpu, &cp_state, &cp_cap);
+       if (ret) {
+               debug_printf("cpu:%d pm_config is not supported\n", cpu);
+       } else {
+               debug_printf("cpu:%d pm_config SST-CP state:%d cap:%d\n", cpu, cp_state, cp_cap);
+               ctdp_level->sst_cp_support = cp_cap;
+               ctdp_level->sst_cp_enabled = cp_state;
+       }
+
        debug_printf(
                "cpu:%d CONFIG_TDP_GET_TDP_CONTROL resp:%x fact_support:%d pbf_support: %d fact_enabled:%d pbf_enabled:%d\n",
                cpu, resp, ctdp_level->fact_support, ctdp_level->pbf_support,
@@ -779,6 +827,13 @@ int isst_pm_qos_config(int cpu, int enable_clos, int priority_type)
                        debug_printf("Turbo-freq feature must be disabled first\n");
                        return -EINVAL;
                }
+               ret = isst_write_pm_config(cpu, 0);
+               if (ret)
+                       perror("isst_write_pm_config");
+       } else {
+               ret = isst_write_pm_config(cpu, 1);
+               if (ret)
+                       perror("isst_write_pm_config");
        }
 
        ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
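
The new READ_PM_CONFIG/WRITE_PM_CONFIG helpers only pack and unpack single bits of the mailbox word: bit 16 carries the SST-CP enable state and bit 0 the capability flag. A trivial self-contained decode/encode example (BIT() defined locally; the mailbox transaction itself is mocked):

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    int main(void)
    {
            unsigned int resp = BIT(16) | BIT(0);      /* pretend mailbox reply */
            int cp_state = !!(resp & BIT(16));         /* enabled?   */
            int cp_cap   = !!(resp & BIT(0));          /* supported? */
            unsigned int req = cp_state ? BIT(16) : 0; /* WRITE_PM_CONFIG payload */

            printf("state:%d cap:%d req:%x\n", cp_state, cp_cap, req);
            return 0;
    }
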
index 040dd09d5eee41ac23cb4f62b47ede5cac09e597..4fb0c1d49d6497ba2af2f50044e66018fbc6571a 100644 (file)
@@ -418,6 +418,17 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
                        snprintf(value, sizeof(value), "unsupported");
                format_and_print(outf, base_level + 4, header, value);
 
+               snprintf(header, sizeof(header),
+                        "speed-select-core-power");
+               if (ctdp_level->sst_cp_support) {
+                       if (ctdp_level->sst_cp_enabled)
+                               snprintf(value, sizeof(value), "enabled");
+                       else
+                               snprintf(value, sizeof(value), "disabled");
+               } else {
+                       snprintf(value, sizeof(value), "unsupported");
+               }
+               format_and_print(outf, base_level + 4, header, value);
+
                if (is_clx_n_platform()) {
                        if (ctdp_level->pbf_support)
                                _isst_pbf_display_information(cpu, outf,
@@ -634,13 +645,15 @@ void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
        char header[256];
        char value[256];
 
-       snprintf(header, sizeof(header), "package-%d",
-                get_physical_package_id(cpu));
-       format_and_print(outf, 1, header, NULL);
-       snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
-       format_and_print(outf, 2, header, NULL);
-       snprintf(header, sizeof(header), "cpu-%d", cpu);
-       format_and_print(outf, 3, header, NULL);
+       if (cpu >= 0) {
+               snprintf(header, sizeof(header), "package-%d",
+                        get_physical_package_id(cpu));
+               format_and_print(outf, 1, header, NULL);
+               snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+               format_and_print(outf, 2, header, NULL);
+               snprintf(header, sizeof(header), "cpu-%d", cpu);
+               format_and_print(outf, 3, header, NULL);
+       }
        snprintf(header, sizeof(header), "%s", feature);
        format_and_print(outf, 4, header, NULL);
        snprintf(header, sizeof(header), "%s", cmd);
index cdf0f8a6dbbfabf3958a9549a991ba08ba3d531b..ad5aa6341d0fc10da35137fe44b6211c52e81589 100644 (file)
 #define PM_CLOS_OFFSET                         0x08
 #define PQR_ASSOC_OFFSET                       0x20
 
+#define READ_PM_CONFIG                         0x94
+#define WRITE_PM_CONFIG                        0x95
+#define PM_FEATURE                             0x03
+
 #define DISP_FREQ_MULTIPLIER 100
 
 struct isst_clos_config {
@@ -119,6 +123,8 @@ struct isst_pkg_ctdp_level_info {
        int pbf_support;
        int fact_enabled;
        int pbf_enabled;
+       int sst_cp_support;
+       int sst_cp_enabled;
        int tdp_ratio;
        int active;
        int tdp_control;
index efe06d6219837c076f37924374ec18ecef0b7632..e59eb9e7f9236b5dc872b270b21b072b4886b6ef 100755 (executable)
@@ -31,15 +31,12 @@ class KunitStatus(Enum):
        TEST_FAILURE = auto()
 
 def create_default_kunitconfig():
-       if not os.path.exists(kunit_kernel.KUNITCONFIG_PATH):
+       if not os.path.exists(kunit_kernel.kunitconfig_path):
                shutil.copyfile('arch/um/configs/kunit_defconfig',
-                               kunit_kernel.KUNITCONFIG_PATH)
+                               kunit_kernel.kunitconfig_path)
 
 def run_tests(linux: kunit_kernel.LinuxSourceTree,
              request: KunitRequest) -> KunitResult:
-       if request.defconfig:
-               create_default_kunitconfig()
-
        config_start = time.time()
        success = linux.build_reconfig(request.build_dir)
        config_end = time.time()
@@ -108,15 +105,22 @@ def main(argv, linux=None):
        run_parser.add_argument('--build_dir',
                                help='As in the make command, it specifies the build '
                                'directory.',
-                               type=str, default=None, metavar='build_dir')
+                               type=str, default='', metavar='build_dir')
 
        run_parser.add_argument('--defconfig',
-                               help='Uses a default kunitconfig.',
+                               help='Uses a default .kunitconfig.',
                                action='store_true')
 
        cli_args = parser.parse_args(argv)
 
        if cli_args.subcommand == 'run':
+               if cli_args.build_dir:
+                       if not os.path.exists(cli_args.build_dir):
+                               os.mkdir(cli_args.build_dir)
+                       kunit_kernel.kunitconfig_path = os.path.join(
+                               cli_args.build_dir,
+                               kunit_kernel.kunitconfig_path)
+
                if cli_args.defconfig:
                        create_default_kunitconfig()
 
index bf3876835331327c7525fb3727647bd9070ec0b9..cc5d844ecca13bfe57f69f13b9c3180834e90a79 100644 (file)
@@ -14,7 +14,7 @@ import os
 import kunit_config
 
 KCONFIG_PATH = '.config'
-KUNITCONFIG_PATH = 'kunitconfig'
+kunitconfig_path = '.kunitconfig'
 
 class ConfigError(Exception):
        """Represents an error trying to configure the Linux kernel."""
@@ -82,7 +82,7 @@ class LinuxSourceTree(object):
 
        def __init__(self):
                self._kconfig = kunit_config.Kconfig()
-               self._kconfig.read_from_file(KUNITCONFIG_PATH)
+               self._kconfig.read_from_file(kunitconfig_path)
                self._ops = LinuxSourceTreeOperations()
 
        def clean(self):
@@ -111,7 +111,7 @@ class LinuxSourceTree(object):
                return True
 
        def build_reconfig(self, build_dir):
-               """Creates a new .config if it is not a subset of the kunitconfig."""
+               """Creates a new .config if it is not a subset of the .kunitconfig."""
                kconfig_path = get_kconfig_path(build_dir)
                if os.path.exists(kconfig_path):
                        existing_kconfig = kunit_config.Kconfig()
@@ -140,10 +140,10 @@ class LinuxSourceTree(object):
                        return False
                return True
 
-       def run_kernel(self, args=[], timeout=None, build_dir=None):
+       def run_kernel(self, args=[], timeout=None, build_dir=''):
                args.extend(['mem=256M'])
                process = self._ops.linux_bin(args, timeout, build_dir)
-               with open('test.log', 'w') as f:
+               with open(os.path.join(build_dir, 'test.log'), 'w') as f:
                        for line in process.stdout:
                                f.write(line.rstrip().decode('ascii') + '\n')
                                yield line.rstrip().decode('ascii')
index 4a12baa0cd4e0d6cd38b3a954a736ece3be32fc9..cba97756ac4a5479f6608dfa28062e2afcb56ae7 100755 (executable)
@@ -174,6 +174,7 @@ class KUnitMainTest(unittest.TestCase):
                kunit.main(['run'], self.linux_source_mock)
                assert self.linux_source_mock.build_reconfig.call_count == 1
                assert self.linux_source_mock.run_kernel.call_count == 1
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=300)
                self.print_mock.assert_any_call(StrContains('Testing complete.'))
 
        def test_run_passes_args_fail(self):
@@ -199,7 +200,14 @@ class KUnitMainTest(unittest.TestCase):
                timeout = 3453
                kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
                assert self.linux_source_mock.build_reconfig.call_count == 1
-               self.linux_source_mock.run_kernel.assert_called_once_with(timeout=timeout)
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=timeout)
+               self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+       def test_run_builddir(self):
+               build_dir = '.kunit'
+               kunit.main(['run', '--build_dir', build_dir], self.linux_source_mock)
+               assert self.linux_source_mock.build_reconfig.call_count == 1
+               self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300)
                self.print_mock.assert_any_call(StrContains('Testing complete.'))
 
 if __name__ == '__main__':
index c4a9196d794c9d42251400122884a28b3ef9b492..dbebf05f5931337adcdfdfe25bd31c261ed14b07 100644 (file)
@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 ldflags-y += --wrap=ioremap_wc
 ldflags-y += --wrap=memremap
-ldflags-y += --wrap=devm_ioremap_nocache
+ldflags-y += --wrap=devm_ioremap
 ldflags-y += --wrap=devm_memremap
 ldflags-y += --wrap=devm_memunmap
-ldflags-y += --wrap=ioremap_nocache
+ldflags-y += --wrap=ioremap
 ldflags-y += --wrap=iounmap
 ldflags-y += --wrap=memunmap
 ldflags-y += --wrap=__devm_request_region
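
The ldflags above use the GNU linker's --wrap mechanism: with --wrap=sym, every undefined reference to sym resolves to __wrap_sym, while __real_sym still reaches the original, which is how the nvdimm tests interpose on ioremap() and friends. A minimal hypothetical example of the same mechanism around malloc (build with: gcc demo.c -Wl,--wrap=malloc):

    #include <stdio.h>
    #include <stdlib.h>

    void *__real_malloc(size_t size);   /* resolved to the real malloc by the linker */

    void *__wrap_malloc(size_t size)    /* all malloc() calls land here */
    {
            printf("malloc(%zu)\n", size);
            return __real_malloc(size);
    }

    int main(void)
    {
            free(malloc(32));
            return 0;
    }
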
index 3f55f2f99112fe5475a0d414e6f33e4d5d7dde87..03e40b3b0106e1d3c429d5f88552cedf717895aa 100644 (file)
@@ -73,7 +73,7 @@ void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
        return fallback_fn(offset, size);
 }
 
-void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
+void __iomem *__wrap_devm_ioremap(struct device *dev,
                resource_size_t offset, unsigned long size)
 {
        struct nfit_test_resource *nfit_res = get_nfit_res(offset);
@@ -81,9 +81,9 @@ void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
        if (nfit_res)
                return (void __iomem *) nfit_res->buf + offset
                        - nfit_res->res.start;
-       return devm_ioremap_nocache(dev, offset, size);
+       return devm_ioremap(dev, offset, size);
 }
-EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
+EXPORT_SYMBOL(__wrap_devm_ioremap);
 
 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
@@ -187,11 +187,11 @@ void __wrap_devm_memunmap(struct device *dev, void *addr)
 }
 EXPORT_SYMBOL(__wrap_devm_memunmap);
 
-void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
+void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size)
 {
-       return __nfit_test_ioremap(offset, size, ioremap_nocache);
+       return __nfit_test_ioremap(offset, size, ioremap);
 }
-EXPORT_SYMBOL(__wrap_ioremap_nocache);
+EXPORT_SYMBOL(__wrap_ioremap);
 
 void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
 {
index 0bf5640f1f07102c22a5514a0f0e68b8e14fd69c..db3c07beb9d1ca146ceb5df34cd01aad61802e1d 100644 (file)
@@ -207,8 +207,6 @@ typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
                 const guid_t *guid, u64 rev, u64 func,
                 union acpi_object *argv4);
-void __iomem *__wrap_ioremap_nocache(resource_size_t offset,
-               unsigned long size);
 void __wrap_iounmap(volatile void __iomem *addr);
 void nfit_test_setup(nfit_test_lookup_fn lookup,
                nfit_test_evaluate_dsm_fn evaluate);
index b001c602414b717440a52b94370273ef05e66c4b..c4939a2a5f5d1be80b2f63bb0b78bc5a7d3c6e9e 100644 (file)
@@ -50,6 +50,7 @@ TARGETS += splice
 TARGETS += static_keys
 TARGETS += sync
 TARGETS += sysctl
+TARGETS += timens
 ifneq (1, $(quicktest))
 TARGETS += timers
 endif
index 419652458da4ab1d7d855e430c502c7b55d82cd7..1ff0a9f49c01ca82ea14678fba4b476202557da9 100644 (file)
@@ -40,3 +40,4 @@ xdping
 test_cpp
 /no_alu32
 /bpf_gcc
+bpf_helper_defs.h
index e0fe01d9ec33b42be030487b8f930c534b023879..e2fd6f8d579cdd989c602340a93c97f9600cc780 100644 (file)
@@ -120,9 +120,9 @@ force:
 $(BPFOBJ): force
        $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
 
-BPF_HELPERS := $(BPFDIR)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h)
-$(BPFDIR)/bpf_helper_defs.h:
-       $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ bpf_helper_defs.h
+BPF_HELPERS := $(OUTPUT)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h)
+$(OUTPUT)/bpf_helper_defs.h:
+       $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ $(OUTPUT)/bpf_helper_defs.h
 
 # Get Clang's default includes on this system, as opposed to those seen by
 # '-target bpf'. This fixes "missing" files on some architectures/distros,
diff --git a/tools/testing/selftests/bpf/test_ftrace.sh b/tools/testing/selftests/bpf/test_ftrace.sh
new file mode 100755 (executable)
index 0000000..20de7bb
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+TR=/sys/kernel/debug/tracing
+clear_trace() { # reset trace output
+    echo > $TR/trace
+}
+
+disable_tracing() { # stop trace recording
+    echo 0 > $TR/tracing_on
+}
+
+enable_tracing() { # start trace recording
+    echo 1 > $TR/tracing_on
+}
+
+reset_tracer() { # reset the current tracer
+    echo nop > $TR/current_tracer
+}
+
+disable_tracing
+clear_trace
+
+echo "" > $TR/set_ftrace_filter
+echo '*printk* *console* *wake* *serial* *lock*' > $TR/set_ftrace_notrace
+
+echo "bpf_prog_test*" > $TR/set_graph_function
+echo "" > $TR/set_graph_notrace
+
+echo function_graph > $TR/current_tracer
+
+enable_tracing
+./test_progs -t fentry
+./test_progs -t fexit
+disable_tracing
+clear_trace
+
+reset_tracer
+
+exit 0
index d27fd929abb9003ec4562e970ee321c17e730532..87eaa49609a02f33dc7463e9625a7efdc1bc84e9 100644 (file)
@@ -408,10 +408,10 @@ static void update_map(int fd, int index)
        assert(!bpf_map_update_elem(fd, &index, &value, 0));
 }
 
-static int create_prog_dummy1(enum bpf_prog_type prog_type)
+static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
 {
        struct bpf_insn prog[] = {
-               BPF_MOV64_IMM(BPF_REG_0, 42),
+               BPF_MOV64_IMM(BPF_REG_0, ret),
                BPF_EXIT_INSN(),
        };
 
@@ -419,14 +419,15 @@ static int create_prog_dummy1(enum bpf_prog_type prog_type)
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 }
 
-static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
+static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
+                                 int idx, int ret)
 {
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_3, idx),
                BPF_LD_MAP_FD(BPF_REG_2, mfd),
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                             BPF_FUNC_tail_call),
-               BPF_MOV64_IMM(BPF_REG_0, 41),
+               BPF_MOV64_IMM(BPF_REG_0, ret),
                BPF_EXIT_INSN(),
        };
 
@@ -435,10 +436,9 @@ static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
 }
 
 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
-                            int p1key)
+                            int p1key, int p2key, int p3key)
 {
-       int p2key = 1;
-       int mfd, p1fd, p2fd;
+       int mfd, p1fd, p2fd, p3fd;
 
        mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
                             sizeof(int), max_elem, 0);
@@ -449,23 +449,24 @@ static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
                return -1;
        }
 
-       p1fd = create_prog_dummy1(prog_type);
-       p2fd = create_prog_dummy2(prog_type, mfd, p2key);
-       if (p1fd < 0 || p2fd < 0)
-               goto out;
+       p1fd = create_prog_dummy_simple(prog_type, 42);
+       p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
+       p3fd = create_prog_dummy_simple(prog_type, 24);
+       if (p1fd < 0 || p2fd < 0 || p3fd < 0)
+               goto err;
        if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
-               goto out;
+               goto err;
        if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
-               goto out;
+               goto err;
+       if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
+err:
+               close(mfd);
+               mfd = -1;
+       }
+       close(p3fd);
        close(p2fd);
        close(p1fd);
-
        return mfd;
-out:
-       close(p2fd);
-       close(p1fd);
-       close(mfd);
-       return -1;
 }
 
 static int create_map_in_map(void)
@@ -684,7 +685,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
        }
 
        if (*fixup_prog1) {
-               map_fds[4] = create_prog_array(prog_type, 4, 0);
+               map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
                do {
                        prog[*fixup_prog1].imm = map_fds[4];
                        fixup_prog1++;
@@ -692,7 +693,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
        }
 
        if (*fixup_prog2) {
-               map_fds[5] = create_prog_array(prog_type, 8, 7);
+               map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
                do {
                        prog[*fixup_prog2].imm = map_fds[5];
                        fixup_prog2++;
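
The reworked create_prog_array() above uses a compact cleanup idiom: the error label lives inside the final if, so failures from any earlier step jump into the invalidation branch and then fall through the close calls shared with the success path. A generic sketch of the same control flow (illustrative names; jumping into a block is legal C as long as no variable-length arrays are skipped):

    #include <stdio.h>

    static int step(int fail) { return fail ? -1 : 1; }

    static int make_fixture(int fail_at)
    {
            int a, b, c, handle = 100;             /* pretend fds */

            a = step(fail_at == 1);
            b = step(fail_at == 2);
            c = step(fail_at == 3);
            if (a < 0 || b < 0 || c < 0)
                    goto err;
            if (step(fail_at == 4) < 0) {
    err:                                           /* jump target inside the if */
                    handle = -1;                   /* invalidate the result ... */
            }
            /* ... then fall through the cleanup shared with the success path */
            printf("a=%d b=%d c=%d -> handle=%d\n", a, b, c, handle);
            return handle;
    }

    int main(void)
    {
            make_fixture(0);   /* handle=100 */
            make_fixture(2);   /* handle=-1  */
            return 0;
    }
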
index ebcbf154c4600d7c9f8fc6d1acc63061cbaaa1df..604b4615173637ce4476ebd0ef6098fce9a03930 100644 (file)
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
        /* bpf_tail_call() */
-       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_MOV64_IMM(BPF_REG_3, 3),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
        BPF_EMIT_CALL(BPF_FUNC_sk_release),
        /* bpf_tail_call() */
-       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_MOV64_IMM(BPF_REG_3, 3),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
        BPF_SK_LOOKUP(sk_lookup_tcp),
        /* bpf_tail_call() */
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
-       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_MOV64_IMM(BPF_REG_3, 3),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
index a9a8f620e71cd8b84d74e9142fe9cb959f02d0a6..94c399d1facae30a6acc3a6867dc2f2e8f47e498 100644 (file)
 {
        "runtime/jit: tail_call within bounds, no prog",
        .insns = {
+       BPF_MOV64_IMM(BPF_REG_3, 3),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_prog1 = { 1 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "runtime/jit: tail_call within bounds, key 2",
+       .insns = {
        BPF_MOV64_IMM(BPF_REG_3, 2),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
        },
        .fixup_prog1 = { 1 },
        .result = ACCEPT,
+       .retval = 24,
+},
+{
+       "runtime/jit: tail_call within bounds, key 2 / key 2, first branch",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 13),
+       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_prog1 = { 5, 9 },
+       .result = ACCEPT,
+       .retval = 24,
+},
+{
+       "runtime/jit: tail_call within bounds, key 2 / key 2, second branch",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 14),
+       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_prog1 = { 5, 9 },
+       .result = ACCEPT,
+       .retval = 24,
+},
+{
+       "runtime/jit: tail_call within bounds, key 0 / key 2, first branch",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 13),
+       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_prog1 = { 5, 9 },
+       .result = ACCEPT,
+       .retval = 24,
+},
+{
+       "runtime/jit: tail_call within bounds, key 0 / key 2, second branch",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 14),
+       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_3, 2),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_prog1 = { 5, 9 },
+       .result = ACCEPT,
+       .retval = 42,
+},
+{
+       "runtime/jit: tail_call within bounds, different maps, first branch",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 13),
+       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_prog1 = { 5 },
+       .fixup_prog2 = { 9 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "tail_call abusing map_ptr",
+       .result = ACCEPT,
        .retval = 1,
 },
+{
+       "runtime/jit: tail_call within bounds, different maps, second branch",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 14),
+       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, cb[0])),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_prog1 = { 5 },
+       .fixup_prog2 = { 9 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "tail_call abusing map_ptr",
+       .result = ACCEPT,
+       .retval = 42,
+},
 {
        "runtime/jit: tail_call out of bounds",
        .insns = {
index 47315fe48d5af51aa0593b0e0546dfff142976ba..24dd8ed485802423be0279166d6735b33f261d71 100755 (executable)
@@ -232,7 +232,7 @@ test_mc_aware()
        stop_traffic
        local ucth1=${uc_rate[1]}
 
-       start_traffic $h1 own bc bc
+       start_traffic $h1 192.0.2.65 bc bc
 
        local d0=$(date +%s)
        local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
@@ -254,7 +254,11 @@ test_mc_aware()
                        ret = 100 * ($ucth1 - $ucth2) / $ucth1
                        if (ret > 0) { ret } else { 0 }
                    ")
-       check_err $(bc <<< "$deg > 25")
+
+       # A minimum shaper of 200Mbps on the MC TCs should cause about 20%
+       # degradation on a 1Gbps link.
+       check_err $(bc <<< "$deg < 15") "Minimum shaper not in effect"
+       check_err $(bc <<< "$deg > 25") "MC traffic degrades UC performance too much"
 
        local interval=$((d1 - d0))
        local mc_ir=$(rate $u0 $u1 $interval)
index e62f3d4f68da484405a5ae869ef9a40b500c7b46..78ae4aaf7141ad285847b94adf52c87229208520 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 CFLAGS += -I../../../../../usr/include/
-LDFLAGS += -lpthread
+LDLIBS += -lpthread
 TEST_GEN_PROGS := epoll_wakeup_test
 
 include ../../lib.mk
index b879305a766d07f0f76f5ae5cfda4eb7f5ef0ef4..5b8c0fedee7613b77456409517bcdea1b76707c3 100755 (executable)
@@ -34,6 +34,12 @@ test_modprobe()
 
 check_mods()
 {
+       local uid=$(id -u)
+       if [ $uid -ne 0 ]; then
+               echo "skip all tests: must be run as root" >&2
+               exit $ksft_skip
+       fi
+
        trap "test_modprobe" EXIT
        if [ ! -d $DIR ]; then
                modprobe test_firmware
index 36fb59f886ea81baef28a712c13f6d4ff204ac12..1a52f2883fe02cd5718903be31e7d6a03cc31639 100644 (file)
@@ -3,6 +3,8 @@
 # description: ftrace - stacktrace filter command
 # flags: instance
 
+[ ! -f set_ftrace_filter ] && exit_unsupported
+
 echo _do_fork:stacktrace >> set_ftrace_filter
 
 grep -q "_do_fork:stacktrace:unlimited" set_ftrace_filter
index 86a1f07ef2ca70c672b4ea897340dfde4515d2f7..71fa3f49e35e20b7bf36baf74b06813e2d1336f6 100644 (file)
@@ -15,6 +15,11 @@ if [ $NP -eq 1 ] ;then
   exit_unresolved
 fi
 
+if ! grep -q "function" available_tracers ; then
+  echo "Function trace is not enabled"
+  exit_unsupported
+fi
+
 ORIG_CPUMASK=`cat tracing_cpumask`
 
 do_reset() {
index 86986c4bba549ad770d73b2990ee76f4ebf6a5e9..5d4550591ff9f789d0f783c891b3cd766ceee27d 100644 (file)
@@ -46,6 +46,9 @@ reset_events_filter() { # reset all current setting filters
 }
 
 reset_ftrace_filter() { # reset all triggers in set_ftrace_filter
+    if [ ! -f set_ftrace_filter ]; then
+      return 0
+    fi
     echo > set_ftrace_filter
     grep -v '^#' set_ftrace_filter | while read t; do
        tr=`echo $t | cut -d: -f2`
@@ -93,7 +96,7 @@ initialize_ftrace() { # Reset ftrace to initial-state
     disable_events
     [ -f set_event_pid ] && echo > set_event_pid
     [ -f set_ftrace_pid ] && echo > set_ftrace_pid
-    [ -f set_ftrace_filter ] && echo | tee set_ftrace_*
+    [ -f set_ftrace_notrace ] && echo > set_ftrace_notrace
     [ -f set_graph_function ] && echo | tee set_graph_*
     [ -f stack_trace_filter ] && echo > stack_trace_filter
     [ -f kprobe_events ] && echo > kprobe_events
index 5862eee91e1d9b474bb1b91b6c8628e87aa53817..6e3dbe5f96b7b76c6361f5e286c868e9c3e7edfa 100644 (file)
@@ -20,9 +20,9 @@ while read i; do
   test $N -eq 256 && break
 done
 
-L=`wc -l kprobe_events`
-if [ $L -ne $N ]; then
-  echo "The number of kprobes events ($L) is not $N"
+L=`wc -l < kprobe_events`
+if [ $L -ne 256 ]; then
+  echo "The number of kprobes events ($L) is not 256"
   exit_fail
 fi
 
index 1221240f8cf6ed2f5975b63a70b194aa965c617a..3f2aee115f6e22da3939ea425106e364a9efc42f 100644 (file)
@@ -21,10 +21,10 @@ grep -q "snapshot()" README || exit_unsupported # version issue
 
 echo "Test expected snapshot action failure"
 
-echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail
+echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> events/sched/sched_waking/trigger && exit_fail
 
 echo "Test expected save action failure"
 
-echo 'hist:keys=comm:onmatch(sched.sched_wakeup).save(comm,prio)' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail
+echo 'hist:keys=comm:onmatch(sched.sched_wakeup).save(comm,prio)' >> events/sched/sched_waking/trigger && exit_fail
 
 exit_xfail
index 064a284e4e75905c4c4c8fbaaee81641c7330a86..c80007aa9f862e187da0dc1396025ed52c6a4e5c 100644 (file)
@@ -16,7 +16,7 @@ grep -q "onchange(var)" README || exit_unsupported # version issue
 
 echo "Test onchange action"
 
-echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> events/sched/sched_waking/trigger
 
 ping $LOCALHOST -c 3
 nice -n 1 ping $LOCALHOST -c 3
index 18fff69fc433346f59e7ea3b3fe1d2ded727b605..f546c1b66a9b1d0b58bfa2a247a525d7e9647b25 100644 (file)
@@ -23,9 +23,9 @@ grep -q "snapshot()" README || exit_unsupported # version issue
 
 echo "Test snapshot action"
 
-echo 1 > /sys/kernel/debug/tracing/events/sched/enable
+echo 1 > events/sched/enable
 
-echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio):onchange($newprio).snapshot() if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio):onchange($newprio).snapshot() if comm=="ping"' >> events/sched/sched_waking/trigger
 
 ping $LOCALHOST -c 3
 nice -n 1 ping $LOCALHOST -c 3
index 18e1c7992d30d8e2ddf5841b792a72ab604c1e1b..fb4733faff12818ea06f36fde72d635490e8071f 100755 (executable)
@@ -9,7 +9,7 @@
 #
 #   #!/bin/sh
 #   SPDX-License-Identifier: GPL-2.0+
-#   $(dirname $0)/../kselftest_module.sh "description" module_name
+#   $(dirname $0)/../kselftest/module.sh "description" module_name
 #
 # Example: tools/testing/selftests/lib/printf.sh
 
index ec7e48118183504758bcca0b19d71f34c6b5717f..31f7c2a0a8bd462da9329d6e90b7fae60ca66bc5 100755 (executable)
@@ -3,6 +3,7 @@
 # Prefix all lines with "# ", unbuffered. Command being piped in may need
 # to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd".
 use strict;
+use IO::Handle;
 
 binmode STDIN;
 binmode STDOUT;
index 84de7bc74f2cf1f5272a8bd3b0f81ecd00a296a6..a8d20cbb711cf658ab93e4b8bfe97b04865d0845 100644 (file)
@@ -79,6 +79,7 @@ run_one()
                if [ $rc -eq $skip_rc ]; then   \
                        echo "not ok $test_num $TEST_HDR_MSG # SKIP"
                elif [ $rc -eq $timeout_rc ]; then \
+                       echo "#"
                        echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT"
                else
                        echo "not ok $test_num $TEST_HDR_MSG # exit=$rc"
index 31eb09e38729520926ab784648af744d9d172eb1..a6e3d5517a6fa0378b164bbd41e2c8f754602f00 100644 (file)
@@ -7,6 +7,9 @@
 MAX_RETRIES=600
 RETRY_INTERVAL=".1"    # seconds
 
+# Kselftest framework requirement - SKIP code is 4
+ksft_skip=4
+
 # log(msg) - write message to kernel log
 #      msg - insightful words
 function log() {
@@ -18,7 +21,16 @@ function log() {
 function skip() {
        log "SKIP: $1"
        echo "SKIP: $1" >&2
-       exit 4
+       exit $ksft_skip
+}
+
+# root test
+function is_root() {
+       uid=$(id -u)
+       if [ $uid -ne 0 ]; then
+               echo "skip all tests: must be run as root" >&2
+               exit $ksft_skip
+       fi
 }
 
 # die(msg) - game over, man
@@ -62,6 +74,7 @@ function set_ftrace_enabled() {
 #               for verbose livepatching output and turn on
 #               the ftrace_enabled sysctl.
 function setup_config() {
+       is_root
        push_config
        set_dynamic_debug
        set_ftrace_enabled 1
index dc2908c22c265fc0f3c4b8b4022da94c839a1a03..a082127081157de7f04a1f91e29d4f43b795d54b 100755 (executable)
@@ -8,8 +8,7 @@ MOD_LIVEPATCH=test_klp_state
 MOD_LIVEPATCH2=test_klp_state2
 MOD_LIVEPATCH3=test_klp_state3
 
-set_dynamic_debug
-
+setup_config
 
 # TEST: Loading and removing a module that modifies the system state
 
index 6e4626ae71b0caf2d8ec6d3c3bfb02f5892fa69b..8f4057310b5b41a2c6b6bf52c448efdfaa4391e5 100755 (executable)
@@ -1,6 +1,9 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 ALL_TESTS="loopback_test"
 NUM_NETIFS=2
 source tc_common.sh
@@ -72,6 +75,11 @@ setup_prepare()
 
        h1_create
        h2_create
+
+       if ethtool -k $h1 | grep loopback | grep -q fixed; then
+               log_test "SKIP: dev $h1 does not support loopback feature"
+               exit $ksft_skip
+       fi
 }
 
 cleanup()
index fef88eb4b8731fc151171fac0c302b9c1b2fc4aa..fa6a88c50750d1a885d0aa77f5e8afda603e0aa0 100755 (executable)
@@ -36,7 +36,7 @@ h2_destroy()
 {
        ip -6 route del 2001:db8:1::/64 vrf v$h2
        ip -4 route del 192.0.2.0/28 vrf v$h2
-       simple_if_fini $h2 192.0.2.130/28
+       simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
 }
 
 router_create()
index d697815d27855586b64986cb014aa4daf7f35805..71a62e7e35b1c1bdfd78b8c43d14a2547d01c951 100755 (executable)
@@ -11,9 +11,9 @@
 #      R1 and R2 (also implemented with namespaces), with different MTUs:
 #
 #        segment a_r1    segment b_r1          a_r1: 2000
-#      .--------------R1--------------.        a_r2: 1500
-#      A                               B       a_r3: 2000
-#      '--------------R2--------------'        a_r4: 1400
+#      .--------------R1--------------.        b_r1: 1400
+#      A                               B       a_r2: 2000
+#      '--------------R2--------------'        b_r2: 1500
 #        segment a_r2    segment b_r2
 #
 #      Check that PMTU exceptions with the correct PMTU are created. Then
index 13e5ef615026f5d87b885a6b4da39299906ec994..0ea44d975b6c19d47f5b98e24f178f38c2646c19 100644 (file)
@@ -722,34 +722,6 @@ TEST_F(tls, recv_lowat)
        EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
 }
 
-TEST_F(tls, recv_rcvbuf)
-{
-       char send_mem[4096];
-       char recv_mem[4096];
-       int rcv_buf = 1024;
-
-       memset(send_mem, 0x1c, sizeof(send_mem));
-
-       EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVBUF,
-                            &rcv_buf, sizeof(rcv_buf)), 0);
-
-       EXPECT_EQ(send(self->fd, send_mem, 512, 0), 512);
-       memset(recv_mem, 0, sizeof(recv_mem));
-       EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), 512);
-       EXPECT_EQ(memcmp(send_mem, recv_mem, 512), 0);
-
-       if (self->notls)
-               return;
-
-       EXPECT_EQ(send(self->fd, send_mem, 4096, 0), 4096);
-       memset(recv_mem, 0, sizeof(recv_mem));
-       EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
-       EXPECT_EQ(errno, EMSGSIZE);
-
-       EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
-       EXPECT_EQ(errno, EMSGSIZE);
-}
-
 TEST_F(tls, bidir)
 {
        char const *test_str = "test_read";
index 16571ac1dab40267fc2141f924839aa671784d1d..d3e0809ab3681fa40cff983c7e48be9d6643eca6 100755 (executable)
@@ -226,17 +226,19 @@ check_transfer()
        return 0
 }
 
-test_tcp_forwarding()
+test_tcp_forwarding_ip()
 {
        local nsa=$1
        local nsb=$2
+       local dstip=$3
+       local dstport=$4
        local lret=0
 
        ip netns exec $nsb nc -w 5 -l -p 12345 < "$ns2in" > "$ns2out" &
        lpid=$!
 
        sleep 1
-       ip netns exec $nsa nc -w 4 10.0.2.99 12345 < "$ns1in" > "$ns1out" &
+       ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$ns1in" > "$ns1out" &
        cpid=$!
 
        sleep 3
@@ -258,6 +260,28 @@ test_tcp_forwarding()
        return $lret
 }
 
+test_tcp_forwarding()
+{
+       test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
+
+       return $?
+}
+
+test_tcp_forwarding_nat()
+{
+       local lret
+
+       test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345
+       lret=$?
+
+       if [ $lret -eq 0 ] ; then
+               test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666
+               lret=$?
+       fi
+
+       return $lret
+}
+
 make_file "$ns1in" "ns1"
 make_file "$ns2in" "ns2"
 
@@ -283,14 +307,19 @@ ip -net ns2 route add 192.168.10.1 via 10.0.2.1
 # Same, but with NAT enabled.
 ip netns exec nsr1 nft -f - <<EOF
 table ip nat {
+   chain prerouting {
+      type nat hook prerouting priority 0; policy accept;
+      meta iif "veth0" ip daddr 10.6.6.6 tcp dport 1666 counter dnat ip to 10.0.2.99:12345
+   }
+
    chain postrouting {
       type nat hook postrouting priority 0; policy accept;
-      meta oifname "veth1" masquerade
+      meta oifname "veth1" counter masquerade
    }
 }
 EOF
 
-test_tcp_forwarding ns1 ns2
+test_tcp_forwarding_nat ns1 ns2
 
 if [ $? -eq 0 ] ;then
        echo "PASS: flow offloaded for ns1/ns2 with NAT"
@@ -313,7 +342,7 @@ fi
 ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
 ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
 
-test_tcp_forwarding ns1 ns2
+test_tcp_forwarding_nat ns1 ns2
 if [ $? -eq 0 ] ;then
        echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery"
 else
index 1be55e7057800288b45706d27e874c94bba476a9..d7e07f4c3d7fc332c1c67a039539bc51999834d4 100755 (executable)
@@ -8,9 +8,14 @@ ksft_skip=4
 ret=0
 test_inet_nat=true
 
+sfx=$(mktemp -u "XXXXXXXX")
+ns0="ns0-$sfx"
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+
 cleanup()
 {
-       for i in 0 1 2; do ip netns del ns$i;done
+       for i in 0 1 2; do ip netns del ns$i-"$sfx";done
 }
 
 nft --version > /dev/null 2>&1
@@ -25,40 +30,49 @@ if [ $? -ne 0 ];then
        exit $ksft_skip
 fi
 
-ip netns add ns0
+ip netns add "$ns0"
 if [ $? -ne 0 ];then
-       echo "SKIP: Could not create net namespace"
+       echo "SKIP: Could not create net namespace $ns0"
        exit $ksft_skip
 fi
 
 trap cleanup EXIT
 
-ip netns add ns1
-ip netns add ns2
+ip netns add "$ns1"
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace $ns1"
+       exit $ksft_skip
+fi
+
+ip netns add "$ns2"
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace $ns2"
+       exit $ksft_skip
+fi
 
-ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
+ip link add veth0 netns "$ns0" type veth peer name eth0 netns "$ns1" > /dev/null 2>&1
 if [ $? -ne 0 ];then
     echo "SKIP: No virtual ethernet pair device support in kernel"
     exit $ksft_skip
 fi
-ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+ip link add veth1 netns "$ns0" type veth peer name eth0 netns "$ns2"
 
-ip -net ns0 link set lo up
-ip -net ns0 link set veth0 up
-ip -net ns0 addr add 10.0.1.1/24 dev veth0
-ip -net ns0 addr add dead:1::1/64 dev veth0
+ip -net "$ns0" link set lo up
+ip -net "$ns0" link set veth0 up
+ip -net "$ns0" addr add 10.0.1.1/24 dev veth0
+ip -net "$ns0" addr add dead:1::1/64 dev veth0
 
-ip -net ns0 link set veth1 up
-ip -net ns0 addr add 10.0.2.1/24 dev veth1
-ip -net ns0 addr add dead:2::1/64 dev veth1
+ip -net "$ns0" link set veth1 up
+ip -net "$ns0" addr add 10.0.2.1/24 dev veth1
+ip -net "$ns0" addr add dead:2::1/64 dev veth1
 
 for i in 1 2; do
-  ip -net ns$i link set lo up
-  ip -net ns$i link set eth0 up
-  ip -net ns$i addr add 10.0.$i.99/24 dev eth0
-  ip -net ns$i route add default via 10.0.$i.1
-  ip -net ns$i addr add dead:$i::99/64 dev eth0
-  ip -net ns$i route add default via dead:$i::1
+  ip -net ns$i-$sfx link set lo up
+  ip -net ns$i-$sfx link set eth0 up
+  ip -net ns$i-$sfx addr add 10.0.$i.99/24 dev eth0
+  ip -net ns$i-$sfx route add default via 10.0.$i.1
+  ip -net ns$i-$sfx addr add dead:$i::99/64 dev eth0
+  ip -net ns$i-$sfx route add default via dead:$i::1
 done
 
 bad_counter()
@@ -66,8 +80,9 @@ bad_counter()
        local ns=$1
        local counter=$2
        local expect=$3
+       local tag=$4
 
-       echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
+       echo "ERROR: $counter counter in $ns has unexpected value (expected $expect) at $tag" 1>&2
        ip netns exec $ns nft list counter inet filter $counter 1>&2
 }
 
@@ -78,24 +93,24 @@ check_counters()
 
        cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
        if [ $? -ne 0 ]; then
-               bad_counter $ns ns0in "packets 1 bytes 84"
+               bad_counter $ns ns0in "packets 1 bytes 84" "check_counters 1"
                lret=1
        fi
        cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
        if [ $? -ne 0 ]; then
-               bad_counter $ns ns0out "packets 1 bytes 84"
+               bad_counter $ns ns0out "packets 1 bytes 84" "check_counters 2"
                lret=1
        fi
 
        expect="packets 1 bytes 104"
        cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
        if [ $? -ne 0 ]; then
-               bad_counter $ns ns0in6 "$expect"
+               bad_counter $ns ns0in6 "$expect" "check_counters 3"
                lret=1
        fi
        cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
        if [ $? -ne 0 ]; then
-               bad_counter $ns ns0out6 "$expect"
+               bad_counter $ns ns0out6 "$expect" "check_counters 4"
                lret=1
        fi
 
@@ -107,41 +122,41 @@ check_ns0_counters()
        local ns=$1
        local lret=0
 
-       cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
+       cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
        if [ $? -ne 0 ]; then
-               bad_counter ns0 ns0in "packets 0 bytes 0"
+               bad_counter "$ns0" ns0in "packets 0 bytes 0" "check_ns0_counters 1"
                lret=1
        fi
 
-       cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
+       cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
        if [ $? -ne 0 ]; then
-               bad_counter ns0 ns0in6 "packets 0 bytes 0"
+               bad_counter "$ns0" ns0in6 "packets 0 bytes 0" "check_ns0_counters 2"
                lret=1
        fi
 
-       cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
+       cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
        if [ $? -ne 0 ]; then
-               bad_counter ns0 ns0out "packets 0 bytes 0"
+               bad_counter "$ns0" ns0out "packets 0 bytes 0" "check_ns0_counters 3"
                lret=1
        fi
-       cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
+       cnt=$(ip netns exec "$ns0" nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
        if [ $? -ne 0 ]; then
-               bad_counter ns0 ns0out6 "packets 0 bytes 0"
+               bad_counter "$ns0" ns0out6 "packets 0 bytes 0" "check_ns0_counters 4"
                lret=1
        fi
 
        for dir in "in" "out" ; do
                expect="packets 1 bytes 84"
-               cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ${ns}${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns0 $ns$dir "$expect"
+                       bad_counter "$ns0" $ns$dir "$expect" "check_ns0_counters 5"
                        lret=1
                fi
 
                expect="packets 1 bytes 104"
-               cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns0 $ns$dir6 "$expect"
+                       bad_counter "$ns0" ${ns}${dir}6 "$expect" "check_ns0_counters 6"
                        lret=1
                fi
        done
@@ -152,7 +167,7 @@ check_ns0_counters()
 reset_counters()
 {
        for i in 0 1 2;do
-               ip netns exec ns$i nft reset counters inet > /dev/null
+               ip netns exec ns$i-$sfx nft reset counters inet > /dev/null
        done
 }
 
@@ -166,7 +181,7 @@ test_local_dnat6()
                IPF="ip6"
        fi
 
-ip netns exec ns0 nft -f - <<EOF
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
 table $family nat {
        chain output {
                type nat hook output priority 0; policy accept;
@@ -180,7 +195,7 @@ EOF
        fi
 
        # ping netns1, expect rewrite to netns2
-       ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
+       ip netns exec "$ns0" ping -q -c 1 dead:1::99 > /dev/null
        if [ $? -ne 0 ]; then
                lret=1
                echo "ERROR: ping6 failed"
@@ -189,18 +204,18 @@ EOF
 
        expect="packets 0 bytes 0"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns0 ns1$dir "$expect"
+                       bad_counter "$ns0" ns1$dir "$expect" "test_local_dnat6 1"
                        lret=1
                fi
        done
 
        expect="packets 1 bytes 104"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns0 ns2$dir "$expect"
+                       bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat6 2"
                        lret=1
                fi
        done
@@ -208,9 +223,9 @@ EOF
        # expect 0 count in ns1
        expect="packets 0 bytes 0"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns1" ns0$dir "$expect" "test_local_dnat6 3"
                        lret=1
                fi
        done
@@ -218,15 +233,15 @@ EOF
        # expect 1 packet in ns2
        expect="packets 1 bytes 104"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns0$dir "$expect"
+                       bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat6 4"
                        lret=1
                fi
        done
 
-       test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was $family NATted to ns2"
-       ip netns exec ns0 nft flush chain ip6 nat output
+       test $lret -eq 0 && echo "PASS: ipv6 ping to $ns1 was $family NATted to $ns2"
+       ip netns exec "$ns0" nft flush chain ip6 nat output
 
        return $lret
 }
@@ -241,7 +256,7 @@ test_local_dnat()
                IPF="ip"
        fi
 
-ip netns exec ns0 nft -f - <<EOF 2>/dev/null
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF 2>/dev/null
 table $family nat {
        chain output {
                type nat hook output priority 0; policy accept;
@@ -260,7 +275,7 @@ EOF
        fi
 
        # ping netns1, expect rewrite to netns2
-       ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+       ip netns exec "$ns0" ping -q -c 1 10.0.1.99 > /dev/null
        if [ $? -ne 0 ]; then
                lret=1
                echo "ERROR: ping failed"
@@ -269,18 +284,18 @@ EOF
 
        expect="packets 0 bytes 0"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns0 ns1$dir "$expect"
+                       bad_counter "$ns0" ns1$dir "$expect" "test_local_dnat 1"
                        lret=1
                fi
        done
 
        expect="packets 1 bytes 84"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns0 ns2$dir "$expect"
+                       bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat 2"
                        lret=1
                fi
        done
@@ -288,9 +303,9 @@ EOF
        # expect 0 count in ns1
        expect="packets 0 bytes 0"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns1" ns0$dir "$expect" "test_local_dnat 3"
                        lret=1
                fi
        done
@@ -298,19 +313,19 @@ EOF
        # expect 1 packet in ns2
        expect="packets 1 bytes 84"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns0$dir "$expect"
+                       bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat 4"
                        lret=1
                fi
        done
 
-       test $lret -eq 0 && echo "PASS: ping to ns1 was $family NATted to ns2"
+       test $lret -eq 0 && echo "PASS: ping to $ns1 was $family NATted to $ns2"
 
-       ip netns exec ns0 nft flush chain $family nat output
+       ip netns exec "$ns0" nft flush chain $family nat output
 
        reset_counters
-       ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+       ip netns exec "$ns0" ping -q -c 1 10.0.1.99 > /dev/null
        if [ $? -ne 0 ]; then
                lret=1
                echo "ERROR: ping failed"
@@ -319,17 +334,17 @@ EOF
 
        expect="packets 1 bytes 84"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns1$dir "$expect"
+                       bad_counter "$ns0" ns1$dir "$expect" "test_local_dnat 5"
                        lret=1
                fi
        done
        expect="packets 0 bytes 0"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns0 ns2$dir "$expect"
+                       bad_counter "$ns0" ns2$dir "$expect" "test_local_dnat 6"
                        lret=1
                fi
        done
@@ -337,9 +352,9 @@ EOF
        # expect 1 count in ns1
        expect="packets 1 bytes 84"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns0 ns0$dir "$expect"
+                       bad_counter "$ns1" ns0$dir "$expect" "test_local_dnat 7"
                        lret=1
                fi
        done
@@ -347,14 +362,14 @@ EOF
        # expect 0 packet in ns2
        expect="packets 0 bytes 0"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns0${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns2$dir "$expect"
+                       bad_counter "$ns2" ns0$dir "$expect" "test_local_dnat 8"
                        lret=1
                fi
        done
 
-       test $lret -eq 0 && echo "PASS: ping to ns1 OK after $family nat output chain flush"
+       test $lret -eq 0 && echo "PASS: ping to $ns1 OK after $family nat output chain flush"
 
        return $lret
 }
@@ -366,26 +381,26 @@ test_masquerade6()
        local natflags=$2
        local lret=0
 
-       ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+       ip netns exec "$ns0" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
 
-       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+               echo "ERROR: cannot ping $ns1 from $ns2 via ipv6"
                return 1
        fi
 
        expect="packets 1 bytes 104"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns2$dir "$expect"
+                       bad_counter "$ns1" ns2$dir "$expect" "test_masquerade6 1"
                        lret=1
                fi
 
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns1$dir "$expect"
+                       bad_counter "$ns2" ns1$dir "$expect" "test_masquerade6 2"
                        lret=1
                fi
        done
@@ -393,7 +408,7 @@ test_masquerade6()
        reset_counters
 
 # add masquerading rule
-ip netns exec ns0 nft -f - <<EOF
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
 table $family nat {
        chain postrouting {
                type nat hook postrouting priority 0; policy accept;
@@ -406,24 +421,24 @@ EOF
                return $ksft_skip
        fi
 
-       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags"
+               echo "ERROR: cannot ping $ns1 from $ns2 with active $family masquerade $natflags"
                lret=1
        fi
 
        # ns1 should have seen packets from ns0, due to masquerade
        expect="packets 1 bytes 104"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns1" ns0$dir "$expect" "test_masquerade6 3"
                        lret=1
                fi
 
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns1$dir "$expect"
+                       bad_counter "$ns2" ns1$dir "$expect" "test_masquerade6 4"
                        lret=1
                fi
        done
@@ -431,32 +446,32 @@ EOF
        # ns1 should not have seen packets from ns2, due to masquerade
        expect="packets 0 bytes 0"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns1" ns2$dir "$expect" "test_masquerade6 5"
                        lret=1
                fi
 
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns1$dir "$expect"
+                       bad_counter "$ns0" ns1$dir "$expect" "test_masquerade6 6"
                        lret=1
                fi
        done
 
-       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)"
+               echo "ERROR: cannot ping $ns1 from $ns2 with active ipv6 masquerade $natflags (attempt 2)"
                lret=1
        fi
 
-       ip netns exec ns0 nft flush chain $family nat postrouting
+       ip netns exec "$ns0" nft flush chain $family nat postrouting
        if [ $? -ne 0 ]; then
                echo "ERROR: Could not flush $family nat postrouting" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: $family IPv6 masquerade $natflags for ns2"
+       test $lret -eq 0 && echo "PASS: $family IPv6 masquerade $natflags for $ns2"
 
        return $lret
 }
@@ -467,26 +482,26 @@ test_masquerade()
        local natflags=$2
        local lret=0
 
-       ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
-       ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+       ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+       ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
 
-       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 $natflags"
+               echo "ERROR: cannot ping $ns1 from $ns2 $natflags"
                lret=1
        fi
 
        expect="packets 1 bytes 84"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns2$dir "$expect"
+                       bad_counter "$ns1" ns2$dir "$expect" "test_masquerade 1"
                        lret=1
                fi
 
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns1$dir "$expect"
+                       bad_counter "$ns2" ns1$dir "$expect" "test_masquerade 2"
                        lret=1
                fi
        done
@@ -494,7 +509,7 @@ test_masquerade()
        reset_counters
 
 # add masquerading rule
-ip netns exec ns0 nft -f - <<EOF
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
 table $family nat {
        chain postrouting {
                type nat hook postrouting priority 0; policy accept;
@@ -507,24 +522,24 @@ EOF
                return $ksft_skip
        fi
 
-       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags"
+               echo "ERROR: cannot ping $ns1 from $ns2 with active $family masquerade $natflags"
                lret=1
        fi
 
        # ns1 should have seen packets from ns0, due to masquerade
        expect="packets 1 bytes 84"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns1" ns0$dir "$expect" "test_masquerade 3"
                        lret=1
                fi
 
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns1$dir "$expect"
+                       bad_counter "$ns2" ns1$dir "$expect" "test_masquerade 4"
                        lret=1
                fi
        done
@@ -532,32 +547,32 @@ EOF
        # ns1 should not have seen packets from ns2, due to masquerade
        expect="packets 0 bytes 0"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns1" ns2$dir "$expect" "test_masquerade 5"
                        lret=1
                fi
 
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns1$dir "$expect"
+                       bad_counter "$ns0" ns1$dir "$expect" "test_masquerade 6"
                        lret=1
                fi
        done
 
-       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)"
+               echo "ERROR: cannot ping $ns1 from $ns2 with active ip masquerade $natflags (attempt 2)"
                lret=1
        fi
 
-       ip netns exec ns0 nft flush chain $family nat postrouting
+       ip netns exec "$ns0" nft flush chain $family nat postrouting
        if [ $? -ne 0 ]; then
                echo "ERROR: Could not flush $family nat postrouting" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: $family IP masquerade $natflags for ns2"
+       test $lret -eq 0 && echo "PASS: $family IP masquerade $natflags for $ns2"
 
        return $lret
 }
@@ -567,25 +582,25 @@ test_redirect6()
        local family=$1
        local lret=0
 
-       ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+       ip netns exec "$ns0" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
 
-       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
+               echo "ERROR: cannot ping $ns1 from $ns2 via ipv6"
                lret=1
        fi
 
        expect="packets 1 bytes 104"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns2$dir "$expect"
+                       bad_counter "$ns1" ns2$dir "$expect" "test_redirect6 1"
                        lret=1
                fi
 
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns1$dir "$expect"
+                       bad_counter "$ns2" ns1$dir "$expect" "test_redirect6 2"
                        lret=1
                fi
        done
@@ -593,7 +608,7 @@ test_redirect6()
        reset_counters
 
 # add redirect rule
-ip netns exec ns0 nft -f - <<EOF
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
 table $family nat {
        chain prerouting {
                type nat hook prerouting priority 0; policy accept;
@@ -606,18 +621,18 @@ EOF
                return $ksft_skip
        fi
 
-       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 via ipv6 with active $family redirect"
+               echo "ERROR: cannot ping $ns1 from $ns2 via ipv6 with active $family redirect"
                lret=1
        fi
 
        # ns1 should have seen no packets from ns2, due to redirection
        expect="packets 0 bytes 0"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns1" ns2$dir "$expect" "test_redirect6 3"
                        lret=1
                fi
        done
@@ -625,20 +640,20 @@ EOF
        # ns0 should have seen packets from ns2, due to masquerade
        expect="packets 1 bytes 104"
        for dir in "in6" "out6" ; do
-               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns0" ns2$dir "$expect" "test_redirect6 4"
                        lret=1
                fi
        done
 
-       ip netns exec ns0 nft delete table $family nat
+       ip netns exec "$ns0" nft delete table $family nat
        if [ $? -ne 0 ]; then
                echo "ERROR: Could not delete $family nat table" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: $family IPv6 redirection for ns2"
+       test $lret -eq 0 && echo "PASS: $family IPv6 redirection for $ns2"
 
        return $lret
 }
@@ -648,26 +663,26 @@ test_redirect()
        local family=$1
        local lret=0
 
-       ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
-       ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+       ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+       ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
 
-       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2"
+               echo "ERROR: cannot ping $ns1 from $ns2"
                lret=1
        fi
 
        expect="packets 1 bytes 84"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns2$dir "$expect"
+                       bad_counter "$ns1" ns2$dir "$expect" "test_redirect 1"
                        lret=1
                fi
 
-               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns2 ns1$dir "$expect"
+                       bad_counter "$ns2" ns1$dir "$expect" "test_redirect 2"
                        lret=1
                fi
        done
@@ -675,7 +690,7 @@ test_redirect()
        reset_counters
 
 # add redirect rule
-ip netns exec ns0 nft -f - <<EOF
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
 table $family nat {
        chain prerouting {
                type nat hook prerouting priority 0; policy accept;
@@ -688,9 +703,9 @@ EOF
                return $ksft_skip
        fi
 
-       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active $family ip redirect"
+               echo "ERROR: cannot ping $ns1 from $ns2 with active $family ip redirect"
                lret=1
        fi
 
@@ -698,9 +713,9 @@ EOF
        expect="packets 0 bytes 0"
        for dir in "in" "out" ; do
 
-               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns1" ns2$dir "$expect" "test_redirect 3"
                        lret=1
                fi
        done
@@ -708,28 +723,28 @@ EOF
        # ns0 should have seen packets from ns2, due to masquerade
        expect="packets 1 bytes 84"
        for dir in "in" "out" ; do
-               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               cnt=$(ip netns exec "$ns0" nft list counter inet filter ns2${dir} | grep -q "$expect")
                if [ $? -ne 0 ]; then
-                       bad_counter ns1 ns0$dir "$expect"
+                       bad_counter "$ns0" ns2$dir "$expect" "test_redirect 4"
                        lret=1
                fi
        done
 
-       ip netns exec ns0 nft delete table $family nat
+       ip netns exec "$ns0" nft delete table $family nat
        if [ $? -ne 0 ]; then
                echo "ERROR: Could not delete $family nat table" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: $family IP redirection for ns2"
+       test $lret -eq 0 && echo "PASS: $family IP redirection for $ns2"
 
        return $lret
 }
 
 
-# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
+# ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99
 for i in 0 1 2; do
-ip netns exec ns$i nft -f - <<EOF
+ip netns exec ns$i-$sfx nft -f /dev/stdin <<EOF
 table inet filter {
        counter ns0in {}
        counter ns1in {}
@@ -796,18 +811,18 @@ done
 sleep 3
 # test basic connectivity
 for i in 1 2; do
-  ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
+  ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99 > /dev/null
   if [ $? -ne 0 ];then
        echo "ERROR: Could not reach other namespace(s)" 1>&2
        ret=1
   fi
 
-  ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
+  ip netns exec "$ns0" ping -c 1 -q dead:$i::99 > /dev/null
   if [ $? -ne 0 ];then
        echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
        ret=1
   fi
-  check_counters ns$i
+  check_counters ns$i-$sfx
   if [ $? -ne 0 ]; then
        ret=1
   fi
@@ -820,7 +835,7 @@ for i in 1 2; do
 done
 
 if [ $ret -eq 0 ];then
-       echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
+       echo "PASS: netns routing/connectivity: $ns0 can reach $ns1 and $ns2"
 fi
 
 reset_counters
@@ -846,4 +861,9 @@ reset_counters
 $test_inet_nat && test_redirect inet
 $test_inet_nat && test_redirect6 inet
 
+if [ $ret -ne 0 ];then
+       echo -n "FAIL: "
+       nft --version
+fi
+
 exit $ret
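# The randomized suffix scheme above is what makes this script safe to run
# concurrently: each invocation derives unique namespace names from a single
# random token, and the EXIT trap removes exactly the namespaces it created.
# A minimal standalone sketch of the idiom (names illustrative only):
#
#   sfx=$(mktemp -u "XXXXXXXX")    # -u prints a random name, creates no file
#   ns1="ns1-$sfx"
#   trap 'ip netns del "$ns1"' EXIT
#   ip netns add "$ns1" || exit 4  # 4 is the kselftest skip code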
index 4e9485590c10d5535a08ebf030518fcecb2e7be9..1dbfb62567d2fff9fb60c51afc91277653be01d9 100755 (executable)
@@ -15,8 +15,15 @@ then
        exit 0
 fi
 ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
-idlecpus=`mpstat | tail -1 | \
-       awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
+if mpstat -V > /dev/null 2>&1
+then
+       idlecpus=`mpstat | tail -1 | \
+               awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
+else
+       # No mpstat command, so use all available CPUs.
+       echo "The mpstat command is not available, so greedily using all CPUs."
+       idlecpus=$ncpus
+fi
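# Probing with "mpstat -V" before parsing its output matters because mpstat
# ships in the optional sysstat package; without the guard, a missing binary
# would leave idlecpus empty and the awk computation silently wrong.  The
# fallback simply treats every CPU as usable.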
 awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
 BEGIN {
        cpus2use = idlecpus;
index dc49a3ba6111ec8f3323dfa0eebb74a58f47b43f..30cb5b27d32e56cb629b36a911d8bda029db2def 100755 (executable)
@@ -23,25 +23,39 @@ spinmax=${4-1000}
 
 n=1
 
-starttime=`awk 'BEGIN { print systime(); }' < /dev/null`
+starttime=`gawk 'BEGIN { print systime(); }' < /dev/null`
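# systime() is a GNU awk extension rather than part of POSIX awk, so the
# timestamp computations now call gawk explicitly instead of relying on
# whatever /usr/bin/awk happens to be.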
+
+nohotplugcpus=
+for i in /sys/devices/system/cpu/cpu[0-9]*
+do
+       if test -f $i/online
+       then
+               :
+       else
+               curcpu=`echo $i | sed -e 's/^[^0-9]*//'`
+               nohotplugcpus="$nohotplugcpus $curcpu"
+       fi
+done
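# A CPU whose sysfs directory lacks an "online" file cannot be hot-plugged
# and is therefore always available; collecting those CPUs once up front
# lets the affinity loop below append them to each online scan.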
 
 while :
 do
        # Check for done.
-       t=`awk -v s=$starttime 'BEGIN { print systime() - s; }' < /dev/null`
+       t=`gawk -v s=$starttime 'BEGIN { print systime() - s; }' < /dev/null`
        if test "$t" -gt "$duration"
        then
                exit 0;
        fi
 
        # Set affinity to randomly selected online CPU
-       cpus=`grep 1 /sys/devices/system/cpu/*/online |
-               sed -e 's,/[^/]*$,,' -e 's/^[^0-9]*//'`
-
-       # Do not leave out poor old cpu0 which may not be hot-pluggable
-       if [ ! -f "/sys/devices/system/cpu/cpu0/online" ]; then
-               cpus="0 $cpus"
+       if cpus=`grep 1 /sys/devices/system/cpu/*/online 2>/dev/null |
+                sed -e 's,/[^/]*$,,' -e 's/^[^0-9]*//'`
+       then
+               :
+       else
+               cpus=
        fi
+       # Do not leave out non-hot-pluggable CPUs
+       cpus="$cpus $nohotplugcpus"
 
        cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
                srand(n + me + systime());
index 2a7f3f4756a740a67a48d60e74f58ef0c4fc70ca..9d9a41625dd90061046d3bd272fcf226bf59c0b1 100755 (executable)
@@ -25,6 +25,7 @@ stopstate="`grep 'End-test grace-period state: g' $i/console.log 2> /dev/null |
            tail -1 | sed -e 's/^\[[ 0-9.]*] //' |
            awk '{ print \"[\" $1 \" \" $5 \" \" $6 \" \" $7 \"]\"; }' |
            tr -d '\012\015'`"
+fwdprog="`grep 'rcu_torture_fwd_prog_cr Duration' $i/console.log 2> /dev/null | sed -e 's/^\[[^]]*] //' | sort -k15nr | head -1 | awk '{ print $14 " " $15 }'`"
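# The forward-progress capture above keeps only the longest
# rcu_torture_fwd_prog_cr duration seen in the console log (sort -k15nr,
# then head -1) and appends it to the one-line per-scenario summary below.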
 if test -z "$ngps"
 then
        echo "$configfile ------- " $stopstate
@@ -39,7 +40,7 @@ else
                        BEGIN { print ngps / dur }' < /dev/null`
                title="$title ($ngpsps/s)"
        fi
-       echo $title $stopstate
+       echo $title $stopstate $fwdprog
        nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
        if test -z "$nclosecalls"
        then
index 33c669619736449e7b0d0983cea60b69eff6e4a1..e0352304b98b4c7f045e67560e8c65dfb2a4a6a3 100755 (executable)
@@ -123,7 +123,7 @@ qemu_args=$5
 boot_args=$6
 
 cd $KVM
-kstarttime=`awk 'BEGIN { print systime() }' < /dev/null`
+kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null`
 if test -z "$TORTURE_BUILDONLY"
 then
        echo ' ---' `date`: Starting kernel
@@ -133,11 +133,10 @@ fi
 qemu_args="-enable-kvm -nographic $qemu_args"
 cpu_count=`configNR_CPUS.sh $resdir/ConfigFragment`
 cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
-vcpus=`identify_qemu_vcpus`
-if test $cpu_count -gt $vcpus
+if test "$cpu_count" -gt "$TORTURE_ALLOTED_CPUS"
 then
-       echo CPU count limited from $cpu_count to $vcpus | tee -a $resdir/Warnings
-       cpu_count=$vcpus
+       echo CPU count limited from $cpu_count to $TORTURE_ALLOTED_CPUS | tee -a $resdir/Warnings
+       cpu_count=$TORTURE_ALLOTED_CPUS
 fi
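# The cap now comes from TORTURE_ALLOTED_CPUS (the --cpus budget resolved in
# kvm.sh) rather than from a fresh identify_qemu_vcpus call, so a single
# scenario cannot claim more CPUs than the run as a whole was granted.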
 qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
 
@@ -177,7 +176,7 @@ do
        then
                qemu_pid=`cat "$resdir/qemu_pid"`
        fi
-       kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
+       kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
        if test -z "$qemu_pid" || kill -0 "$qemu_pid" > /dev/null 2>&1
        then
                if test $kruntime -ge $seconds
@@ -213,7 +212,7 @@ then
        oldline="`tail $resdir/console.log`"
        while :
        do
-               kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
+               kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
                if kill -0 $qemu_pid > /dev/null 2>&1
                then
                        :
index 72518580df2367f024e5a257bc1340c655b2ecfd..78d18ab8e95482dd18f32cef45833539f13d76e2 100755 (executable)
@@ -24,7 +24,9 @@ dur=$((30*60))
 dryrun=""
 KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
 PATH=${KVM}/bin:$PATH; export PATH
-TORTURE_ALLOTED_CPUS=""
+. functions.sh
+
+TORTURE_ALLOTED_CPUS="`identify_qemu_vcpus`"
 TORTURE_DEFCONFIG=defconfig
 TORTURE_BOOT_IMAGE=""
 TORTURE_INITRD="$KVM/initrd"; export TORTURE_INITRD
@@ -40,8 +42,6 @@ cpus=0
 ds=`date +%Y.%m.%d-%H:%M:%S`
 jitter="-1"
 
-. functions.sh
-
 usage () {
        echo "Usage: $scriptname optional arguments:"
        echo "       --bootargs kernel-boot-arguments"
@@ -93,6 +93,11 @@ do
                checkarg --cpus "(number)" "$#" "$2" '^[0-9]*$' '^--'
                cpus=$2
                TORTURE_ALLOTED_CPUS="$2"
+               max_cpus="`identify_qemu_vcpus`"
+               if test "$TORTURE_ALLOTED_CPUS" -gt "$max_cpus"
+               then
+                       TORTURE_ALLOTED_CPUS=$max_cpus
+               fi
                shift
                ;;
        --datestamp)
@@ -198,9 +203,10 @@ fi
 
 CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
 
+defaultconfigs="`tr '\012' ' ' < $CONFIGFRAG/CFLIST`"
 if test -z "$configs"
 then
-       configs="`cat $CONFIGFRAG/CFLIST`"
+       configs=$defaultconfigs
 fi
 
 if test -z "$resdir"
@@ -209,7 +215,7 @@ then
 fi
 
 # Create a file of test-name/#cpus pairs, sorted by decreasing #cpus.
-touch $T/cfgcpu
+configs_derep=
 for CF in $configs
 do
        case $CF in
@@ -222,15 +228,21 @@ do
                CF1=$CF
                ;;
        esac
+       for ((cur_rep=0;cur_rep<$config_reps;cur_rep++))
+       do
+               configs_derep="$configs_derep $CF1"
+       done
+done
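# The loop above expands each config's repetition count into an explicit
# list first; the sed below then lets a literal CFLIST token passed via
# --configs stand for the default scenario list.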
+touch $T/cfgcpu
+configs_derep="`echo $configs_derep | sed -e "s/\<CFLIST\>/$defaultconfigs/g"`"
+for CF1 in $configs_derep
+do
        if test -f "$CONFIGFRAG/$CF1"
        then
                cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF1`
                cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"`
                cpu_count=`configfrag_boot_maxcpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"`
-               for ((cur_rep=0;cur_rep<$config_reps;cur_rep++))
-               do
-                       echo $CF1 $cpu_count >> $T/cfgcpu
-               done
+               echo $CF1 $cpu_count >> $T/cfgcpu
        else
                echo "The --configs file $CF1 does not exist, terminating."
                exit 1
index 6fa9bd1ddc0940a357a8f8c6dc4f9c8c73ac0a77..38e424d2392cc2766fdb5c3ddd7f4bddacf04649 100755 (executable)
@@ -20,58 +20,9 @@ if [ -s "$D/initrd/init" ]; then
     exit 0
 fi
 
-T=${TMPDIR-/tmp}/mkinitrd.sh.$$
-trap 'rm -rf $T' 0 2
-mkdir $T
-
-cat > $T/init << '__EOF___'
-#!/bin/sh
-# Run in userspace a few milliseconds every second.  This helps to
-# exercise the NO_HZ_FULL portions of RCU.  The 192 instances of "a" was
-# empirically shown to give a nice multi-millisecond burst of user-mode
-# execution on a 2GHz CPU, as desired.  Modern CPUs will vary from a
-# couple of milliseconds up to perhaps 100 milliseconds, which is an
-# acceptable range.
-#
-# Why not calibrate an exact delay?  Because within this initrd, we
-# are restricted to Bourne-shell builtins, which as far as I know do not
-# provide any means of obtaining a fine-grained timestamp.
-
-a4="a a a a"
-a16="$a4 $a4 $a4 $a4"
-a64="$a16 $a16 $a16 $a16"
-a192="$a64 $a64 $a64"
-while :
-do
-       q=
-       for i in $a192
-       do
-               q="$q $i"
-       done
-       sleep 1
-done
-__EOF___
-
-# Try using dracut to create initrd
-if command -v dracut >/dev/null 2>&1
-then
-       echo Creating $D/initrd using dracut.
-       # Filesystem creation
-       dracut --force --no-hostonly --no-hostonly-cmdline --module "base" $T/initramfs.img
-       cd $D
-       mkdir -p initrd
-       cd initrd
-       zcat $T/initramfs.img | cpio -id
-       cp $T/init init
-       chmod +x init
-       echo Done creating $D/initrd using dracut
-       exit 0
-fi
-
-# No dracut, so create a C-language initrd/init program and statically
-# link it.  This results in a very small initrd, but might be a bit less
-# future-proof than dracut.
-echo "Could not find dracut, attempting C initrd"
+# Create a C-language initrd/init infinite-loop program and statically
+# link it.  This results in a very small initrd.
+echo "Creating a statically linked C-language initrd"
 cd $D
 mkdir -p initrd
 cd initrd
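# With the dracut branch removed, every run now builds the same tiny,
# statically linked C init (the build steps follow in the unchanged part of
# this script); the only remaining requirement is a working C compiler.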
index eec2663261f2ac8bfc09085049ea0d908f8851c5..e8a657a5f48a04db13532048daa64a454e8cc6d4 100644 (file)
@@ -15,7 +15,7 @@
 #include <errno.h>
 #include <stddef.h>
 
-static inline pid_t gettid(void)
+static inline pid_t rseq_gettid(void)
 {
        return syscall(__NR_gettid);
 }
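/*
 * The rename avoids a symbol clash: glibc 2.30 added a public gettid()
 * wrapper, so a selftest-local helper of the same name no longer builds
 * against current toolchains.  The syscall(__NR_gettid) body is unchanged.
 */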
@@ -373,11 +373,12 @@ void *test_percpu_spinlock_thread(void *arg)
                rseq_percpu_unlock(&data->lock, cpu);
 #ifndef BENCHMARK
                if (i != 0 && !(i % (reps / 10)))
-                       printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+                       printf_verbose("tid %d: count %lld\n",
+                                      (int) rseq_gettid(), i);
 #endif
        }
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && thread_data->reg &&
            rseq_unregister_current_thread())
                abort();
@@ -454,11 +455,12 @@ void *test_percpu_inc_thread(void *arg)
                } while (rseq_unlikely(ret));
 #ifndef BENCHMARK
                if (i != 0 && !(i % (reps / 10)))
-                       printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+                       printf_verbose("tid %d: count %lld\n",
+                                      (int) rseq_gettid(), i);
 #endif
        }
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && thread_data->reg &&
            rseq_unregister_current_thread())
                abort();
@@ -605,7 +607,7 @@ void *test_percpu_list_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
@@ -796,7 +798,7 @@ void *test_percpu_buffer_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
@@ -1011,7 +1013,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg)
        }
 
        printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
-                      (int) gettid(), nr_abort, signals_delivered);
+                      (int) rseq_gettid(), nr_abort, signals_delivered);
        if (!opt_disable_rseq && rseq_unregister_current_thread())
                abort();
 
index d40d60e7499e8aa2f814f7282092692db84fcf73..3f63eb362b92fbd709332043c862f7d67390323d 100644 (file)
@@ -149,11 +149,13 @@ static inline void rseq_clear_rseq_cs(void)
 /*
  * rseq_prepare_unload() should be invoked by each thread executing a rseq
  * critical section at least once between their last critical section and
- * library unload of the library defining the rseq critical section
- * (struct rseq_cs). This also applies to use of rseq in code generated by
- * JIT: rseq_prepare_unload() should be invoked at least once by each
- * thread executing a rseq critical section before reclaim of the memory
- * holding the struct rseq_cs.
+ * library unload of the library defining the rseq critical section (struct
+ * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
+ * post_commit_offset fields. This also applies to use of rseq in code
+ * generated by JIT: rseq_prepare_unload() should be invoked at least once by
+ * each thread executing a rseq critical section before reclaim of the memory
+ * holding the struct rseq_cs or reclaim of the code pointed to by struct
+ * rseq_cs start_ip and post_commit_offset fields.
  */
 static inline void rseq_prepare_unload(void)
 {
diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings
new file mode 100644 (file)
index 0000000..e7b9417
--- /dev/null
@@ -0,0 +1 @@
+timeout=0
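# kselftest reads this per-directory "settings" file; timeout=0 disables the
# runner's default 45-second per-test timeout, which the long-running rseq
# parameter tests would otherwise exceed.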
index 98da7a5047375a16167120f950f895e4a07ef636..fa02c4d5ec13c1e1071f433c708d08265bbe581e 100644 (file)
@@ -1,8 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for mount selftests.
-CFLAGS = -Wall -lcap -O2
+CFLAGS = -Wall -O2
+LDLIBS = -lcap
 
-TEST_PROGS := run_tests.sh
+TEST_PROGS := safesetid-test.sh
 TEST_GEN_FILES := safesetid-test
 
 include ../lib.mk
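# Two fixes in one: -lcap moves from CFLAGS to LDLIBS so the library lands
# after the object files on the link line (as required by --as-needed
# linkers), and TEST_PROGS now names the wrapper script that actually
# exists in this directory.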
index 8f40c6ecdad188458d526af08f4bf4731d34c30c..0c4d50644c1311108f249fa0cdd3a8b425cb6fbd 100644 (file)
@@ -213,7 +213,8 @@ static void test_setuid(uid_t child_uid, bool expect_success)
        }
 
        if (cpid == 0) {            /* Code executed by child */
-               setuid(child_uid);
+               if (setuid(child_uid) < 0)
+                       exit(EXIT_FAILURE);
                if (getuid() == child_uid)
                        exit(EXIT_SUCCESS);
                else
@@ -291,8 +292,10 @@ int main(int argc, char **argv)
 
        // First test to make sure we can write userns mappings from a user
        // that doesn't have any restrictions (as long as it has CAP_SETUID);
-       setuid(NO_POLICY_USER);
-       setgid(NO_POLICY_USER);
+       if (setuid(NO_POLICY_USER) < 0)
+               die("Error with set uid(%d)\n", NO_POLICY_USER);
+       if (setgid(NO_POLICY_USER) < 0)
+               die("Error with set gid(%d)\n", NO_POLICY_USER);
 
        // Take away all but setid caps
        drop_caps(true);
@@ -306,8 +309,10 @@ int main(int argc, char **argv)
                die("test_userns failed when it should work\n");
        }
 
-       setuid(RESTRICTED_PARENT);
-       setgid(RESTRICTED_PARENT);
+       if (setuid(RESTRICTED_PARENT) < 0)
+               die("Error with set uid(%d)\n", RESTRICTED_PARENT);
+       if (setgid(RESTRICTED_PARENT) < 0)
+               die("Error with set gid(%d)\n", RESTRICTED_PARENT);
 
        test_setuid(ROOT_USER, false);
        test_setuid(ALLOWED_CHILD1, true);
index 6944b898bb530929f8436873b9af395cd81e20cd..ee1b727ede045dfd920bf319d3729efd217ef137 100644 (file)
@@ -3158,7 +3158,18 @@ TEST(user_notification_basic)
        EXPECT_GT(poll(&pollfd, 1, -1), 0);
        EXPECT_EQ(pollfd.revents, POLLIN);
 
-       EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+       /* Test that we can't pass garbage to the kernel. */
+       memset(&req, 0, sizeof(req));
+       req.pid = -1;
+       errno = 0;
+       ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
+       EXPECT_EQ(-1, ret);
+       EXPECT_EQ(EINVAL, errno);
+
+       if (ret) {
+               req.pid = 0;
+               EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+       }
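	/*
	 * The kernel now requires the struct seccomp_notif passed to
	 * SECCOMP_IOCTL_NOTIF_RECV to be zeroed, rejecting stale or garbage
	 * fields with EINVAL so they stay available for future extensions;
	 * the poisoned pid above exercises exactly that, and the real,
	 * zeroed request is reissued only after the bogus one fails.
	 */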
 
        pollfd.fd = listener;
        pollfd.events = POLLIN | POLLOUT;
@@ -3278,6 +3289,7 @@ TEST(user_notification_signal)
 
        close(sk_pair[1]);
 
+       memset(&req, 0, sizeof(req));
        EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
 
        EXPECT_EQ(kill(pid, SIGUSR1), 0);
@@ -3296,6 +3308,7 @@ TEST(user_notification_signal)
        EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
        EXPECT_EQ(errno, ENOENT);
 
+       memset(&req, 0, sizeof(req));
        EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
 
        resp.id = req.id;
index 76ae03a64506b76f73d9f5153e6ccc1081c0e898..2e361cea63bcd3df75f5628249e82ecaf788f51e 100644 (file)
         ]
     },
     {
-        "id": "6f5e",
+        "id": "b99c",
         "name": "Add basic filter with cmp ematch u8/transport layer and default action",
         "category": [
             "filter",
index 0f89cd50a94b67aa9fe0e6a87955e43df51ab3fa..8877f7b2b809dcb6cfe68635ce0148de38d6beef 100644 (file)
@@ -1,26 +1,4 @@
 [
-    {
-        "id": "e9a3",
-        "name": "Add u32 with source match",
-        "category": [
-            "filter",
-            "u32"
-        ],
-        "plugins": {
-                "requires": "nsPlugin"
-        },
-        "setup": [
-            "$TC qdisc add dev $DEV1 ingress"
-        ],
-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 u32 match ip src 127.0.0.1/32 flowid 1:1 action ok",
-        "expExitCode": "0",
-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
-        "matchPattern": "match 7f000001/ffffffff at 12",
-        "matchCount": "1",
-        "teardown": [
-            "$TC qdisc del dev $DEV1 ingress"
-        ]
-    },
     {
         "id": "2638",
         "name": "Add matchall and try to get it",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
new file mode 100644 (file)
index 0000000..e09d3c0
--- /dev/null
@@ -0,0 +1,205 @@
+[
+    {
+        "id": "afa9",
+        "name": "Add u32 with source match",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 ingress protocol ip prio 1 u32 match ip src 127.0.0.1/32 flowid 1:1 action ok",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol ip pref 1 u32 chain (0[ ]+$|0 fh 800: ht divisor 1|0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1.*match 7f000001/ffffffff at 12)",
+        "matchCount": "3",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "6aa7",
+        "name": "Add/Replace u32 with source match and invalid indev",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter replace dev $DEV1 ingress protocol ip prio 1 u32 match ip src 127.0.0.1/32 indev notexist20 flowid 1:1 action ok",
+        "expExitCode": "2",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol ip pref 1 u32 chain 0",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "bc4d",
+        "name": "Replace valid u32 with source match and invalid indev",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress",
+            "$TC filter add dev $DEV1 ingress protocol ip prio 1 u32 match ip src 127.0.0.3/32 flowid 1:3 action ok"
+        ],
+        "cmdUnderTest": "$TC filter replace dev $DEV1 ingress protocol ip prio 1 u32 match ip src 127.0.0.2/32 indev notexist20 flowid 1:2 action ok",
+        "expExitCode": "2",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol ip pref 1 u32 chain (0[ ]+$|0 fh 800: ht divisor 1|0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:3.*match 7f000003/ffffffff at 12)",
+        "matchCount": "3",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "648b",
+        "name": "Add u32 with custom hash table",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 ingress prio 99 handle 42: u32 divisor 256",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "pref 99 u32 chain (0[ ]+$|0 fh 42: ht divisor 256|0 fh 800: ht divisor 1)",
+        "matchCount": "3",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "6658",
+        "name": "Add/Replace u32 with custom hash table and invalid handle",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter replace dev $DEV1 ingress prio 99 handle 42:42 u32 divisor 256",
+        "expExitCode": "2",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "pref 99 u32 chain 0",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "9d0a",
+        "name": "Replace valid u32 with custom hash table and invalid handle",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress",
+            "$TC filter add dev $DEV1 ingress prio 99 handle 42: u32 divisor 256"
+        ],
+        "cmdUnderTest": "$TC filter replace dev $DEV1 ingress prio 99 handle 42:42 u32 divisor 128",
+        "expExitCode": "2",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "pref 99 u32 chain (0[ ]+$|0 fh 42: ht divisor 256|0 fh 800: ht divisor 1)",
+        "matchCount": "3",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "1644",
+        "name": "Add u32 filter that links to a custom hash table",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress",
+            "$TC filter add dev $DEV1 ingress prio 99 handle 43: u32 divisor 256"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 ingress protocol ip prio 98 u32 link 43: hashkey mask 0x0000ff00 at 12 match ip src 192.168.0.0/16",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol ip pref 98 u32 chain (0[ ]+$|0 fh 801: ht divisor 1|0 fh 801::800 order 2048 key ht 801 bkt 0 link 43:.*match c0a80000/ffff0000 at 12.*hash mask 0000ff00 at 12)",
+        "matchCount": "3",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "74c2",
+        "name": "Add/Replace u32 filter with invalid hash table id",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter replace dev $DEV1 ingress protocol ip prio 20 u32 ht 47:47 action drop",
+        "expExitCode": "2",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol ip pref 20 u32 chain 0",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    },
+    {
+        "id": "1fe6",
+        "name": "Replace valid u32 filter with invalid hash table id",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 ingress",
+            "$TC filter add dev $DEV1 ingress protocol ip prio 99 handle 43: u32 divisor 1",
+            "$TC filter add dev $DEV1 ingress protocol ip prio 98 u32 ht 43: match tcp src 22 FFFF classid 1:3"
+        ],
+        "cmdUnderTest": "$TC filter replace dev $DEV1 ingress protocol ip prio 98 u32 ht 43:1 match tcp src 23 FFFF classid 1:4",
+        "expExitCode": "2",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol ip pref 99 u32 chain (0[ ]+$|0 fh (43|800): ht divisor 1|0 fh 43::800 order 2048 key ht 43 bkt 0 flowid 1:3.*match 00160000/ffff0000 at nexthdr\\+0)",
+        "matchCount": "4",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
+    }
+]
diff --git a/tools/testing/selftests/timens/.gitignore b/tools/testing/selftests/timens/.gitignore
new file mode 100644 (file)
index 0000000..789f21e
--- /dev/null
@@ -0,0 +1,8 @@
+clock_nanosleep
+exec
+gettime_perf
+gettime_perf_cold
+procfs
+timens
+timer
+timerfd
diff --git a/tools/testing/selftests/timens/Makefile b/tools/testing/selftests/timens/Makefile
new file mode 100644 (file)
index 0000000..e9fb30b
--- /dev/null
@@ -0,0 +1,7 @@
+TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec
+TEST_GEN_PROGS_EXTENDED := gettime_perf
+
+CFLAGS := -Wall -Werror -pthread
+LDFLAGS := -lrt -ldl
+
+include ../lib.mk
diff --git a/tools/testing/selftests/timens/clock_nanosleep.c b/tools/testing/selftests/timens/clock_nanosleep.c
new file mode 100644 (file)
index 0000000..8e7b7c7
--- /dev/null
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+
+#include <errno.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <signal.h>
+#include <string.h>
+
+#include "log.h"
+#include "timens.h"
+
+void test_sig(int sig)
+{
+       if (sig == SIGUSR2)
+               pthread_exit(NULL);
+}
+
+struct thread_args {
+       struct timespec *now, *rem;
+       pthread_mutex_t *lock;
+       int clockid;
+       int abs;
+};
+
+void *call_nanosleep(void *_args)
+{
+       struct thread_args *args = _args;
+
+       clock_nanosleep(args->clockid, args->abs ? TIMER_ABSTIME : 0, args->now, args->rem);
+       pthread_mutex_unlock(args->lock);
+       return NULL;
+}
+
+int run_test(int clockid, int abs)
+{
+       struct timespec now = {}, rem;
+       struct thread_args args = { .now = &now, .rem = &rem, .clockid = clockid};
+       struct timespec start;
+       pthread_mutex_t lock;
+       pthread_t thread;
+       int j, ok, ret;
+
+       signal(SIGUSR1, test_sig);
+       signal(SIGUSR2, test_sig);
+
+       pthread_mutex_init(&lock, NULL);
+       pthread_mutex_lock(&lock);
+
+       if (clock_gettime(clockid, &start) == -1) {
+               if (errno == EINVAL && check_skip(clockid))
+                       return 0;
+               return pr_perror("clock_gettime");
+       }
+
+       if (abs) {
+               now.tv_sec = start.tv_sec;
+               now.tv_nsec = start.tv_nsec;
+       }
+
+       now.tv_sec += 3600;
+       args.abs = abs;
+       args.lock = &lock;
+       ret = pthread_create(&thread, NULL, call_nanosleep, &args);
+       if (ret != 0) {
+               pr_err("Unable to create a thread: %s", strerror(ret));
+               return 1;
+       }
+
+       /* Wait until the thread has blocked in clock_nanosleep(). */
+       ok = 0;
+       for (j = 0; j < 8; j++) {
+               /* The maximum timeout is about 5 seconds. */
+               usleep(10000 << j);
+
+               /* Try to interrupt clock_nanosleep(). */
+               pthread_kill(thread, SIGUSR1);
+
+               usleep(10000 << j);
+               /* Check whether clock_nanosleep() has been interrupted or not. */
+               if (pthread_mutex_trylock(&lock) == 0) {
+                       /* The sleeper has woken up and dropped the lock. */
+                       ok = 1;
+                       break;
+               }
+       }
+       if (!ok)
+               pthread_kill(thread, SIGUSR2);
+       pthread_join(thread, NULL);
+       pthread_mutex_destroy(&lock);
+
+       if (!ok) {
+               ksft_test_result_fail("clockid: %d abs:%d timeout\n", clockid, abs);
+               return 1;
+       }
+
+       if (rem.tv_sec < 3300 || rem.tv_sec > 3900) {
+               pr_fail("clockid: %d abs: %d remain: %ld\n",
+                       clockid, abs, rem.tv_sec);
+               return 1;
+       }
+       ksft_test_result_pass("clockid: %d abs:%d\n", clockid, abs);
+
+       return 0;
+}
+
+int main(int argc, char *argv[])
+{
+       int ret, nsfd;
+
+       nscheck();
+
+       ksft_set_plan(4);
+
+       check_config_posix_timers();
+
+       if (unshare_timens())
+               return 1;
+
+       if (_settime(CLOCK_MONOTONIC, 7 * 24 * 3600))
+               return 1;
+       if (_settime(CLOCK_BOOTTIME, 9 * 24 * 3600))
+               return 1;
+
+       nsfd = open("/proc/self/ns/time_for_children", O_RDONLY);
+       if (nsfd < 0)
+               return pr_perror("Unable to open /proc/self/ns/time_for_children");
+
+       if (setns(nsfd, CLONE_NEWTIME))
+               return pr_perror("Unable to set timens");
+
+       ret = 0;
+       ret |= run_test(CLOCK_MONOTONIC, 0);
+       ret |= run_test(CLOCK_MONOTONIC, 1);
+       ret |= run_test(CLOCK_BOOTTIME_ALARM, 0);
+       ret |= run_test(CLOCK_BOOTTIME_ALARM, 1);
+
+       if (ret)
+               ksft_exit_fail();
+       ksft_exit_pass();
+       return ret;
+}
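
The run_test() above leans on a small reusable pattern: the main thread locks a mutex before starting the sleeper, the sleeper unlocks it only after its blocking call returns, and the main thread polls with pthread_mutex_trylock() as a non-blocking completion probe. A minimal standalone sketch of the same pattern follows; the names and timeouts are invented, and, as in the test, the mutex acts as a one-shot completion flag that one thread locks and the other unlocks. Build with -pthread.

/* Sketch of the trylock-as-completion-probe pattern used by the test. */
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t done = PTHREAD_MUTEX_INITIALIZER;

static void on_usr1(int sig)
{
        (void)sig;      /* the handler exists only to interrupt the sleep */
}

static void *sleeper(void *arg)
{
        struct timespec ts = { .tv_sec = 3600 };

        (void)arg;
        nanosleep(&ts, NULL);           /* returns early with EINTR on SIGUSR1 */
        pthread_mutex_unlock(&done);    /* signal completion to the prober */
        return NULL;
}

int main(void)
{
        pthread_t th;

        signal(SIGUSR1, on_usr1);
        pthread_mutex_lock(&done);
        if (pthread_create(&th, NULL, sleeper, NULL))
                return 1;

        usleep(100000);                 /* let the thread block in nanosleep() */
        pthread_kill(th, SIGUSR1);      /* interrupt the sleep */
        usleep(100000);

        /* Non-blocking probe: succeeds only once sleeper() has unlocked. */
        if (pthread_mutex_trylock(&done) == 0)
                printf("sleeper was interrupted\n");
        else
                printf("sleeper is still blocked\n");

        pthread_join(th, NULL);
        return 0;
}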
diff --git a/tools/testing/selftests/timens/config b/tools/testing/selftests/timens/config
new file mode 100644 (file)
index 0000000..4480620
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_TIME_NS=y
diff --git a/tools/testing/selftests/timens/exec.c b/tools/testing/selftests/timens/exec.c
new file mode 100644 (file)
index 0000000..87b47b5
--- /dev/null
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "log.h"
+#include "timens.h"
+
+#define OFFSET (36000)
+
+int main(int argc, char *argv[])
+{
+       struct timespec now, tst;
+       int status, i;
+       pid_t pid;
+
+       if (argc > 1) {
+               if (sscanf(argv[1], "%ld", &now.tv_sec) != 1)
+                       return pr_perror("sscanf");
+
+               for (i = 0; i < 2; i++) {
+                       _gettime(CLOCK_MONOTONIC, &tst, i);
+                       if (abs(tst.tv_sec - now.tv_sec) > 5)
+                               return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
+               }
+               return 0;
+       }
+
+       nscheck();
+
+       ksft_set_plan(1);
+
+       clock_gettime(CLOCK_MONOTONIC, &now);
+
+       if (unshare_timens())
+               return 1;
+
+       if (_settime(CLOCK_MONOTONIC, OFFSET))
+               return 1;
+
+       for (i = 0; i < 2; i++) {
+               _gettime(CLOCK_MONOTONIC, &tst, i);
+               if (abs(tst.tv_sec - now.tv_sec) > 5)
+                       return pr_fail("%ld %ld\n",
+                                       now.tv_sec, tst.tv_sec);
+       }
+
+       pid = fork();
+       if (pid < 0)
+               return pr_perror("fork");
+
+       if (pid == 0) {
+               char now_str[64];
+               char *cargv[] = {"exec", now_str, NULL};
+               char *cenv[] = {NULL};
+
+               /* Check that a child process is in the new timens. */
+               for (i = 0; i < 2; i++) {
+                       _gettime(CLOCK_MONOTONIC, &tst, i);
+                       if (abs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
+                               return pr_fail("%ld %ld\n",
+                                               now.tv_sec + OFFSET, tst.tv_sec);
+               }
+
+               /* Check for proper vvar offsets after execve. */
+               snprintf(now_str, sizeof(now_str), "%ld", now.tv_sec + OFFSET);
+               execve("/proc/self/exe", cargv, cenv);
+               return pr_perror("execve");
+       }
+
+       if (waitpid(pid, &status, 0) != pid)
+               return pr_perror("waitpid");
+
+       if (status)
+               ksft_exit_fail();
+
+       ksft_test_result_pass("exec\n");
+       ksft_exit_pass();
+       return 0;
+}
diff --git a/tools/testing/selftests/timens/gettime_perf.c b/tools/testing/selftests/timens/gettime_perf.c
new file mode 100644 (file)
index 0000000..7bf841a
--- /dev/null
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <time.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <dlfcn.h>
+
+#include "log.h"
+#include "timens.h"
+
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+
+vgettime_t vdso_clock_gettime;
+
+static void fill_function_pointers(void)
+{
+       void *vdso = dlopen("linux-vdso.so.1",
+                           RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+       if (!vdso)
+               vdso = dlopen("linux-gate.so.1",
+                             RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+       if (!vdso) {
+               pr_err("[WARN]\tfailed to find vDSO\n");
+               return;
+       }
+
+       vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+       if (!vdso_clock_gettime)
+               pr_err("Warning: failed to find clock_gettime in vDSO\n");
+}
+
+static void test(clock_t clockid, char *clockstr, bool in_ns)
+{
+       struct timespec tp, start;
+       long i = 0;
+       const int timeout = 3;
+
+       vdso_clock_gettime(clockid, &start);
+       tp = start;
+       for (tp = start; start.tv_sec + timeout > tp.tv_sec ||
+                        (start.tv_sec + timeout == tp.tv_sec &&
+                         start.tv_nsec > tp.tv_nsec); i++) {
+               vdso_clock_gettime(clockid, &tp);
+       }
+
+       ksft_test_result_pass("%s:\tclock: %10s\tcycles:\t%10ld\n",
+                             in_ns ? "ns" : "host", clockstr, i);
+}
+
+int main(int argc, char *argv[])
+{
+       time_t offset = 10;
+       int nsfd;
+
+       ksft_set_plan(8);
+
+       fill_function_pointers();
+       if (!vdso_clock_gettime)
+               ksft_exit_skip("vDSO clock_gettime() was not found\n");
+
+       test(CLOCK_MONOTONIC, "monotonic", false);
+       test(CLOCK_MONOTONIC_COARSE, "monotonic-coarse", false);
+       test(CLOCK_MONOTONIC_RAW, "monotonic-raw", false);
+       test(CLOCK_BOOTTIME, "boottime", false);
+
+       nscheck();
+
+       if (unshare_timens())
+               return 1;
+
+       nsfd = open("/proc/self/ns/time_for_children", O_RDONLY);
+       if (nsfd < 0)
+               return pr_perror("Can't open a time namespace");
+
+       if (_settime(CLOCK_MONOTONIC, offset))
+               return 1;
+       if (_settime(CLOCK_BOOTTIME, offset))
+               return 1;
+
+       if (setns(nsfd, CLONE_NEWTIME))
+               return pr_perror("setns");
+
+       test(CLOCK_MONOTONIC, "monotonic", true);
+       test(CLOCK_MONOTONIC_COARSE, "monotonic-coarse", true);
+       test(CLOCK_MONOTONIC_RAW, "monotonic-raw", true);
+       test(CLOCK_BOOTTIME, "boottime", true);
+
+       ksft_exit_pass();
+       return 0;
+}
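
A caveat with the dlopen()/dlsym() lookup above: when the vDSO or the symbol is missing, vdso_clock_gettime stays NULL and calling it would crash. A hedged sketch of a fallback wrapper, with an invented helper name, that degrades to the raw syscall:

#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

typedef int (*vgettime_t)(clockid_t, struct timespec *);

/* Resolved elsewhere (e.g. by a dlsym() lookup); may legitimately be NULL. */
static vgettime_t vdso_clock_gettime;

/* Invented helper: prefer the vDSO fast path, else use the plain syscall. */
static int safe_clock_gettime(clockid_t clk, struct timespec *ts)
{
        if (vdso_clock_gettime)
                return vdso_clock_gettime(clk, ts);
        return syscall(SYS_clock_gettime, clk, ts);
}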
diff --git a/tools/testing/selftests/timens/log.h b/tools/testing/selftests/timens/log.h
new file mode 100644 (file)
index 0000000..db64df2
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SELFTEST_TIMENS_LOG_H__
+#define __SELFTEST_TIMENS_LOG_H__
+
+#define pr_msg(fmt, lvl, ...)                                          \
+       ksft_print_msg("[%s] (%s:%d)\t" fmt "\n",                       \
+                       lvl, __FILE__, __LINE__, ##__VA_ARGS__)
+
+#define pr_p(func, fmt, ...)   func(fmt ": %m", ##__VA_ARGS__)
+
+#define pr_err(fmt, ...)                                               \
+       ({                                                              \
+               ksft_test_result_error(fmt "\n", ##__VA_ARGS__);                \
+               -1;                                                     \
+       })
+
+#define pr_fail(fmt, ...)                                      \
+       ({                                                      \
+               ksft_test_result_fail(fmt, ##__VA_ARGS__);      \
+               -1;                                             \
+       })
+
+#define pr_perror(fmt, ...)    pr_p(pr_err, fmt, ##__VA_ARGS__)
+
+#endif
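
For reference, a usage sketch with an invented call site: pr_perror() expands through pr_p() into pr_err() with ": %m" appended, so glibc substitutes the current errno string, and the whole statement expression evaluates to -1, which lets callers report and return in one line:

#include <fcntl.h>
#include <unistd.h>

#include "../kselftest.h"
#include "log.h"

/* Invented call site: on failure this marks the test as errored, prints
 * "open(<path>): <strerror(errno)>", and returns -1 to the caller. */
static int open_ro(const char *path)
{
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return pr_perror("open(%s)", path);
        return fd;
}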
diff --git a/tools/testing/selftests/timens/procfs.c b/tools/testing/selftests/timens/procfs.c
new file mode 100644 (file)
index 0000000..43d93f4
--- /dev/null
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <math.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <time.h>
+
+#include "log.h"
+#include "timens.h"
+
+/*
+ * The test is expected to finish well within a day: add ten days to the
+ * child's clocks and check that the parent's clocks stay in the same day.
+ */
+#define MAX_TEST_TIME_SEC              (60*5)
+#define DAY_IN_SEC                     (60*60*24)
+#define TEN_DAYS_IN_SEC                        (10*DAY_IN_SEC)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+static int child_ns, parent_ns;
+
+static int switch_ns(int fd)
+{
+       if (setns(fd, CLONE_NEWTIME))
+               return pr_perror("setns()");
+
+       return 0;
+}
+
+static int init_namespaces(void)
+{
+       char path[] = "/proc/self/ns/time_for_children";
+       struct stat st1, st2;
+
+       parent_ns = open(path, O_RDONLY);
+       if (parent_ns <= 0)
+               return pr_perror("Unable to open %s", path);
+
+       if (fstat(parent_ns, &st1))
+               return pr_perror("Unable to stat the parent timens");
+
+       if (unshare_timens())
+               return -1;
+
+       child_ns = open(path, O_RDONLY);
+       if (child_ns <= 0)
+               return pr_perror("Unable to open %s", path);
+
+       if (fstat(child_ns, &st2))
+               return pr_perror("Unable to stat the timens");
+
+       if (st1.st_ino == st2.st_ino)
+               return pr_err("The same child_ns after CLONE_NEWTIME");
+
+       if (_settime(CLOCK_BOOTTIME, TEN_DAYS_IN_SEC))
+               return -1;
+
+       return 0;
+}
+
+static int read_proc_uptime(struct timespec *uptime)
+{
+       unsigned long up_sec, up_nsec;
+       FILE *proc;
+
+       proc = fopen("/proc/uptime", "r");
+       if (proc == NULL) {
+               pr_perror("Unable to open /proc/uptime");
+               return -1;
+       }
+
+       if (fscanf(proc, "%lu.%02lu", &up_sec, &up_nsec) != 2) {
+               int err = errno;
+
+               if (err)
+                       pr_perror("fscanf");
+               else
+                       pr_err("failed to parse /proc/uptime");
+               fclose(proc);
+               return err ? -err : -1;
+       }
+       fclose(proc);
+
+       uptime->tv_sec = up_sec;
+       uptime->tv_nsec = up_nsec;
+       return 0;
+}
+
+static int check_uptime(void)
+{
+       struct timespec uptime_new, uptime_old;
+       time_t uptime_expected;
+       double prec = MAX_TEST_TIME_SEC;
+
+       if (switch_ns(parent_ns))
+               return pr_err("switch_ns(%d)", parent_ns);
+
+       if (read_proc_uptime(&uptime_old))
+               return 1;
+
+       if (switch_ns(child_ns))
+               return pr_err("switch_ns(%d)", child_ns);
+
+       if (read_proc_uptime(&uptime_new))
+               return 1;
+
+       uptime_expected = uptime_old.tv_sec + TEN_DAYS_IN_SEC;
+       if (fabs(difftime(uptime_new.tv_sec, uptime_expected)) > prec) {
+               pr_fail("uptime in /proc/uptime: old %ld, new %ld [%ld]",
+                       uptime_old.tv_sec, uptime_new.tv_sec,
+                       uptime_old.tv_sec + TEN_DAYS_IN_SEC);
+               return 1;
+       }
+
+       ksft_test_result_pass("Passed for /proc/uptime\n");
+       return 0;
+}
+
+int main(int argc, char *argv[])
+{
+       int ret = 0;
+
+       nscheck();
+
+       ksft_set_plan(1);
+
+       if (init_namespaces())
+               return 1;
+
+       ret |= check_uptime();
+
+       if (ret)
+               ksft_exit_fail();
+       ksft_exit_pass();
+       return ret;
+}
diff --git a/tools/testing/selftests/timens/timens.c b/tools/testing/selftests/timens/timens.c
new file mode 100644 (file)
index 0000000..559d26e
--- /dev/null
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "log.h"
+#include "timens.h"
+
+/*
+ * The test is expected to finish well within a day: add ten days to the
+ * child's clocks and check that the parent's clocks stay in the same day.
+ */
+#define DAY_IN_SEC                     (60*60*24)
+#define TEN_DAYS_IN_SEC                        (10*DAY_IN_SEC)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+struct test_clock {
+       clockid_t id;
+       char *name;
+       /*
+        * off_id is -1 if the clock has its own offset; otherwise it is
+        * the index of the clocks[] entry whose offset applies to it.
+        */
+       int off_id;
+       time_t offset;
+};
+
+#define ct(clock, off_id)      { clock, #clock, off_id }
+static struct test_clock clocks[] = {
+       ct(CLOCK_BOOTTIME, -1),
+       ct(CLOCK_BOOTTIME_ALARM, 1),
+       ct(CLOCK_MONOTONIC, -1),
+       ct(CLOCK_MONOTONIC_COARSE, 1),
+       ct(CLOCK_MONOTONIC_RAW, 1),
+};
+#undef ct
+
+static int child_ns, parent_ns = -1;
+
+static int switch_ns(int fd)
+{
+       if (setns(fd, CLONE_NEWTIME)) {
+               pr_perror("setns()");
+               return -1;
+       }
+
+       return 0;
+}
+
+static int init_namespaces(void)
+{
+       char path[] = "/proc/self/ns/time_for_children";
+       struct stat st1, st2;
+
+       if (parent_ns == -1) {
+               parent_ns = open(path, O_RDONLY);
+               if (parent_ns <= 0)
+                       return pr_perror("Unable to open %s", path);
+       }
+
+       if (fstat(parent_ns, &st1))
+               return pr_perror("Unable to stat the parent timens");
+
+       if (unshare_timens())
+               return -1;
+
+       child_ns = open(path, O_RDONLY);
+       if (child_ns <= 0)
+               return pr_perror("Unable to open %s", path);
+
+       if (fstat(child_ns, &st2))
+               return pr_perror("Unable to stat the timens");
+
+       if (st1.st_ino == st2.st_ino)
+               return pr_err("The same child_ns after CLONE_NEWTIME");
+
+       return 0;
+}
+
+static int test_gettime(clockid_t clock_index, bool raw_syscall, time_t offset)
+{
+       struct timespec child_ts_new, parent_ts_old, cur_ts;
+       char *entry = raw_syscall ? "syscall" : "vdso";
+       double precision = 0.0;
+
+       if (check_skip(clocks[clock_index].id))
+               return 0;
+
+       switch (clocks[clock_index].id) {
+       case CLOCK_MONOTONIC_COARSE:
+       case CLOCK_MONOTONIC_RAW:
+               precision = -2.0;
+               break;
+       }
+
+       if (switch_ns(parent_ns))
+               return pr_err("switch_ns(%d)", parent_ns);
+
+       if (_gettime(clocks[clock_index].id, &parent_ts_old, raw_syscall))
+               return -1;
+
+       child_ts_new.tv_nsec = parent_ts_old.tv_nsec;
+       child_ts_new.tv_sec = parent_ts_old.tv_sec + offset;
+
+       if (switch_ns(child_ns))
+               return pr_err("switch_ns(%d)", child_ns);
+
+       if (_gettime(clocks[clock_index].id, &cur_ts, raw_syscall))
+               return -1;
+
+       if (difftime(cur_ts.tv_sec, child_ts_new.tv_sec) < precision) {
+               ksft_test_result_fail(
+                       "Child's %s (%s) time has not changed: %lu -> %lu [%lu]\n",
+                       clocks[clock_index].name, entry, parent_ts_old.tv_sec,
+                       child_ts_new.tv_sec, cur_ts.tv_sec);
+               return -1;
+       }
+
+       if (switch_ns(parent_ns))
+               return pr_err("switch_ns(%d)", parent_ns);
+
+       if (_gettime(clocks[clock_index].id, &cur_ts, raw_syscall))
+               return -1;
+
+       if (difftime(cur_ts.tv_sec, parent_ts_old.tv_sec) > DAY_IN_SEC) {
+               ksft_test_result_fail(
+                       "Parent's %s (%s) time has changed: %lu -> %lu [%lu]\n",
+                       clocks[clock_index].name, entry, parent_ts_old.tv_sec,
+                       child_ts_new.tv_sec, cur_ts.tv_sec);
+               /* Let's play nice and put it closer to original */
+               clock_settime(clocks[clock_index].id, &cur_ts);
+               return -1;
+       }
+
+       ksft_test_result_pass("Passed for %s (%s)\n",
+                               clocks[clock_index].name, entry);
+       return 0;
+}
+
+int main(int argc, char *argv[])
+{
+       unsigned int i;
+       time_t offset;
+       int ret = 0;
+
+       nscheck();
+
+       check_config_posix_timers();
+
+       ksft_set_plan(ARRAY_SIZE(clocks) * 2);
+
+       if (init_namespaces())
+               return 1;
+
+       /* Offsets have to be set before tasks enter the namespace. */
+       for (i = 0; i < ARRAY_SIZE(clocks); i++) {
+               if (clocks[i].off_id != -1)
+                       continue;
+               offset = TEN_DAYS_IN_SEC + i * 1000;
+               clocks[i].offset = offset;
+               if (_settime(clocks[i].id, offset))
+                       return 1;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(clocks); i++) {
+               if (clocks[i].off_id != -1)
+                       offset = clocks[clocks[i].off_id].offset;
+               else
+                       offset = clocks[i].offset;
+               ret |= test_gettime(i, true, offset);
+               ret |= test_gettime(i, false, offset);
+       }
+
+       if (ret)
+               ksft_exit_fail();
+
+       ksft_exit_pass();
+       return !!ret;
+}
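
The rule stated in main() above, that offsets have to be written before any task enters the namespace, is worth spelling out as a sequence. A hedged sketch of the minimal flow follows; it requires CAP_SYS_ADMIN, and the expectation that late writes are rejected is my assumption, not something this patch states:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#ifndef CLONE_NEWTIME
# define CLONE_NEWTIME 0x00000080
#endif

int main(void)
{
        char buf[64];
        int fd, len;
        pid_t pid;

        if (unshare(CLONE_NEWTIME))     /* needs CAP_SYS_ADMIN */
                return 1;

        /* One "<clockid> <offset_sec> <offset_nsec>" triple per line;
         * 86400 shifts CLOCK_MONOTONIC forward by one day. */
        len = snprintf(buf, sizeof(buf), "%d 86400 0", CLOCK_MONOTONIC);
        fd = open("/proc/self/timens_offsets", O_WRONLY);
        if (fd < 0 || write(fd, buf, len) != len)
                return 1;
        close(fd);

        pid = fork();   /* the child is the first task inside the new timens */
        if (pid == 0) {
                struct timespec ts;

                clock_gettime(CLOCK_MONOTONIC, &ts);
                printf("child CLOCK_MONOTONIC: %ld s\n", (long)ts.tv_sec);
                return 0;
        }
        waitpid(pid, NULL, 0);
        return 0;
}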
diff --git a/tools/testing/selftests/timens/timens.h b/tools/testing/selftests/timens/timens.h
new file mode 100644 (file)
index 0000000..e09e7e3
--- /dev/null
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TIMENS_H__
+#define __TIMENS_H__
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+
+#ifndef CLONE_NEWTIME
+# define CLONE_NEWTIME 0x00000080
+#endif
+
+static bool config_posix_timers = true;
+
+static inline void check_config_posix_timers(void)
+{
+       if (timer_create(-1, NULL, NULL) == -1 && errno == ENOSYS)
+               config_posix_timers = false;
+}
+
+static inline bool check_skip(int clockid)
+{
+       if (config_posix_timers)
+               return false;
+
+       switch (clockid) {
+       /* Only these clocks are supported without CONFIG_POSIX_TIMERS. */
+       case CLOCK_BOOTTIME:
+       case CLOCK_MONOTONIC:
+       case CLOCK_REALTIME:
+               return false;
+       default:
+               ksft_test_result_skip("Posix Clocks & timers are not supported\n");
+               return true;
+       }
+}
+
+static inline int unshare_timens(void)
+{
+       if (unshare(CLONE_NEWTIME)) {
+               if (errno == EPERM)
+                       ksft_exit_skip("need to run as root\n");
+               return pr_perror("Can't unshare() timens");
+       }
+       return 0;
+}
+
+static inline int _settime(clockid_t clk_id, time_t offset)
+{
+       int fd, len;
+       char buf[4096];
+
+       if (clk_id == CLOCK_MONOTONIC_COARSE || clk_id == CLOCK_MONOTONIC_RAW)
+               clk_id = CLOCK_MONOTONIC;
+
+       len = snprintf(buf, sizeof(buf), "%d %ld 0", clk_id, offset);
+
+       fd = open("/proc/self/timens_offsets", O_WRONLY);
+       if (fd < 0)
+               return pr_perror("/proc/self/timens_offsets");
+
+       if (write(fd, buf, len) != len) {
+               pr_perror("/proc/self/timens_offsets");
+               close(fd);
+               return -1;
+       }
+
+       close(fd);
+
+       return 0;
+}
+
+static inline int _gettime(clockid_t clk_id, struct timespec *res, bool raw_syscall)
+{
+       int err;
+
+       if (!raw_syscall) {
+               if (clock_gettime(clk_id, res)) {
+                       pr_perror("clock_gettime(%d)", (int)clk_id);
+                       return -1;
+               }
+               return 0;
+       }
+
+       err = syscall(SYS_clock_gettime, clk_id, res);
+       if (err)
+               pr_perror("syscall(SYS_clock_gettime(%d))", (int)clk_id);
+
+       return err;
+}
+
+static inline void nscheck(void)
+{
+       if (access("/proc/self/ns/time", F_OK) < 0)
+               ksft_exit_skip("Time namespaces are not supported\n");
+}
+
+#endif
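
Note that unshare_timens() leaves the calling process outside the new namespace; only its children start inside it. The tests above therefore re-enter via setns() on their own time_for_children handle. A hedged sketch of that step, with an invented helper name:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <unistd.h>

#ifndef CLONE_NEWTIME
# define CLONE_NEWTIME 0x00000080
#endif

/* Invented helper: move the current process into the time namespace
 * that its future children would inherit. */
static int enter_own_timens(void)
{
        int nsfd = open("/proc/self/ns/time_for_children", O_RDONLY);

        if (nsfd < 0)
                return -1;
        if (setns(nsfd, CLONE_NEWTIME)) {       /* clocks now include offsets */
                close(nsfd);
                return -1;
        }
        close(nsfd);
        return 0;
}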
diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c
new file mode 100644 (file)
index 0000000..0cca7aa
--- /dev/null
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <signal.h>
+#include <errno.h>
+
+#include "log.h"
+#include "timens.h"
+
+int run_test(int clockid, struct timespec now)
+{
+       struct itimerspec new_value;
+       long long elapsed;
+       timer_t fd;
+       int i;
+
+       for (i = 0; i < 2; i++) {
+               struct sigevent sevp = {.sigev_notify = SIGEV_NONE};
+               int flags = 0;
+
+               new_value.it_value.tv_sec = 3600;
+               new_value.it_value.tv_nsec = 0;
+               new_value.it_interval.tv_sec = 1;
+               new_value.it_interval.tv_nsec = 0;
+
+               if (i == 1) {
+                       new_value.it_value.tv_sec += now.tv_sec;
+                       new_value.it_value.tv_nsec += now.tv_nsec;
+               }
+
+               if (timer_create(clockid, &sevp, &fd) == -1) {
+                       if (errno == ENOSYS) {
+                               ksft_test_result_skip("Posix Clocks & timers are not supported\n");
+                               return 0;
+                       }
+                       return pr_perror("timer_create");
+               }
+
+               if (i == 1)
+                       flags |= TIMER_ABSTIME;
+               if (timer_settime(fd, flags, &new_value, NULL) == -1)
+                       return pr_perror("timer_settime");
+
+               if (timer_gettime(fd, &new_value) == -1)
+                       return pr_perror("timer_gettime");
+
+               elapsed = new_value.it_value.tv_sec;
+               if (llabs(elapsed - 3600) > 60) {
+                       ksft_test_result_fail("clockid: %d elapsed: %lld\n",
+                                             clockid, elapsed);
+                       return 1;
+               }
+       }
+
+       ksft_test_result_pass("clockid=%d\n", clockid);
+
+       return 0;
+}
+
+int main(int argc, char *argv[])
+{
+       int ret, status, len, fd;
+       char buf[4096];
+       pid_t pid;
+       struct timespec btime_now, mtime_now;
+
+       nscheck();
+
+       ksft_set_plan(3);
+
+       clock_gettime(CLOCK_MONOTONIC, &mtime_now);
+       clock_gettime(CLOCK_BOOTTIME, &btime_now);
+
+       if (unshare_timens())
+               return 1;
+
+       len = snprintf(buf, sizeof(buf), "%d %d 0\n%d %d 0",
+                       CLOCK_MONOTONIC, 70 * 24 * 3600,
+                       CLOCK_BOOTTIME, 9 * 24 * 3600);
+       fd = open("/proc/self/timens_offsets", O_WRONLY);
+       if (fd < 0)
+               return pr_perror("/proc/self/timens_offsets");
+
+       if (write(fd, buf, len) != len)
+               return pr_perror("/proc/self/timens_offsets");
+
+       close(fd);
+       mtime_now.tv_sec += 70 * 24 * 3600;
+       btime_now.tv_sec += 9 * 24 * 3600;
+
+       pid = fork();
+       if (pid < 0)
+               return pr_perror("Unable to fork");
+       if (pid == 0) {
+               ret = 0;
+               ret |= run_test(CLOCK_BOOTTIME, btime_now);
+               ret |= run_test(CLOCK_MONOTONIC, mtime_now);
+               ret |= run_test(CLOCK_BOOTTIME_ALARM, btime_now);
+
+               if (ret)
+                       ksft_exit_fail();
+               ksft_exit_pass();
+               return ret;
+       }
+
+       if (waitpid(pid, &status, 0) != pid)
+               return pr_perror("Unable to wait for the child process");
+
+       if (WIFEXITED(status))
+               return WEXITSTATUS(status);
+
+       return 1;
+}
diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c
new file mode 100644 (file)
index 0000000..eff1ec5
--- /dev/null
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+
+#include <sys/timerfd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#include "log.h"
+#include "timens.h"
+
+static int tclock_gettime(clock_t clockid, struct timespec *now)
+{
+       if (clockid == CLOCK_BOOTTIME_ALARM)
+               clockid = CLOCK_BOOTTIME;
+       return clock_gettime(clockid, now);
+}
+
+int run_test(int clockid, struct timespec now)
+{
+       struct itimerspec new_value;
+       long long elapsed;
+       int fd, i;
+
+       if (tclock_gettime(clockid, &now))
+               return pr_perror("clock_gettime(%d)", clockid);
+
+       for (i = 0; i < 2; i++) {
+               int flags = 0;
+
+               new_value.it_value.tv_sec = 3600;
+               new_value.it_value.tv_nsec = 0;
+               new_value.it_interval.tv_sec = 1;
+               new_value.it_interval.tv_nsec = 0;
+
+               if (i == 1) {
+                       new_value.it_value.tv_sec += now.tv_sec;
+                       new_value.it_value.tv_nsec += now.tv_nsec;
+               }
+
+               fd = timerfd_create(clockid, 0);
+               if (fd == -1)
+                       return pr_perror("timerfd_create(%d)", clockid);
+
+               if (i == 1)
+                       flags |= TFD_TIMER_ABSTIME;
+
+               if (timerfd_settime(fd, flags, &new_value, NULL))
+                       return pr_perror("timerfd_settime(%d)", clockid);
+
+               if (timerfd_gettime(fd, &new_value))
+                       return pr_perror("timerfd_gettime(%d)", clockid);
+
+               elapsed = new_value.it_value.tv_sec;
+               if (llabs(elapsed - 3600) > 60) {
+                       ksft_test_result_fail("clockid: %d elapsed: %lld\n",
+                                             clockid, elapsed);
+                       return 1;
+               }
+
+               close(fd);
+       }
+
+       ksft_test_result_pass("clockid=%d\n", clockid);
+
+       return 0;
+}
+
+int main(int argc, char *argv[])
+{
+       int ret, status, len, fd;
+       char buf[4096];
+       pid_t pid;
+       struct timespec btime_now, mtime_now;
+
+       nscheck();
+
+       ksft_set_plan(3);
+
+       clock_gettime(CLOCK_MONOTONIC, &mtime_now);
+       clock_gettime(CLOCK_BOOTTIME, &btime_now);
+
+       if (unshare_timens())
+               return 1;
+
+       len = snprintf(buf, sizeof(buf), "%d %d 0\n%d %d 0",
+                       CLOCK_MONOTONIC, 70 * 24 * 3600,
+                       CLOCK_BOOTTIME, 9 * 24 * 3600);
+       fd = open("/proc/self/timens_offsets", O_WRONLY);
+       if (fd < 0)
+               return pr_perror("/proc/self/timens_offsets");
+
+       if (write(fd, buf, len) != len)
+               return pr_perror("/proc/self/timens_offsets");
+
+       close(fd);
+       mtime_now.tv_sec += 70 * 24 * 3600;
+       btime_now.tv_sec += 9 * 24 * 3600;
+
+       pid = fork();
+       if (pid < 0)
+               return pr_perror("Unable to fork");
+       if (pid == 0) {
+               ret = 0;
+               ret |= run_test(CLOCK_BOOTTIME, btime_now);
+               ret |= run_test(CLOCK_MONOTONIC, mtime_now);
+               ret |= run_test(CLOCK_BOOTTIME_ALARM, btime_now);
+
+               if (ret)
+                       ksft_exit_fail();
+               ksft_exit_pass();
+               return ret;
+       }
+
+       if (waitpid(pid, &status, 0) != pid)
+               return pr_perror("Unable to wait for the child process");
+
+       if (WIFEXITED(status))
+               return WEXITSTATUS(status);
+
+       return 1;
+}
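
run_test() above arms the same one-hour timer twice, first relative and then absolute (TFD_TIMER_ABSTIME against a clock that already carries the namespace offset), expecting timerfd_gettime() to report roughly the same remaining time either way. A hedged standalone sketch of just that arming difference, outside any time namespace:

#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        struct itimerspec its = { .it_value = { .tv_sec = 10 } };
        struct itimerspec left;
        struct timespec now;
        int fd = timerfd_create(CLOCK_MONOTONIC, 0);

        if (fd < 0)
                return 1;

        timerfd_settime(fd, 0, &its, NULL);     /* relative: fires in ~10s */
        timerfd_gettime(fd, &left);
        printf("relative: %ld s left\n", (long)left.it_value.tv_sec);

        clock_gettime(CLOCK_MONOTONIC, &now);
        its.it_value.tv_sec += now.tv_sec;      /* turn it into an absolute deadline */
        timerfd_settime(fd, TFD_TIMER_ABSTIME, &its, NULL);
        timerfd_gettime(fd, &left);
        printf("absolute: %ld s left\n", (long)left.it_value.tv_sec);

        close(fd);
        return 0;
}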
index 80521d46220ccc0a33edb79f8f4dbd993f39ebd7..8155c2ea7ccbb6ed1b9685f3c602105eb2c26173 100755 (executable)
@@ -2,3 +2,9 @@
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 
 python -m unittest -v tpm2_tests.SmokeTest
+python -m unittest -v tpm2_tests.AsyncTest
+
+CLEAR_CMD=$(which tpm2_clear)
+if [ -n "$CLEAR_CMD" ]; then
+       tpm2_clear -T device
+fi
index 828c185846248031ff598670d393e34a38fa24df..d0fcb66a88a68a0bf9e57d5da24aadfd695c12f9 100644 (file)
@@ -6,8 +6,8 @@ import socket
 import struct
 import sys
 import unittest
-from fcntl import ioctl
-
+import fcntl
+import select
 
 TPM2_ST_NO_SESSIONS = 0x8001
 TPM2_ST_SESSIONS = 0x8002
@@ -352,6 +352,7 @@ def hex_dump(d):
 class Client:
     FLAG_DEBUG = 0x01
     FLAG_SPACE = 0x02
+    FLAG_NONBLOCK = 0x04
     TPM_IOC_NEW_SPACE = 0xa200
 
     def __init__(self, flags = 0):
@@ -362,13 +363,27 @@ class Client:
         else:
             self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0)
 
+        if (self.flags & Client.FLAG_NONBLOCK):
+            flags = fcntl.fcntl(self.tpm, fcntl.F_GETFL)
+            flags |= os.O_NONBLOCK
+            fcntl.fcntl(self.tpm, fcntl.F_SETFL, flags)
+            self.tpm_poll = select.poll()
+
     def close(self):
         self.tpm.close()
 
     def send_cmd(self, cmd):
         self.tpm.write(cmd)
+
+        if (self.flags & Client.FLAG_NONBLOCK):
+            self.tpm_poll.register(self.tpm, select.POLLIN)
+            self.tpm_poll.poll(10000)
+
         rsp = self.tpm.read()
 
+        if (self.flags & Client.FLAG_NONBLOCK):
+            self.tpm_poll.unregister(self.tpm)
+
         if (self.flags & Client.FLAG_DEBUG) != 0:
             sys.stderr.write('cmd' + os.linesep)
             sys.stderr.write(hex_dump(cmd) + os.linesep)
index d4973be53493226b19dcca5e7140e997ea964e8b..728be7c69b764fe48592bd22e20d01a0fe19d68c 100644 (file)
@@ -288,3 +288,16 @@ class SpaceTest(unittest.TestCase):
 
         self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
                          tpm2.TSS2_RESMGR_TPM_RC_LAYER)
+
+class AsyncTest(unittest.TestCase):
+    def setUp(self):
+        logging.basicConfig(filename='AsyncTest.log', level=logging.DEBUG)
+
+    def test_async(self):
+        log = logging.getLogger(__name__)
+        log.debug(sys._getframe().f_code.co_name)
+
+        async_client = tpm2.Client(tpm2.Client.FLAG_NONBLOCK)
+        log.debug("Calling get_cap in a NON_BLOCKING mode")
+        async_client.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_LOADED_SESSION)
+        async_client.close()
index 0aad760fcd8c3704a6445e1a5218082d3d069063..2bbac73e6477790795a8ad56c2da524cabcda9e3 100755 (executable)
@@ -128,7 +128,7 @@ parse() {
                        str="${ftype} ${name} ${location} ${str}"
                        ;;
                "nod")
-                       local dev=`LC_ALL=C ls -l "${location}"`
+                       local dev="`LC_ALL=C ls -l "${location}"`"
                        local maj=`field 5 ${dev}`
                        local min=`field 6 ${dev}`
                        maj=${maj%,}
index 4a753a48767b76659d7f9d60975121644fbca3dd..84598469e6ff35d6ffa3cbf87bfe46306f31dd43 100644 (file)
@@ -91,7 +91,7 @@ endif
 # asm-generic/*.h is used by asm/*.h, and should not be included directly
 header-test- += asm-generic/%
 
-extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h'))
+extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h' 2>/dev/null))
 
 quiet_cmd_hdrtest = HDRTEST $<
       cmd_hdrtest = \
index 12e0280291cee9c0670f0d5c222ff6f24e4c0060..8de4daf25097d9267cf097d3be879f4372e3ff6a 100644 (file)
@@ -1352,7 +1352,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
 }
 
-static void cpu_init_hyp_mode(void *dummy)
+static void cpu_init_hyp_mode(void)
 {
        phys_addr_t pgd_ptr;
        unsigned long hyp_stack_ptr;
@@ -1386,7 +1386,7 @@ static void cpu_hyp_reinit(void)
        if (is_kernel_in_hyp_mode())
                kvm_timer_init_vhe();
        else
-               cpu_init_hyp_mode(NULL);
+               cpu_init_hyp_mode();
 
        kvm_arm_init_debug();
 
index 38b4c910b6c3861609979f6292f81462ee39d595..0b32a904a1bb3a5636363803eceb34a45766cd25 100644 (file)
@@ -38,6 +38,11 @@ static unsigned long io_map_base;
 #define KVM_S2PTE_FLAG_IS_IOMAP                (1UL << 0)
 #define KVM_S2_FLAG_LOGGING_ACTIVE     (1UL << 1)
 
+static bool is_iomap(unsigned long flags)
+{
+       return flags & KVM_S2PTE_FLAG_IS_IOMAP;
+}
+
 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
 {
        return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
@@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        vma_pagesize = vma_kernel_pagesize(vma);
        if (logging_active ||
+           (vma->vm_flags & VM_PFNMAP) ||
            !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
                force_pte = true;
                vma_pagesize = PAGE_SIZE;
@@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        writable = false;
        }
 
+       if (exec_fault && is_iomap(flags))
+               return -ENOEXEC;
+
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;
@@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (writable)
                kvm_set_pfn_dirty(pfn);
 
-       if (fault_status != FSC_PERM)
+       if (fault_status != FSC_PERM && !is_iomap(flags))
                clean_dcache_guest_page(pfn, vma_pagesize);
 
        if (exec_fault)
@@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
-                       kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-                       ret = 1;
-                       goto out_unlock;
+                       ret = -ENOEXEC;
+                       goto out;
                }
 
                /*
@@ -1992,6 +2000,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
        if (ret == 0)
                ret = 1;
+out:
+       if (ret == -ENOEXEC) {
+               kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+               ret = 1;
+       }
 out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
@@ -2301,15 +2314,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                if (!vma || vma->vm_start >= reg_end)
                        break;
 
-               /*
-                * Mapping a read-only VMA is only allowed if the
-                * memory region is configured as read-only.
-                */
-               if (writable && !(vma->vm_flags & VM_WRITE)) {
-                       ret = -EPERM;
-                       break;
-               }
-
                /*
                 * Take the intersection of this VMA with the memory region
                 */
index b3c5de48064c91b3a0b32fae72590708f7783695..a963b9d766b73a75d93a500cdaf0c2cc49ba8fdc 100644 (file)
@@ -70,7 +70,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
  */
 int kvm_vgic_create(struct kvm *kvm, u32 type)
 {
-       int i, vcpu_lock_idx = -1, ret;
+       int i, ret;
        struct kvm_vcpu *vcpu;
 
        if (irqchip_in_kernel(kvm))
@@ -86,17 +86,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
                !kvm_vgic_global_state.can_emulate_gicv2)
                return -ENODEV;
 
-       /*
-        * Any time a vcpu is run, vcpu_load is called which tries to grab the
-        * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
-        * that no other VCPUs are run while we create the vgic.
-        */
        ret = -EBUSY;
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (!mutex_trylock(&vcpu->mutex))
-                       goto out_unlock;
-               vcpu_lock_idx = i;
-       }
+       if (!lock_all_vcpus(kvm))
+               return ret;
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->arch.has_run_once)
@@ -125,10 +117,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
                INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
 out_unlock:
-       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-               vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-               mutex_unlock(&vcpu->mutex);
-       }
+       unlock_all_vcpus(kvm);
        return ret;
 }
 
@@ -177,6 +166,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
                        break;
                default:
                        kfree(dist->spis);
+                       dist->spis = NULL;
                        return -EINVAL;
                }
        }